Dataset schema (field: type):

url: string
repository_url: string
labels_url: string
comments_url: string
events_url: string
html_url: string
id: int64
node_id: string
number: int64
title: string
user: dict
labels: list
state: string
locked: bool
assignee: dict
assignees: list
milestone: dict
comments: list
created_at: timestamp[ns, tz=UTC]
updated_at: timestamp[ns, tz=UTC]
closed_at: timestamp[ns, tz=UTC]
author_association: string
type: float64
active_lock_reason: float64
sub_issues_summary: dict
body: string
closed_by: dict
reactions: dict
timeline_url: string
performed_via_github_app: float64
state_reason: string
draft: float64
pull_request: dict
https://api.github.com/repos/huggingface/datasets/issues/4879
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4879/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4879/comments
https://api.github.com/repos/huggingface/datasets/issues/4879/events
https://github.com/huggingface/datasets/pull/4879
1,348,346,407
PR_kwDODunzps49qbOl
4,879
Fix Citation Information section in dataset cards
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_4879). All of your documentation changes will be reflected on that endpoint." ]
2022-08-23T18:06:43Z
2022-09-27T14:04:45Z
2022-08-24T04:09:07Z
MEMBER
null
null
null
Fix Citation Information section in dataset cards:
- cc_news
- conllpp
- datacommons_factcheck
- gnad10
- id_panl_bppt
- jigsaw_toxicity_pred
- kinnews_kirnews
- kor_sarcasm
- makhzan
- reasoning_bg
- ro_sts
- ro_sts_parallel
- sanskrit_classic
- telugu_news
- thaiqa_squad
- wiki_movies

This PR partially fixes the Citation Information section in dataset cards. Subsequent PRs will follow to complete this task.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4879/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4879/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4879.diff", "html_url": "https://github.com/huggingface/datasets/pull/4879", "merged_at": "2022-08-24T04:09:07Z", "patch_url": "https://github.com/huggingface/datasets/pull/4879.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4879" }
https://api.github.com/repos/huggingface/datasets/issues/5212
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5212/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5212/comments
https://api.github.com/repos/huggingface/datasets/issues/5212/events
https://github.com/huggingface/datasets/pull/5212
1,439,642,483
PR_kwDODunzps5CZPI2
5,212
Fix CI require_beam maximum compatible dill version
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_5212). All of your documentation changes will be reflected on that endpoint." ]
2022-11-08T07:30:01Z
2022-11-15T06:32:27Z
2022-11-15T06:32:26Z
MEMBER
null
null
null
A previous commit to the main branch introduced an additional requirement on the maximum compatible `dill` version with `apache-beam` in our CI `require_beam`:
- d7c942228b8dcf4de64b00a3053dce59b335f618
- ec222b220b79f10c8d7b015769f0999b15959feb

This PR fixes the maximum compatible `dill` version with `apache-beam`, which is <0.3.2 (and not 0.3.6): https://github.com/apache/beam/blob/v2.42.0/sdks/python/setup.py#L219
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5212/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5212/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5212.diff", "html_url": "https://github.com/huggingface/datasets/pull/5212", "merged_at": "2022-11-15T06:32:26Z", "patch_url": "https://github.com/huggingface/datasets/pull/5212.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5212" }
https://api.github.com/repos/huggingface/datasets/issues/7000
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7000/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7000/comments
https://api.github.com/repos/huggingface/datasets/issues/7000/events
https://github.com/huggingface/datasets/issues/7000
2,372,887,585
I_kwDODunzps6Nb2Qh
7,000
IterableDataset: Unsupported ScalarType BFloat16
{ "avatar_url": "https://avatars.githubusercontent.com/u/170015089?v=4", "events_url": "https://api.github.com/users/stoical07/events{/privacy}", "followers_url": "https://api.github.com/users/stoical07/followers", "following_url": "https://api.github.com/users/stoical07/following{/other_user}", "gists_url": "https://api.github.com/users/stoical07/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stoical07", "id": 170015089, "login": "stoical07", "node_id": "U_kgDOCiI5cQ", "organizations_url": "https://api.github.com/users/stoical07/orgs", "received_events_url": "https://api.github.com/users/stoical07/received_events", "repos_url": "https://api.github.com/users/stoical07/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stoical07/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stoical07/subscriptions", "type": "User", "url": "https://api.github.com/users/stoical07", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "@lhoestq Thank you for merging #6607, but unfortunately the issue persists for `IterableDataset` :pensive: ", "Hi ! I opened https://github.com/huggingface/datasets/pull/7002 to fix this bug", "Amazing, thank you so much @lhoestq! :pray:" ]
2024-06-25T14:43:26Z
2024-06-25T16:04:00Z
2024-06-25T15:51:53Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug

`IterableDataset.from_generator` crashes when using BFloat16:

```
File "/usr/local/lib/python3.11/site-packages/datasets/utils/_dill.py", line 169, in _save_torchTensor
    args = (obj.detach().cpu().numpy(),)
            ^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: Got unsupported ScalarType BFloat16
```

### Steps to reproduce the bug

```python
import torch
from datasets import IterableDataset

def demo(x):
    yield {"x": x}

x = torch.tensor([1.], dtype=torch.bfloat16)
dataset = IterableDataset.from_generator(
    demo,
    gen_kwargs=dict(x=x),
)
example = next(iter(dataset))
print(example)
```

### Expected behavior

Code sample should print:

```python
{'x': tensor([1.], dtype=torch.bfloat16)}
```

### Environment info

```
datasets==2.20.0
torch==2.2.2
```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7000/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7000/timeline
null
completed
null
null
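Until the fix referenced in the comments (#7002) landed, a common workaround was to upcast BFloat16 tensors to float32 before handing them to `from_generator`, since the crash comes from a `.numpy()` call and NumPy has no native bfloat16 type. A minimal sketch of that workaround, assuming a float32 round trip is acceptable for the use case:

```python
import torch
from datasets import IterableDataset

def demo(x):
    yield {"x": x}

# ship float32 through datasets: float32 survives the .numpy() call
# in dill's tensor serialization, unlike bfloat16
x32 = torch.tensor([1.], dtype=torch.bfloat16).float()
dataset = IterableDataset.from_generator(demo, gen_kwargs=dict(x=x32))

example = next(iter(dataset))
# restore bfloat16 on the consumer side
restored = torch.as_tensor(example["x"]).to(torch.bfloat16)
print(restored)
```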
https://api.github.com/repos/huggingface/datasets/issues/7093
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7093/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7093/comments
https://api.github.com/repos/huggingface/datasets/issues/7093/events
https://github.com/huggingface/datasets/issues/7093
2,454,413,074
I_kwDODunzps6SS18S
7,093
Add Arabic Docs to datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/53489256?v=4", "events_url": "https://api.github.com/users/AhmedAlmaghz/events{/privacy}", "followers_url": "https://api.github.com/users/AhmedAlmaghz/followers", "following_url": "https://api.github.com/users/AhmedAlmaghz/following{/other_user}", "gists_url": "https://api.github.com/users/AhmedAlmaghz/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/AhmedAlmaghz", "id": 53489256, "login": "AhmedAlmaghz", "node_id": "MDQ6VXNlcjUzNDg5MjU2", "organizations_url": "https://api.github.com/users/AhmedAlmaghz/orgs", "received_events_url": "https://api.github.com/users/AhmedAlmaghz/received_events", "repos_url": "https://api.github.com/users/AhmedAlmaghz/repos", "site_admin": false, "starred_url": "https://api.github.com/users/AhmedAlmaghz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/AhmedAlmaghz/subscriptions", "type": "User", "url": "https://api.github.com/users/AhmedAlmaghz", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
[]
2024-08-07T21:48:05Z
2024-08-07T21:48:05Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request

Add Arabic Docs to datasets: [Datasets Arabic](https://github.com/AhmedAlmaghz/datasets/blob/main/docs/source/ar/index.mdx)

### Motivation

@AhmedAlmaghz https://github.com/AhmedAlmaghz/datasets/blob/main/docs/source/ar/index.mdx

### Your contribution

@AhmedAlmaghz https://github.com/AhmedAlmaghz/datasets/blob/main/docs/source/ar/index.mdx
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7093/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7093/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/6254
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6254/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6254/comments
https://api.github.com/repos/huggingface/datasets/issues/6254/events
https://github.com/huggingface/datasets/issues/6254
1,909,672,104
I_kwDODunzps5x00io
6,254
Dataset.from_generator() costs much more time in VS Code debugging mode than in running mode
{ "avatar_url": "https://avatars.githubusercontent.com/u/56437469?v=4", "events_url": "https://api.github.com/users/dontnet-wuenze/events{/privacy}", "followers_url": "https://api.github.com/users/dontnet-wuenze/followers", "following_url": "https://api.github.com/users/dontnet-wuenze/following{/other_user}", "gists_url": "https://api.github.com/users/dontnet-wuenze/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dontnet-wuenze", "id": 56437469, "login": "dontnet-wuenze", "node_id": "MDQ6VXNlcjU2NDM3NDY5", "organizations_url": "https://api.github.com/users/dontnet-wuenze/orgs", "received_events_url": "https://api.github.com/users/dontnet-wuenze/received_events", "repos_url": "https://api.github.com/users/dontnet-wuenze/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dontnet-wuenze/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dontnet-wuenze/subscriptions", "type": "User", "url": "https://api.github.com/users/dontnet-wuenze", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "Answered on the forum: https://discuss.huggingface.co/t/dataset-from-generator-cost-much-more-time-in-vscode-debugging-mode-then-running-mode/56005/2" ]
2023-09-23T02:07:26Z
2023-10-03T14:42:53Z
2023-10-03T14:42:53Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug

Hey there, I'm using `Dataset.from_generator()` to convert a torch dataset to a Hugging Face `Dataset`. However, when I debug my code in VS Code, `Dataset.from_generator()` runs really slowly, taking up to 20 times longer than running the script from the terminal.

### Steps to reproduce the bug

I wrote a simple test script:

```python
import time
from typing import Callable

from torch.utils.data import Dataset as TorchDataset

from datasets import Dataset as HFDataset


class SimpleDataset(TorchDataset):
    def __init__(self, data):
        self.data = data
        self.keys = list(data[0].keys())

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sample = self.data[index]
        return {key: sample[key] for key in self.keys}


def TorchDataset2HuggingfaceDataset(torch_dataset: TorchDataset, cache_dir: str = None) -> HFDataset:
    """Convert a torch dataset to a Hugging Face dataset."""
    generator: Callable[[], TorchDataset] = lambda: (sample for sample in torch_dataset)
    return HFDataset.from_generator(generator, cache_dir=cache_dir)


if __name__ == '__main__':
    data = [
        {'id': 1, 'name': 'Alice'},
        {'id': 2, 'name': 'Bob'},
        {'id': 3, 'name': 'Charlie'}
    ]
    torch_dataset = SimpleDataset(data)

    start_time = time.time()
    huggingface_dataset = TorchDataset2HuggingfaceDataset(torch_dataset)
    end_time = time.time()
    print("time: ", end_time - start_time)
    print(huggingface_dataset)
```

### Expected behavior

On my machine this test reports a running time of 0.086 s from the terminal, but 0.25 s in VS Code debugging mode, which I think is much longer than expected. I'd like to know whether anything is wrong in the code or whether this is just a cost of debugging. I traced the code and this is the function where it gets stuck:

```python
def create_config_id(
    self,
    config_kwargs: dict,
    custom_features: Optional[Features] = None,
) -> str:
    ...
    # stuck on this line
    suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
```

### Environment info

- `datasets` version: 2.12.0
- Platform: Linux-5.11.0-27-generic-x86_64-with-glibc2.31
- Python version: 3.11.3
- Huggingface_hub version: 0.17.2
- PyArrow version: 11.0.0
- Pandas version: 2.0.1
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6254/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6254/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7196
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7196/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7196/comments
https://api.github.com/repos/huggingface/datasets/issues/7196/events
https://github.com/huggingface/datasets/issues/7196
2,564,218,566
I_kwDODunzps6Y1t7G
7,196
concatenate_datasets does not preserve shuffling state
{ "avatar_url": "https://avatars.githubusercontent.com/u/5719745?v=4", "events_url": "https://api.github.com/users/alex-hh/events{/privacy}", "followers_url": "https://api.github.com/users/alex-hh/followers", "following_url": "https://api.github.com/users/alex-hh/following{/other_user}", "gists_url": "https://api.github.com/users/alex-hh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/alex-hh", "id": 5719745, "login": "alex-hh", "node_id": "MDQ6VXNlcjU3MTk3NDU=", "organizations_url": "https://api.github.com/users/alex-hh/orgs", "received_events_url": "https://api.github.com/users/alex-hh/received_events", "repos_url": "https://api.github.com/users/alex-hh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/alex-hh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alex-hh/subscriptions", "type": "User", "url": "https://api.github.com/users/alex-hh", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "It also does preserve `split_by_node`, so in the meantime you should call `shuffle` or `split_by_node` AFTER `interleave_datasets` or `concatenate_datasets`" ]
2024-10-03T14:30:38Z
2025-03-18T10:56:47Z
null
CONTRIBUTOR
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug

After concatenating iterable datasets, the shuffling state is destroyed, similar to #7156.

This means concatenation can't be used to resolve uneven numbers of samples across devices when using iterable datasets in a distributed setting, as discussed in #6623.

I also noticed that the number of shards is the same after concatenation, which I found surprising, but I don't understand the internals well enough to know whether this is actually surprising or not.

### Steps to reproduce the bug

```python
import datasets
import torch.utils.data


def gen(shards):
    yield {"shards": shards}


def main():
    dataset1 = datasets.IterableDataset.from_generator(
        gen,
        gen_kwargs={"shards": list(range(25))},
    )
    dataset2 = datasets.IterableDataset.from_generator(
        gen,
        gen_kwargs={"shards": list(range(25, 50))},
    )
    dataset1 = dataset1.shuffle(buffer_size=1)
    dataset2 = dataset2.shuffle(buffer_size=1)

    print(dataset1.n_shards)
    print(dataset2.n_shards)

    dataset = datasets.concatenate_datasets([dataset1, dataset2])
    print(dataset.n_shards)

    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=8,
        num_workers=0,
    )
    for i, batch in enumerate(dataloader):
        print(batch)

    print("\nNew epoch")
    dataset.set_epoch(1)
    for i, batch in enumerate(dataloader):
        print(batch)


if __name__ == "__main__":
    main()
```

### Expected behavior

Shuffling state should be preserved.

### Environment info

Latest datasets
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7196/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7196/timeline
null
null
null
null
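Per the workaround in the comment above, the ordering matters; a minimal sketch with shuffling applied only after concatenation, so its state is not dropped:

```python
import datasets

def gen(shards):
    yield {"shards": shards}

dataset1 = datasets.IterableDataset.from_generator(
    gen, gen_kwargs={"shards": list(range(25))}
)
dataset2 = datasets.IterableDataset.from_generator(
    gen, gen_kwargs={"shards": list(range(25, 50))}
)

# concatenate first, then shuffle: the shuffle buffer and epoch state
# now belong to the combined dataset instead of being lost in concatenation
dataset = datasets.concatenate_datasets([dataset1, dataset2])
dataset = dataset.shuffle(buffer_size=1)
```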
https://api.github.com/repos/huggingface/datasets/issues/7232
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7232/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7232/comments
https://api.github.com/repos/huggingface/datasets/issues/7232/events
https://github.com/huggingface/datasets/pull/7232
2,593,720,548
PR_kwDODunzps5-62rY
7,232
(Super tiny doc update) Mention to_polars
{ "avatar_url": "https://avatars.githubusercontent.com/u/5236035?v=4", "events_url": "https://api.github.com/users/fzyzcjy/events{/privacy}", "followers_url": "https://api.github.com/users/fzyzcjy/followers", "following_url": "https://api.github.com/users/fzyzcjy/following{/other_user}", "gists_url": "https://api.github.com/users/fzyzcjy/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/fzyzcjy", "id": 5236035, "login": "fzyzcjy", "node_id": "MDQ6VXNlcjUyMzYwMzU=", "organizations_url": "https://api.github.com/users/fzyzcjy/orgs", "received_events_url": "https://api.github.com/users/fzyzcjy/received_events", "repos_url": "https://api.github.com/users/fzyzcjy/repos", "site_admin": false, "starred_url": "https://api.github.com/users/fzyzcjy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fzyzcjy/subscriptions", "type": "User", "url": "https://api.github.com/users/fzyzcjy", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "You are welcome!" ]
2024-10-17T06:08:53Z
2024-10-24T23:11:05Z
2024-10-24T15:06:16Z
CONTRIBUTOR
null
null
null
Polars is also quite popular now, so this tiny update tells users that Polars is supported.
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7232/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7232/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7232.diff", "html_url": "https://github.com/huggingface/datasets/pull/7232", "merged_at": "2024-10-24T15:06:16Z", "patch_url": "https://github.com/huggingface/datasets/pull/7232.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7232" }
https://api.github.com/repos/huggingface/datasets/issues/6600
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6600/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6600/comments
https://api.github.com/repos/huggingface/datasets/issues/6600/events
https://github.com/huggingface/datasets/issues/6600
2,088,446,385
I_kwDODunzps58eymx
6,600
Loading CSV exported dataset has unexpected format
{ "avatar_url": "https://avatars.githubusercontent.com/u/59572247?v=4", "events_url": "https://api.github.com/users/OrianeN/events{/privacy}", "followers_url": "https://api.github.com/users/OrianeN/followers", "following_url": "https://api.github.com/users/OrianeN/following{/other_user}", "gists_url": "https://api.github.com/users/OrianeN/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/OrianeN", "id": 59572247, "login": "OrianeN", "node_id": "MDQ6VXNlcjU5NTcyMjQ3", "organizations_url": "https://api.github.com/users/OrianeN/orgs", "received_events_url": "https://api.github.com/users/OrianeN/received_events", "repos_url": "https://api.github.com/users/OrianeN/repos", "site_admin": false, "starred_url": "https://api.github.com/users/OrianeN/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/OrianeN/subscriptions", "type": "User", "url": "https://api.github.com/users/OrianeN", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "Hi! Parquet is the only format that supports complex/nested features such as `Translation`. So, this should work:\r\n```python\r\ntest_dataset = load_dataset(\"opus100\", name=\"en-fr\", split=\"test\")\r\n\r\n# Save with .to_parquet()\r\ntest_parquet_path = \"try_testset_save.parquet\"\r\ntest_dataset.to_parquet(test_parquet_path)\r\n\r\n# Load dataset from the Parquet\r\nloaded_dataset = load_dataset(\"parquet\", data_files=test_parquet_path)\r\nprint(test_dataset_fromfile[0][\"translation\"])\r\nprint(test_dataset_fromfile[0][\"translation\"][\"en\"])\r\n```", "Indeed this works great, thank you !" ]
2024-01-18T14:48:27Z
2024-01-23T14:42:32Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug

I wanted to be able to save a HF dataset for translations and load it again in another script, but I'm a bit confused by the documentation and the result I got, so I'm opening this issue to ask whether this behavior is expected.

### Steps to reproduce the bug

The documentation I've mainly consulted is https://huggingface.co/docs/datasets/v2.16.1/en/package_reference/loading_methods#datasets.load_dataset and https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset (where I've found `.to_csv()`)

```python
# Load a dataset of translations
test_dataset = load_dataset("opus100", name="en-fr", split="test")

# Save with .to_csv()
test_csv_path = "try_testset_save.csv"
test_dataset.to_csv(test_csv_path)

# Load dataset from the CSV
loaded_dataset = load_dataset("csv", data_files=test_csv_path)

print(test_dataset_fromfile[0]["translation"])
print(test_dataset_fromfile[0]["translation"]["en"])
```

```
Creating CSV from Arrow format: 100% 2/2 [00:00<00:00, 47.99ba/s]
Downloading data files: 100% 1/1 [00:00<00:00, 65.33it/s]
Extracting data files: 100% 1/1 [00:00<00:00, 42.10it/s]
Generating train split: 2000/0 [00:00<00:00, 47486.09 examples/s]
{'en': "She wasn't going to vaccinate her kid against polio, no way.", 'fr': 'Elle ne vaccinerait pas son enfant contre la polio. Pas question.'}
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Cell In[29], line 11
      9 loaded_dataset = load_dataset("csv", data_files=test_csv_path)
     10 print(test_dataset_fromfile[0]["translation"])
---> 11 print(test_dataset_fromfile[0]["translation"]["en"])

TypeError: string indices must be integers, not 'str'
```

### Expected behavior

Each translation was saved as a stringified dict like `"{'en': ""She wasn't going to vaccinate her kid against polio, no way."", 'fr': 'Elle ne vaccinerait pas son enfant contre la polio. Pas question.'}"`, whereas I would have expected 2 columns (the 1st with English segments, the 2nd with French segments), and I was expecting `load_dataset` to infer the feature type automatically, as I haven't seen anything about it in the documentation.

Do you have an example of how to effectively save and load datasets of translations?

### Environment info

- `datasets` version: 2.15.0
- Platform: Linux-3.10.0-1160.36.2.el7.x86_64-x86_64-with-glibc2.17
- Python version: 3.11.5
- `huggingface_hub` version: 0.16.4
- PyArrow version: 14.0.2
- Pandas version: 2.1.4
- `fsspec` version: 2023.10.0
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6600/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6600/timeline
null
null
null
null
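The Parquet round trip suggested in the comment above works because Parquet preserves nested features such as `Translation`; a sketch of it, with the leftover `test_dataset_fromfile` variable name from the CSV snippet corrected and an explicit split so row indexing works:

```python
from datasets import load_dataset

test_dataset = load_dataset("opus100", name="en-fr", split="test")

# Parquet keeps the nested Translation feature intact, unlike CSV
test_parquet_path = "try_testset_save.parquet"
test_dataset.to_parquet(test_parquet_path)

loaded_dataset = load_dataset("parquet", data_files=test_parquet_path, split="train")
print(loaded_dataset[0]["translation"])
print(loaded_dataset[0]["translation"]["en"])
```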
https://api.github.com/repos/huggingface/datasets/issues/6884
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6884/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6884/comments
https://api.github.com/repos/huggingface/datasets/issues/6884/events
https://github.com/huggingface/datasets/issues/6884
2,284,839,687
I_kwDODunzps6IL-MH
6,884
CI is broken after jax-0.4.27 release: AttributeError: 'jaxlib.xla_extension.DeviceList' object has no attribute 'device'
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
null
[]
2024-05-08T07:01:47Z
2024-05-08T09:35:17Z
2024-05-08T09:35:17Z
MEMBER
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
After the jax-0.4.27 release (https://github.com/google/jax/releases/tag/jax-v0.4.27), our CI is broken with the error:

```Python traceback
AttributeError: 'jaxlib.xla_extension.DeviceList' object has no attribute 'device'. Did you mean: 'devices'?
```

See: https://github.com/huggingface/datasets/actions/runs/8997488610/job/24715736153

```Python traceback
___________________ FormatterTest.test_jax_formatter_device ____________________
[gw1] linux -- Python 3.10.14 /opt/hostedtoolcache/Python/3.10.14/x64/bin/python

self = <tests.test_formatting.FormatterTest testMethod=test_jax_formatter_device>

    @require_jax
    def test_jax_formatter_device(self):
        import jax

        from datasets.formatting import JaxFormatter

        pa_table = self._create_dummy_table()
        device = jax.devices()[0]
        formatter = JaxFormatter(device=str(device))
        row = formatter.format_row(pa_table)
>       assert row["a"].device() == device
E       AttributeError: 'jaxlib.xla_extension.DeviceList' object has no attribute 'device'. Did you mean: 'devices'?

tests/test_formatting.py:630: AttributeError
```
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6884/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6884/timeline
null
completed
null
null
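The error message itself points at the API change: the `device()` method is gone from jax arrays in 0.4.27, while `devices()` returns a set. One plausible adaptation of the failing assertion (the patch actually merged in `datasets` may differ):

```python
import jax
import jax.numpy as jnp

x = jnp.ones(3)
device = jax.devices()[0]

# jax>=0.4.27: .devices() returns the set of devices backing the array
assert x.devices() == {device}
```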
https://api.github.com/repos/huggingface/datasets/issues/6876
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6876/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6876/comments
https://api.github.com/repos/huggingface/datasets/issues/6876/events
https://github.com/huggingface/datasets/pull/6876
2,281,450,743
PR_kwDODunzps5uqs46
6,876
Unpin hfh
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6876). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "transformers 4.40.2 was release yesterday but not sure if it contains the fix", "@lhoestq yes I knew transformers 4.40.2 was released yesterday, but I had checked that it does not contain the fix: only 2 bug fixes. That is why our CI continues failing in this PR. We will have to wait until the next minor version.", "> If we urgently need some dev feature for dataset-viewer, I would suggest pushing the feature (cherry-picked) to a dedicated branch with 2.19.1 as its starting point (without opening a PR), and install datasets from that branch.\r\n\r\nI have done so:\r\n- Created a branch from 2.19.1: https://github.com/huggingface/datasets/tree/datasets-2.19.1-hotfix\r\n- Cherry-picked the commit in this PR: https://github.com/huggingface/datasets/commit/3638183e2f7e0dce8924e46e7cc21bf6d5d7adfb\r\n- Opened a PR in dataset-viewer to update datasets to this revision: https://github.com/huggingface/dataset-viewer/pull/2783", "hfh 0.23.1 and transformers 4.41.0 as are out out, let's unpin no ?", "I have re-run the CI to check that is green before.", "The errors were coming from `transformers` having FutureWarning when loading models or tokenizers. I disabled the warnings for the `transformers`-related calls since they're not related to `datasets`", "I opened an issue in transformers:\r\n- https://github.com/huggingface/transformers/issues/31002", "It's because the error from the FutureWarning happened when running `cache_file()` from `transformers`, which has some code that try/except and re-raise an OSError", "Opened https://github.com/huggingface/transformers/pull/31007 to fix the FutureWarning in transformers. Sorry, thought it was fixed by https://github.com/huggingface/transformers/issues/30618 but clearly an oversight from my side.\r\n\r\nRegarding the pytest config, yes I remember adding it and in general I still think it's a good idea to have it. Will be more careful next time to update `transformers` before `huggingface_hub`'s release and not the other way around (first time it happens since I've set this value :grimacing:). For a temporary fix in `datasets` I would rather temporarily disable the filterwarnings in `datasets` rather then adding filters in the test code. 
", "alright I disabled the errors on FutureWarning, do you see anything else @albertvillanova or we can merge ?", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005165 / 0.011353 (-0.006188) | 0.003991 / 0.011008 (-0.007017) | 0.064029 / 0.038508 (0.025521) | 0.031578 / 0.023109 (0.008468) | 0.242646 / 0.275898 (-0.033252) | 0.261834 / 0.323480 (-0.061646) | 0.003032 / 0.007986 (-0.004953) | 0.002659 / 0.004328 (-0.001670) | 0.049868 / 0.004250 (0.045618) | 0.047607 / 0.037052 (0.010555) | 0.250537 / 0.258489 (-0.007952) | 0.289460 / 0.293841 (-0.004381) | 0.027225 / 0.128546 (-0.101321) | 0.010496 / 0.075646 (-0.065151) | 0.208455 / 0.419271 (-0.210816) | 0.036813 / 0.043533 (-0.006720) | 0.243361 / 0.255139 (-0.011778) | 0.267477 / 0.283200 (-0.015723) | 0.020402 / 0.141683 (-0.121281) | 1.117118 / 1.452155 (-0.335037) | 1.154868 / 1.492716 (-0.337849) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.096796 / 0.018006 (0.078790) | 0.304588 / 0.000490 (0.304098) | 0.000217 / 0.000200 (0.000017) | 0.000043 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.019221 / 0.037411 (-0.018190) | 0.062897 / 0.014526 (0.048371) | 0.076446 / 0.176557 (-0.100111) | 0.124476 / 0.737135 (-0.612659) | 0.079921 / 0.296338 (-0.216418) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.284442 / 0.215209 (0.069233) 
| 2.799419 / 2.077655 (0.721764) | 1.468022 / 1.504120 (-0.036098) | 1.354013 / 1.541195 (-0.187182) | 1.379985 / 1.468490 (-0.088505) | 0.561723 / 4.584777 (-4.023054) | 2.408887 / 3.745712 (-1.336825) | 2.712591 / 5.269862 (-2.557271) | 1.803132 / 4.565676 (-2.762544) | 0.063010 / 0.424275 (-0.361265) | 0.005030 / 0.007607 (-0.002577) | 0.339065 / 0.226044 (0.113021) | 3.373667 / 2.268929 (1.104738) | 1.861569 / 55.444624 (-53.583056) | 1.551357 / 6.876477 (-5.325120) | 1.701885 / 2.142072 (-0.440187) | 0.645685 / 4.805227 (-4.159543) | 0.117915 / 6.500664 (-6.382749) | 0.042656 / 0.075469 (-0.032814) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.957397 / 1.841788 (-0.884391) | 11.544300 / 8.074308 (3.469992) | 9.761814 / 10.191392 (-0.429578) | 0.134766 / 0.680424 (-0.545658) | 0.015387 / 0.534201 (-0.518814) | 0.285692 / 0.579283 (-0.293591) | 0.269201 / 0.434364 (-0.165163) | 0.328198 / 0.540337 (-0.212140) | 0.422315 / 1.386936 (-0.964621) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005333 / 0.011353 (-0.006020) | 0.003638 / 0.011008 (-0.007370) | 0.050503 / 0.038508 (0.011994) | 0.032240 / 0.023109 (0.009130) | 0.267602 / 0.275898 (-0.008296) | 0.293125 / 0.323480 (-0.030355) | 0.004275 / 0.007986 (-0.003710) | 0.002714 / 0.004328 (-0.001615) | 0.049341 / 0.004250 (0.045090) | 0.040364 / 0.037052 (0.003311) | 0.281096 / 0.258489 (0.022607) | 0.312615 / 0.293841 (0.018774) | 0.029981 / 0.128546 (-0.098565) | 0.010230 / 0.075646 (-0.065416) | 0.059218 / 0.419271 (-0.360054) | 0.033360 / 0.043533 (-0.010172) | 0.269518 / 0.255139 (0.014379) | 0.287559 / 0.283200 (0.004360) | 0.018159 / 0.141683 (-0.123524) | 1.107148 / 1.452155 (-0.345006) | 1.170731 / 1.492716 (-0.321985) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095942 / 0.018006 (0.077936) | 0.304914 / 0.000490 (0.304425) | 0.000227 / 0.000200 (0.000027) | 0.000051 / 0.000054 
(-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022609 / 0.037411 (-0.014803) | 0.076455 / 0.014526 (0.061929) | 0.088170 / 0.176557 (-0.088386) | 0.128485 / 0.737135 (-0.608651) | 0.092471 / 0.296338 (-0.203867) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291471 / 0.215209 (0.076262) | 2.822666 / 2.077655 (0.745012) | 1.531679 / 1.504120 (0.027559) | 1.405931 / 1.541195 (-0.135263) | 1.418893 / 1.468490 (-0.049597) | 0.576128 / 4.584777 (-4.008649) | 0.969466 / 3.745712 (-2.776246) | 2.831998 / 5.269862 (-2.437863) | 1.788814 / 4.565676 (-2.776863) | 0.064141 / 0.424275 (-0.360134) | 0.005126 / 0.007607 (-0.002482) | 0.341699 / 0.226044 (0.115654) | 3.320551 / 2.268929 (1.051622) | 1.903350 / 55.444624 (-53.541274) | 1.611809 / 6.876477 (-5.264668) | 1.729355 / 2.142072 (-0.412717) | 0.654622 / 4.805227 (-4.150605) | 0.118739 / 6.500664 (-6.381925) | 0.041453 / 0.075469 (-0.034016) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.017635 / 1.841788 (-0.824153) | 12.275948 / 8.074308 (4.201640) | 10.416224 / 10.191392 (0.224832) | 0.142288 / 0.680424 (-0.538135) | 0.015591 / 0.534201 (-0.518610) | 0.286515 / 0.579283 (-0.292768) | 0.128661 / 0.434364 (-0.305703) | 0.325728 / 0.540337 (-0.214609) | 0.415827 / 1.386936 (-0.971109) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#b442aa2d3efc83ba0dc369adaa63cc496e3d9836 \"CML watermark\")\n" ]
2024-05-06T18:10:49Z
2024-05-27T10:20:42Z
2024-05-27T10:14:40Z
MEMBER
null
null
null
Needed to use those in dataset-viewer:
- dev version of hfh https://github.com/huggingface/dataset-viewer/pull/2781: don't spam the Hub with /paths-info requests
- dev version of datasets at https://github.com/huggingface/datasets/pull/6875: don't write too big logs in the viewer

close https://github.com/huggingface/datasets/issues/6863
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6876/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6876/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6876.diff", "html_url": "https://github.com/huggingface/datasets/pull/6876", "merged_at": "2024-05-27T10:14:40Z", "patch_url": "https://github.com/huggingface/datasets/pull/6876.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6876" }
https://api.github.com/repos/huggingface/datasets/issues/6358
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6358/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6358/comments
https://api.github.com/repos/huggingface/datasets/issues/6358/events
https://github.com/huggingface/datasets/issues/6358
1,965,014,595
I_kwDODunzps51H75D
6,358
Mounting datasets cache fails due to absolute paths.
{ "avatar_url": "https://avatars.githubusercontent.com/u/72921588?v=4", "events_url": "https://api.github.com/users/charliebudd/events{/privacy}", "followers_url": "https://api.github.com/users/charliebudd/followers", "following_url": "https://api.github.com/users/charliebudd/following{/other_user}", "gists_url": "https://api.github.com/users/charliebudd/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/charliebudd", "id": 72921588, "login": "charliebudd", "node_id": "MDQ6VXNlcjcyOTIxNTg4", "organizations_url": "https://api.github.com/users/charliebudd/orgs", "received_events_url": "https://api.github.com/users/charliebudd/received_events", "repos_url": "https://api.github.com/users/charliebudd/repos", "site_admin": false, "starred_url": "https://api.github.com/users/charliebudd/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/charliebudd/subscriptions", "type": "User", "url": "https://api.github.com/users/charliebudd", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "You may be able to make it work by tweaking some environment variables, such as [`HF_HOME`](https://huggingface.co/docs/huggingface_hub/main/en/package_reference/environment_variables#hfhome) or [`HF_DATASETS_CACHE`](https://huggingface.co/docs/datasets/cache#cache-directory).", "> You may be able to make it work by tweaking some environment variables, such as [`HF_HOME`](https://huggingface.co/docs/huggingface_hub/main/en/package_reference/environment_variables#hfhome) or [`HF_DATASETS_CACHE`](https://huggingface.co/docs/datasets/cache#cache-directory).\r\n\r\nI am already doing this. The problem is that, while this seemingly allows flexibility, the absolute paths written into the cache still have the old cache directory. The paths written into the cache should be relative to the cache location to allow this sort of flexibility. Sorry, I omitted this in the reproduction steps, I have now added it.", "I'm unable to reproduce this with the cache\r\n```bash\r\nexport HF_CACHE=$PWD/hf_cache\r\npython -c \"import datasets; datasets.load_dataset('imdb')\"\r\n```\r\nimported inside a dummy container that is built from\r\n```bash\r\nFROM python:3.9\r\n\r\nWORKDIR /usr/src/app\r\n\r\nRUN pip install datasets\r\n\r\nCOPY ./hf_cache ./hf_cache\r\n\r\nENV HF_HOME=./hf_cache\r\nENV HF_DATASETS_OFFLINE=1\r\n\r\nCMD [\"python\"]\r\n```\r\nWhat do you mean by \"absolute paths written into the cache\"? Paths inside the HF cache paths are based on hash (hashed URL of the downloaded files, etc.)", "@mariosasko Same problem: the absolute paths written into the cache still have the old cache directory. Like:\r\n\r\n{'bytes': None, 'path': 'E:\\\\work-20240321\\\\datasets\\\\downloads\\\\extracted\\\\9752883596854dc57e01c74cc3f494b2ba63754dadd9e77f9d1932deddbd2273\\\\58f33a03-026f-4adc-b69f-b89d16b9f35a.webp'}\r\n\r\nWhen I move this cached directory to another directory, these datasets cannot be used casue path changes. So, the paths written into the cache should be relative to the cache location to allow this sort of flexibility. ", "Sorry, the reply on this thread escaped my attention. The problem with @mariosasko's attempted reproduction is the absolute path `./hf_cache` is the same in the host system and the docker container, so naturally the paths would be correct. Modifying the docker image as below should reproduce the error...\r\n\r\n```\r\nFROM python:3.9\r\n\r\nWORKDIR /usr/src/app\r\n\r\nRUN pip install datasets\r\n\r\nCOPY ./hf_cache ./my_cache/\r\n\r\nENV HF_HOME=./my_cache/\r\nENV HF_DATASETS_OFFLINE=1\r\n\r\nCMD [\"python\"]\r\n```\r\n\r\nThe paths written inside the cache will still have `./hf_cache` prefixing all the paths. If they were relative paths (relative to the top level of the cache) this would be avoided." ]
2023-10-27T08:20:27Z
2024-04-10T08:50:06Z
2023-11-28T14:47:12Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Creating a datasets cache and mounting it into, for example, a docker container renders the data unreadable due to absolute paths written into the cache. ### Steps to reproduce the bug 1. Create a datasets cache by downloading some data 2. Mount the dataset folder into a docker container or remote system. 3. (Edit) Set `HF_HOME` or `HF_DATASETS_CACHE` to point to the mounted cache. 4. Attempt to access the data from within the docker container. 5. An error is thrown saying no file exists at \<absolute path to original cache location\> ### Expected behavior The data is loaded without error ### Environment info - `datasets` version: 2.14.4 - Platform: Linux-5.4.0-162-generic-x86_64-with-glibc2.29 - Python version: 3.8.10 - Huggingface_hub version: 0.16.4 - PyArrow version: 13.0.0 - Pandas version: 2.0.3
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6358/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6358/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/6723
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6723/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6723/comments
https://api.github.com/repos/huggingface/datasets/issues/6723/events
https://github.com/huggingface/datasets/pull/6723
2,174,344,456
PR_kwDODunzps5o_fPU
6,723
get_dataset_default_config_name docstring
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6723). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005658 / 0.011353 (-0.005694) | 0.003883 / 0.011008 (-0.007125) | 0.064007 / 0.038508 (0.025499) | 0.030370 / 0.023109 (0.007261) | 0.246677 / 0.275898 (-0.029221) | 0.270846 / 0.323480 (-0.052634) | 0.003102 / 0.007986 (-0.004884) | 0.002931 / 0.004328 (-0.001397) | 0.049446 / 0.004250 (0.045196) | 0.043555 / 0.037052 (0.006503) | 0.261810 / 0.258489 (0.003321) | 0.289705 / 0.293841 (-0.004136) | 0.028676 / 0.128546 (-0.099870) | 0.010778 / 0.075646 (-0.064868) | 0.210604 / 0.419271 (-0.208667) | 0.035987 / 0.043533 (-0.007546) | 0.248034 / 0.255139 (-0.007105) | 0.265019 / 0.283200 (-0.018181) | 0.018522 / 0.141683 (-0.123161) | 1.096364 / 1.452155 (-0.355791) | 1.152750 / 1.492716 (-0.339966) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093987 / 0.018006 (0.075981) | 0.306143 / 0.000490 (0.305653) | 0.000218 / 0.000200 (0.000018) | 0.000045 / 0.000054 (-0.000009) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018727 / 0.037411 (-0.018685) | 0.061983 / 0.014526 (0.047457) | 0.074254 / 0.176557 (-0.102303) | 0.121256 / 0.737135 (-0.615880) | 0.076756 / 0.296338 (-0.219582) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.278824 / 0.215209 (0.063615) | 2.815960 / 2.077655 (0.738305) | 1.472946 / 1.504120 (-0.031174) | 1.349722 / 1.541195 (-0.191473) | 1.327844 / 1.468490 (-0.140646) | 0.574964 / 4.584777 (-4.009813) | 2.403458 / 3.745712 (-1.342254) | 2.769293 / 5.269862 (-2.500569) | 1.736970 / 4.565676 (-2.828706) | 0.063144 / 0.424275 (-0.361131) | 0.004983 / 0.007607 (-0.002625) | 0.331212 / 0.226044 (0.105168) | 3.231496 / 2.268929 (0.962567) | 1.798487 / 55.444624 (-53.646138) | 1.523010 / 6.876477 (-5.353467) | 1.559973 / 2.142072 (-0.582099) | 0.657036 / 4.805227 (-4.148191) | 0.119084 / 6.500664 (-6.381580) | 0.042982 / 0.075469 (-0.032487) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.976433 / 1.841788 (-0.865355) | 11.475946 / 8.074308 (3.401638) | 9.339369 / 10.191392 (-0.852023) | 0.141761 / 0.680424 (-0.538662) | 0.014506 / 0.534201 (-0.519695) | 0.289944 / 0.579283 (-0.289340) | 0.273667 / 0.434364 (-0.160697) | 0.326682 / 0.540337 (-0.213655) | 0.458946 / 1.386936 (-0.927990) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005194 / 0.011353 (-0.006159) | 0.003713 / 0.011008 (-0.007295) | 0.049297 / 0.038508 (0.010789) | 0.029723 / 0.023109 (0.006614) | 0.278664 / 0.275898 (0.002766) | 0.296387 / 0.323480 (-0.027093) | 0.004215 / 0.007986 (-0.003771) | 0.002680 / 0.004328 (-0.001648) | 0.048276 / 0.004250 (0.044025) | 0.044454 / 0.037052 (0.007402) | 0.290510 / 0.258489 (0.032021) | 0.319028 / 0.293841 (0.025187) | 0.029177 / 0.128546 (-0.099369) | 0.010361 / 0.075646 (-0.065285) | 0.056993 / 0.419271 (-0.362279) | 0.050765 / 0.043533 (0.007232) | 0.278234 / 0.255139 (0.023095) | 0.295848 / 0.283200 (0.012649) | 0.018776 / 0.141683 (-0.122906) | 1.134866 / 1.452155 (-0.317288) | 1.204083 / 1.492716 (-0.288634) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.094397 / 0.018006 (0.076391) | 0.304693 / 0.000490 (0.304203) | 0.000207 / 0.000200 (0.000007) | 0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021322 / 0.037411 (-0.016090) | 0.075384 / 0.014526 (0.060859) | 0.086961 / 0.176557 (-0.089596) | 0.124424 / 0.737135 (-0.612711) | 0.087802 / 0.296338 (-0.208536) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.305542 / 0.215209 (0.090333) | 2.980678 / 2.077655 (0.903023) | 1.632348 / 1.504120 (0.128228) | 1.501466 / 1.541195 (-0.039728) | 1.517681 / 1.468490 (0.049191) | 0.579318 / 4.584777 (-4.005459) | 2.460734 / 3.745712 (-1.284978) | 2.650164 / 5.269862 (-2.619697) | 1.752061 / 4.565676 (-2.813615) | 0.064561 / 0.424275 (-0.359714) | 0.005097 / 0.007607 (-0.002510) | 0.359613 / 0.226044 (0.133569) | 3.518549 / 2.268929 (1.249620) | 1.962575 / 55.444624 (-53.482050) | 1.686108 / 6.876477 (-5.190369) | 1.787873 / 2.142072 (-0.354199) | 0.653715 / 4.805227 (-4.151512) | 0.117617 / 6.500664 (-6.383048) | 0.040359 / 0.075469 (-0.035110) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.021533 / 1.841788 (-0.820255) | 11.974817 / 8.074308 (3.900509) | 10.073530 / 10.191392 (-0.117862) | 0.141477 / 0.680424 (-0.538947) | 0.015081 / 0.534201 (-0.519120) | 0.292622 / 0.579283 (-0.286661) | 0.291043 / 0.434364 (-0.143321) | 0.347822 / 0.540337 (-0.192516) | 0.443647 / 1.386936 (-0.943289) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#6fb6c834f008996c994b0a86c3808d0a33d44525 \"CML watermark\")\n" ]
2024-03-07T17:09:29Z
2024-03-07T17:27:29Z
2024-03-07T17:21:20Z
MEMBER
null
null
null
fix https://github.com/huggingface/datasets/pull/6722
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6723/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6723/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6723.diff", "html_url": "https://github.com/huggingface/datasets/pull/6723", "merged_at": "2024-03-07T17:21:20Z", "patch_url": "https://github.com/huggingface/datasets/pull/6723.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6723" }
https://api.github.com/repos/huggingface/datasets/issues/7480
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7480/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7480/comments
https://api.github.com/repos/huggingface/datasets/issues/7480/events
https://github.com/huggingface/datasets/issues/7480
2,950,315,214
I_kwDODunzps6v2jzO
7,480
HF_DATASETS_CACHE ignored?
{ "avatar_url": "https://avatars.githubusercontent.com/u/31896?v=4", "events_url": "https://api.github.com/users/stephenroller/events{/privacy}", "followers_url": "https://api.github.com/users/stephenroller/followers", "following_url": "https://api.github.com/users/stephenroller/following{/other_user}", "gists_url": "https://api.github.com/users/stephenroller/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stephenroller", "id": 31896, "login": "stephenroller", "node_id": "MDQ6VXNlcjMxODk2", "organizations_url": "https://api.github.com/users/stephenroller/orgs", "received_events_url": "https://api.github.com/users/stephenroller/received_events", "repos_url": "https://api.github.com/users/stephenroller/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stephenroller/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stephenroller/subscriptions", "type": "User", "url": "https://api.github.com/users/stephenroller", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "FWIW, it does eventually write to /tmp/roller/datasets when generating the final version.", "Hey, I’d love to work on this issue but I am a beginner, can I work it with you?", "Hi @lhoestq,\nI'd like to look into this issue but I'm still learning. Could you share any quick pointers on the HF_DATASETS_CACHE behavior here? Thanks!", "Hi ! `HF_DATASETS_CACHE` is only for the cache files of the `datasets` library, not for the `huggingface_hub` cache for files downloaded from the Hugging Face Hub.\n\nYou should either specify `HF_HOME` (parent cache path for everything HF) or both `HF_DATASETS_CACHE` and `HF_HUB_CACHE`" ]
2025-03-26T17:19:34Z
2025-04-08T13:04:45Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I'm struggling to get things to respect HF_DATASETS_CACHE. Rationale: I'm on a system that uses NFS for homedir, so downloading to NFS is expensive, slow, and wastes valuable quota compared to local disk. Instead, it seems to rely mostly on HF_HUB_CACHE. Current version: 3.2.1dev. In the process of testing 3.4.0 ### Steps to reproduce the bug [Currently writing using datasets 3.2.1dev. Will follow up with 3.4.0 results] dump.py: ```python from datasets import load_dataset dataset = load_dataset("HuggingFaceFW/fineweb", name="sample-100BT", split="train") ``` Repro steps ```bash # ensure no cache $ mv ~/.cache/huggingface ~/.cache/huggingface.bak $ export HF_DATASETS_CACHE=/tmp/roller/datasets $ rm -rf ${HF_DATASETS_CACHE} $ env | grep HF | grep -v TOKEN HF_DATASETS_CACHE=/tmp/roller/datasets $ python dump.py # (omitted for brevity) # (while downloading) $ du -hcs ~/.cache/huggingface/hub 18G hub 18G total # (after downloading) $ du -hcs ~/.cache/huggingface/hub ``` It's a shame because datasets supports s3 (which I could really use right now) but hub does not. ### Expected behavior * ~/.cache/huggingface/hub stays empty * /tmp/roller/datasets becomes full of stuff ### Environment info [Currently writing using datasets 3.2.1dev. Will follow up with 3.4.0 results]
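Following the explanation in the comments above, a minimal sketch of keeping both caches off NFS; the /tmp/roller/... paths are illustrative:

```bash
# HF_DATASETS_CACHE only relocates the `datasets` library's Arrow cache.
# Files downloaded from the Hub land in HF_HUB_CACHE, so relocate it too,
# or set HF_HOME once to move everything HF in one go.
export HF_HOME=/tmp/roller/huggingface
# ...equivalently, relocate the two caches individually:
# export HF_DATASETS_CACHE=/tmp/roller/datasets
# export HF_HUB_CACHE=/tmp/roller/hub
python dump.py
```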
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7480/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7480/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/6024
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6024/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6024/comments
https://api.github.com/repos/huggingface/datasets/issues/6024/events
https://github.com/huggingface/datasets/pull/6024
1,801,708,808
PR_kwDODunzps5VWbGe
6,024
Don't reference self in Spark._validate_cache_dir
{ "avatar_url": "https://avatars.githubusercontent.com/u/106995444?v=4", "events_url": "https://api.github.com/users/maddiedawson/events{/privacy}", "followers_url": "https://api.github.com/users/maddiedawson/followers", "following_url": "https://api.github.com/users/maddiedawson/following{/other_user}", "gists_url": "https://api.github.com/users/maddiedawson/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/maddiedawson", "id": 106995444, "login": "maddiedawson", "node_id": "U_kgDOBmCe9A", "organizations_url": "https://api.github.com/users/maddiedawson/orgs", "received_events_url": "https://api.github.com/users/maddiedawson/received_events", "repos_url": "https://api.github.com/users/maddiedawson/repos", "site_admin": false, "starred_url": "https://api.github.com/users/maddiedawson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/maddiedawson/subscriptions", "type": "User", "url": "https://api.github.com/users/maddiedawson", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "Ptal @lhoestq :) I tested this manually on a multi-node Databricks cluster", "Hm looks like the check_code_quality failures are unrelated to me change... https://github.com/huggingface/datasets/actions/runs/5536162850/jobs/10103451883?pr=6024", "_The documentation is not available anymore as the PR was closed or merged._", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005952 / 0.011353 (-0.005400) | 0.003585 / 0.011008 (-0.007424) | 0.079163 / 0.038508 (0.040655) | 0.057926 / 0.023109 (0.034817) | 0.326647 / 0.275898 (0.050749) | 0.383485 / 0.323480 (0.060005) | 0.004530 / 0.007986 (-0.003456) | 0.002821 / 0.004328 (-0.001508) | 0.062071 / 0.004250 (0.057820) | 0.048023 / 0.037052 (0.010971) | 0.329368 / 0.258489 (0.070879) | 0.390877 / 0.293841 (0.097036) | 0.026959 / 0.128546 (-0.101588) | 0.007911 / 0.075646 (-0.067735) | 0.259956 / 0.419271 (-0.159315) | 0.044582 / 0.043533 (0.001049) | 0.320537 / 0.255139 (0.065398) | 0.373814 / 0.283200 (0.090614) | 0.020275 / 0.141683 (-0.121408) | 1.532128 / 1.452155 (0.079973) | 1.595031 / 1.492716 (0.102315) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.186127 / 0.018006 (0.168120) | 0.428586 / 0.000490 (0.428097) | 0.005180 / 0.000200 (0.004980) | 0.000069 / 0.000054 (0.000015) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.024876 / 0.037411 (-0.012536) | 0.072169 / 0.014526 (0.057643) | 0.082015 / 0.176557 (-0.094542) | 0.147467 / 0.737135 (-0.589668) | 0.082769 / 0.296338 (-0.213570) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch 
numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.410625 / 0.215209 (0.195416) | 4.116742 / 2.077655 (2.039088) | 2.172291 / 1.504120 (0.668171) | 2.022462 / 1.541195 (0.481268) | 2.048142 / 1.468490 (0.579651) | 0.503152 / 4.584777 (-4.081625) | 3.019135 / 3.745712 (-0.726577) | 3.589451 / 5.269862 (-1.680410) | 2.206876 / 4.565676 (-2.358801) | 0.057687 / 0.424275 (-0.366588) | 0.006560 / 0.007607 (-0.001047) | 0.475585 / 0.226044 (0.249541) | 4.784344 / 2.268929 (2.515416) | 2.506322 / 55.444624 (-52.938302) | 2.168251 / 6.876477 (-4.708225) | 2.324453 / 2.142072 (0.182381) | 0.590609 / 4.805227 (-4.214618) | 0.124178 / 6.500664 (-6.376486) | 0.059197 / 0.075469 (-0.016272) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.212359 / 1.841788 (-0.629429) | 17.915843 / 8.074308 (9.841535) | 13.128330 / 10.191392 (2.936938) | 0.144805 / 0.680424 (-0.535618) | 0.016889 / 0.534201 (-0.517312) | 0.344056 / 0.579283 (-0.235227) | 0.359370 / 0.434364 (-0.074994) | 0.404199 / 0.540337 (-0.136138) | 0.549117 / 1.386936 (-0.837819) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005914 / 0.011353 (-0.005439) | 0.003565 / 0.011008 (-0.007443) | 0.061575 / 0.038508 (0.023067) | 0.057677 / 0.023109 (0.034568) | 0.359753 / 0.275898 (0.083855) | 0.394135 / 0.323480 (0.070655) | 0.004648 / 0.007986 (-0.003338) | 0.002795 / 0.004328 (-0.001534) | 0.061877 / 0.004250 (0.057626) | 0.049673 / 0.037052 (0.012621) | 0.363120 / 0.258489 (0.104631) | 0.402685 / 0.293841 (0.108844) | 0.027021 / 0.128546 (-0.101525) | 0.008006 / 0.075646 (-0.067641) | 0.067398 / 0.419271 (-0.351874) | 0.044442 / 0.043533 (0.000909) | 0.364851 / 0.255139 (0.109712) | 0.387219 / 0.283200 (0.104019) | 0.027267 / 0.141683 (-0.114416) | 1.466675 / 1.452155 (0.014520) | 1.512607 / 1.492716 (0.019891) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | 
get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.206156 / 0.018006 (0.188150) | 0.410877 / 0.000490 (0.410387) | 0.003061 / 0.000200 (0.002861) | 0.000068 / 0.000054 (0.000013) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.024869 / 0.037411 (-0.012542) | 0.075736 / 0.014526 (0.061210) | 0.083922 / 0.176557 (-0.092634) | 0.139510 / 0.737135 (-0.597626) | 0.087685 / 0.296338 (-0.208654) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.414473 / 0.215209 (0.199264) | 4.150633 / 2.077655 (2.072979) | 2.132892 / 1.504120 (0.628773) | 1.964072 / 1.541195 (0.422878) | 2.003353 / 1.468490 (0.534863) | 0.498012 / 4.584777 (-4.086765) | 3.010135 / 3.745712 (-0.735577) | 2.841130 / 5.269862 (-2.428732) | 1.826013 / 4.565676 (-2.739664) | 0.057443 / 0.424275 (-0.366832) | 0.006374 / 0.007607 (-0.001234) | 0.490337 / 0.226044 (0.264292) | 4.889628 / 2.268929 (2.620700) | 2.575626 / 55.444624 (-52.868998) | 2.246522 / 6.876477 (-4.629955) | 2.276183 / 2.142072 (0.134110) | 0.581465 / 4.805227 (-4.223763) | 0.123877 / 6.500664 (-6.376787) | 0.060339 / 0.075469 (-0.015130) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.333202 / 1.841788 (-0.508585) | 18.363558 / 8.074308 (10.289250) | 14.109356 / 10.191392 (3.917964) | 0.147358 / 0.680424 (-0.533066) | 0.016813 / 0.534201 (-0.517388) | 0.334815 / 0.579283 (-0.244468) | 0.366576 / 0.434364 (-0.067788) | 0.397223 / 0.540337 (-0.143115) | 0.547893 / 1.386936 (-0.839043) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#67ac60bcbebe9ddac70264951b1d584c93003cdf \"CML watermark\")\n" ]
2023-07-12T20:31:16Z
2023-07-13T16:58:32Z
2023-07-13T12:37:09Z
CONTRIBUTOR
null
null
null
Fix for https://github.com/huggingface/datasets/issues/5963
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6024/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6024/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6024.diff", "html_url": "https://github.com/huggingface/datasets/pull/6024", "merged_at": "2023-07-13T12:37:09Z", "patch_url": "https://github.com/huggingface/datasets/pull/6024.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6024" }
https://api.github.com/repos/huggingface/datasets/issues/4769
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4769/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4769/comments
https://api.github.com/repos/huggingface/datasets/issues/4769/events
https://github.com/huggingface/datasets/issues/4769
1,322,121,554
I_kwDODunzps5OzflS
4,769
Failure to process SQuADv1.1 datasets with max_seq_length=128, doc_stride=96.
{ "avatar_url": "https://avatars.githubusercontent.com/u/5491519?v=4", "events_url": "https://api.github.com/users/zhuango/events{/privacy}", "followers_url": "https://api.github.com/users/zhuango/followers", "following_url": "https://api.github.com/users/zhuango/following{/other_user}", "gists_url": "https://api.github.com/users/zhuango/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/zhuango", "id": 5491519, "login": "zhuango", "node_id": "MDQ6VXNlcjU0OTE1MTk=", "organizations_url": "https://api.github.com/users/zhuango/orgs", "received_events_url": "https://api.github.com/users/zhuango/received_events", "repos_url": "https://api.github.com/users/zhuango/repos", "site_admin": false, "starred_url": "https://api.github.com/users/zhuango/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zhuango/subscriptions", "type": "User", "url": "https://api.github.com/users/zhuango", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
[]
2022-07-29T11:18:24Z
2022-07-29T11:18:24Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
## Describe the bug `datasets` fails to process SQuADv1.1 with max_seq_length=128, doc_stride=96 when calling datasets["train"].train_dataset.map(). ## Steps to reproduce the bug I used the huggingface [TF2 question-answering examples](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering). My script is as follows: ``` python run_qa.py \ --model_name_or_path $BERT_DIR \ --dataset_name $SQUAD_DIR \ --do_train \ --do_eval \ --per_device_train_batch_size 12 \ --learning_rate 3e-5 \ --num_train_epochs 2 \ --max_seq_length 128 \ --doc_stride 96 \ --output_dir $OUTPUT \ --save_steps 10000 \ --overwrite_cache \ --overwrite_output_dir \ ``` ## Expected results Normally process SQuADv1.1 datasets with max_seq_length=128, doc_stride=96. ## Actual results ``` INFO:__main__:Padding all batches to max length because argument was set or we're on TPU. WARNING:datasets.fingerprint:Parameter 'function'=<function main.<locals>.prepare_train_features at 0x7f15bc2d07a0> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed. 0%| | 0/88 [00:00<?, ?ba/s]thread '<unnamed>' panicked at 'assertion failed: stride < max_len', /__w/tokenizers/tokenizers/tokenizers/src/tokenizer/encoding.rs:311:9 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace 0%| | 0/88 [00:00<?, ?ba/s] Traceback (most recent call last): File "run_qa.py", line 743, in <module> main() File "run_qa.py", line 485, in main load_from_cache_file=not data_args.overwrite_cache, File "/anaconda3/envs/py37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2394, in map desc=desc, File "/anaconda3/envs/py37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 551, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/anaconda3/envs/py37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 518, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/anaconda3/envs/py37/lib/python3.7/site-packages/datasets/fingerprint.py", line 458, in wrapper out = func(self, *args, **kwargs) File "anaconda3/envs/py37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2768, in _map_single offset=offset, File "anaconda3/envs/py37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2644, in apply_function_on_filtered_inputs processed_inputs = function(*fn_args, *additional_args, **fn_kwargs) File "anaconda3/envs/py37/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2336, in decorated result = f(decorated_item, *args, **kwargs) File "run_qa.py", line 410, in prepare_train_features padding=padding, File "anaconda3/envs/py37/lib/python3.7/site-packages/transformers/tokenization_utils_base.py", line 2512, in __call__ **kwargs, File "anaconda3/envs/py37/lib/python3.7/site-packages/transformers/tokenization_utils_base.py", line 2703, in batch_encode_plus **kwargs, File "anaconda3/envs/py37/lib/python3.7/site-packages/transformers/tokenization_utils_fast.py", line 429, in _batch_encode_plus is_pretokenized=is_split_into_words, pyo3_runtime.PanicException: assertion failed: stride < max_len Traceback (most recent call last): File "./data/SQuADv1.1/evaluate-v1.1.py", line 92, in <module> with open(args.prediction_file) as prediction_file: FileNotFoundError: [Errno 2] No such file or directory: './output/bert_base_squadv1.1_tf2/eval_predictions.json' ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.3.2 - Platform: Ubuntu, pytorch=1.11.0, tensorflow-gpu=2.9.1 - Python version: 2.7 - PyArrow version: 8.0.0
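The `assertion failed: stride < max_len` panic appears to come from the Rust tokenizer, which requires the stride to be strictly smaller than the length budget left for the truncated context; with max_seq_length=128, the room remaining after the question and special tokens can drop below doc_stride=96. A hedged workaround sketch, where doc_stride 64 is an illustrative value rather than anything from the report:

```bash
# Keep doc_stride comfortably below the tokens left for the context
# (max_seq_length minus the question length and special tokens).
python run_qa.py \
  --model_name_or_path $BERT_DIR \
  --dataset_name $SQUAD_DIR \
  --do_train \
  --do_eval \
  --max_seq_length 128 \
  --doc_stride 64 \
  --output_dir $OUTPUT
```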
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4769/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4769/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7080
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7080/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7080/comments
https://api.github.com/repos/huggingface/datasets/issues/7080/events
https://github.com/huggingface/datasets/issues/7080
2,434,275,664
I_kwDODunzps6RGBlQ
7,080
Generating train split takes a long time
{ "avatar_url": "https://avatars.githubusercontent.com/u/35648800?v=4", "events_url": "https://api.github.com/users/alexanderswerdlow/events{/privacy}", "followers_url": "https://api.github.com/users/alexanderswerdlow/followers", "following_url": "https://api.github.com/users/alexanderswerdlow/following{/other_user}", "gists_url": "https://api.github.com/users/alexanderswerdlow/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/alexanderswerdlow", "id": 35648800, "login": "alexanderswerdlow", "node_id": "MDQ6VXNlcjM1NjQ4ODAw", "organizations_url": "https://api.github.com/users/alexanderswerdlow/orgs", "received_events_url": "https://api.github.com/users/alexanderswerdlow/received_events", "repos_url": "https://api.github.com/users/alexanderswerdlow/repos", "site_admin": false, "starred_url": "https://api.github.com/users/alexanderswerdlow/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alexanderswerdlow/subscriptions", "type": "User", "url": "https://api.github.com/users/alexanderswerdlow", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "@alexanderswerdlow \r\nWhen no specific split is mentioned, the load_dataset library will load all available splits of the dataset. For example, if a dataset has \"train\" and \"test\" splits, the load_dataset function will load both into the DatasetDict object.\r\n\r\n![image](https://github.com/user-attachments/assets/379e6f57-7e1b-4cc3-bc36-dae3e878a51c)\r\n\r\n\r\nThe dataset PixArt-alpha/SAM-LLaVA-Captions10M may have been uploaded with different predefined splits (e.g., \"train\", \"test\", etc.), and by default, Hugging Face will load all splits unless you specifically request only one.\r\n\r\n### If you want to load only a specific split (e.g., only the \"train\" set), you can specify it in the split parameter like this:\r\n```python\r\nfrom datasets import load_dataset\r\ndataset = load_dataset(\"PixArt-alpha/SAM-LLaVA-Captions10M\", split=\"train\")\r\n```\r\n\r\n### You can also load multiple splits if needed:\r\n```python\r\ndataset = load_dataset(\"PixArt-alpha/SAM-LLaVA-Captions10M\", split=[\"train\", \"test\"])\r\n```\r\n\r\n", "@alexanderswerdlow, I will now work on this..\r\n\r\n## Idea:\r\nWhenever this code has ran:\r\n```python\r\nfrom datasets import load_dataset\r\ndataset = load_dataset(\"PixArt-alpha/SAM-LLaVA-Captions10M\")\r\n```\r\n\r\nIt should show all the splits of the datasets, and user has to choose which one should be loaded before generating a split like this,,\r\n\r\n![image](https://github.com/user-attachments/assets/8fbc604f-f0a5-4a59-a63e-aa4c26442c83)\r\n" ]
2024-07-29T01:42:43Z
2024-10-02T15:31:22Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Loading a simple webdataset takes ~45 minutes. ### Steps to reproduce the bug ``` from datasets import load_dataset dataset = load_dataset("PixArt-alpha/SAM-LLaVA-Captions10M") ``` ### Expected behavior The dataset should load immediately as it does when loaded through a normal indexed WebDataset loader. Generating splits should be optional and there should be a message showing how to disable it. ### Environment info - `datasets` version: 2.20.0 - Platform: Linux-4.18.0-372.32.1.el8_6.x86_64-x86_64-with-glibc2.28 - Python version: 3.10.14 - `huggingface_hub` version: 0.24.1 - PyArrow version: 16.1.0 - Pandas version: 2.2.2 - `fsspec` version: 2024.5.0
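If the goal is to start reading samples without waiting for the Arrow conversion, streaming skips the "Generating train split" step entirely; a minimal sketch:

```python
from datasets import load_dataset

# streaming=True returns an IterableDataset: samples are read lazily from
# the WebDataset shards instead of being converted to Arrow files up front.
dataset = load_dataset("PixArt-alpha/SAM-LLaVA-Captions10M", split="train", streaming=True)
for example in dataset:
    print(example.keys())  # inspect the first sample, then stop
    break
```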
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7080/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7080/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/6328
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6328/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6328/comments
https://api.github.com/repos/huggingface/datasets/issues/6328/events
https://github.com/huggingface/datasets/issues/6328
1,955,857,904
I_kwDODunzps50lAXw
6,328
Text-to-speech networks first convert the given text into an intermediate representation
{ "avatar_url": "https://avatars.githubusercontent.com/u/147399213?v=4", "events_url": "https://api.github.com/users/shabnam706/events{/privacy}", "followers_url": "https://api.github.com/users/shabnam706/followers", "following_url": "https://api.github.com/users/shabnam706/following{/other_user}", "gists_url": "https://api.github.com/users/shabnam706/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/shabnam706", "id": 147399213, "login": "shabnam706", "node_id": "U_kgDOCMkiLQ", "organizations_url": "https://api.github.com/users/shabnam706/orgs", "received_events_url": "https://api.github.com/users/shabnam706/received_events", "repos_url": "https://api.github.com/users/shabnam706/repos", "site_admin": false, "starred_url": "https://api.github.com/users/shabnam706/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shabnam706/subscriptions", "type": "User", "url": "https://api.github.com/users/shabnam706", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "شبکه های متن به گفتار ابتدا متن داده شده را به بازنمایی میانی" ]
2023-10-22T11:07:21Z
2023-10-23T09:22:38Z
2023-10-23T09:22:38Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6328/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6328/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7028
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7028/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7028/comments
https://api.github.com/repos/huggingface/datasets/issues/7028/events
https://github.com/huggingface/datasets/pull/7028
2,391,077,531
PR_kwDODunzps50dQ1w
7,028
Fix CI
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_7028). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005748 / 0.011353 (-0.005605) | 0.004109 / 0.011008 (-0.006899) | 0.067017 / 0.038508 (0.028509) | 0.031950 / 0.023109 (0.008841) | 0.239939 / 0.275898 (-0.035959) | 0.266339 / 0.323480 (-0.057141) | 0.003176 / 0.007986 (-0.004809) | 0.003556 / 0.004328 (-0.000773) | 0.050725 / 0.004250 (0.046475) | 0.047711 / 0.037052 (0.010658) | 0.251048 / 0.258489 (-0.007441) | 0.287049 / 0.293841 (-0.006792) | 0.029919 / 0.128546 (-0.098627) | 0.012562 / 0.075646 (-0.063085) | 0.212903 / 0.419271 (-0.206369) | 0.036570 / 0.043533 (-0.006963) | 0.240975 / 0.255139 (-0.014164) | 0.266473 / 0.283200 (-0.016726) | 0.019959 / 0.141683 (-0.121724) | 1.152224 / 1.452155 (-0.299931) | 1.186046 / 1.492716 (-0.306671) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095836 / 0.018006 (0.077829) | 0.303402 / 0.000490 (0.302913) | 0.000210 / 0.000200 (0.000010) | 0.000042 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.020552 / 0.037411 (-0.016859) | 0.063619 / 0.014526 (0.049093) | 0.076969 / 0.176557 (-0.099588) | 0.123368 / 0.737135 (-0.613767) | 0.077005 / 0.296338 (-0.219334) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.282005 / 0.215209 (0.066796) | 2.794144 / 2.077655 (0.716489) | 1.463569 / 1.504120 (-0.040551) | 1.334295 / 1.541195 (-0.206899) | 1.387198 / 1.468490 (-0.081292) | 0.707654 / 4.584777 (-3.877123) | 2.341698 / 3.745712 (-1.404014) | 2.865131 / 5.269862 (-2.404731) | 1.945168 / 4.565676 (-2.620509) | 0.077926 / 0.424275 (-0.346349) | 0.005470 / 0.007607 (-0.002137) | 0.336498 / 0.226044 (0.110454) | 3.330262 / 2.268929 (1.061334) | 1.865574 / 55.444624 (-53.579050) | 1.536932 / 6.876477 (-5.339545) | 1.720960 / 2.142072 (-0.421113) | 0.794753 / 4.805227 (-4.010475) | 0.133491 / 6.500664 (-6.367173) | 0.042437 / 0.075469 (-0.033032) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.976788 / 1.841788 (-0.865000) | 11.895137 / 8.074308 (3.820829) | 9.211969 / 10.191392 (-0.979423) | 0.141798 / 0.680424 (-0.538626) | 0.014354 / 0.534201 (-0.519847) | 0.306044 / 0.579283 (-0.273239) | 0.265016 / 0.434364 (-0.169348) | 0.340877 / 0.540337 (-0.199460) | 0.470449 / 1.386936 (-0.916487) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006134 / 0.011353 (-0.005219) | 0.004023 / 0.011008 (-0.006985) | 0.050419 / 0.038508 (0.011911) | 0.033853 / 0.023109 (0.010744) | 0.266799 / 0.275898 (-0.009099) | 0.291248 / 0.323480 (-0.032232) | 0.004474 / 0.007986 (-0.003511) | 0.002847 / 0.004328 (-0.001481) | 0.049895 / 0.004250 (0.045645) | 0.041160 / 0.037052 (0.004108) | 0.278818 / 0.258489 (0.020329) | 0.314027 / 0.293841 (0.020186) | 0.032303 / 0.128546 (-0.096243) | 0.012367 / 0.075646 (-0.063279) | 0.061495 / 0.419271 (-0.357776) | 0.033512 / 0.043533 (-0.010021) | 0.266168 / 0.255139 (0.011029) | 0.283129 / 0.283200 (-0.000071) | 0.018674 / 0.141683 (-0.123009) | 1.124453 / 1.452155 (-0.327701) | 1.164527 / 1.492716 (-0.328189) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.098522 / 0.018006 (0.080516) | 0.315069 / 0.000490 (0.314579) | 0.000202 / 0.000200 (0.000002) | 0.000053 / 0.000054 (-0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022809 / 0.037411 (-0.014602) | 0.078409 / 0.014526 (0.063883) | 0.088558 / 0.176557 (-0.087998) | 0.130004 / 0.737135 (-0.607131) | 0.090507 / 0.296338 (-0.205832) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291323 / 0.215209 (0.076114) | 2.836363 / 2.077655 (0.758708) | 1.548889 / 1.504120 (0.044769) | 1.423857 / 1.541195 (-0.117337) | 1.461667 / 1.468490 (-0.006823) | 0.714956 / 4.584777 (-3.869821) | 0.948170 / 3.745712 (-2.797542) | 3.036151 / 5.269862 (-2.233711) | 1.923824 / 4.565676 (-2.641853) | 0.078002 / 0.424275 (-0.346273) | 0.005198 / 0.007607 (-0.002409) | 0.337007 / 0.226044 (0.110963) | 3.310255 / 2.268929 (1.041327) | 1.910371 / 55.444624 (-53.534253) | 1.619855 / 6.876477 (-5.256622) | 1.682093 / 2.142072 (-0.459979) | 0.789903 / 4.805227 (-4.015324) | 0.132117 / 6.500664 (-6.368547) | 0.041312 / 0.075469 (-0.034157) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.997658 / 1.841788 (-0.844130) | 12.447878 / 8.074308 (4.373570) | 10.277662 / 10.191392 (0.086270) | 0.143580 / 0.680424 (-0.536844) | 0.016472 / 0.534201 (-0.517729) | 0.307235 / 0.579283 (-0.272048) | 0.125469 / 0.434364 (-0.308895) | 0.339525 / 0.540337 (-0.200813) | 0.427371 / 1.386936 (-0.959566) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#689447f8c86f777829a4db9ccc5d8133c12ec84c \"CML watermark\")\n" ]
2024-07-04T15:11:08Z
2024-07-04T15:26:35Z
2024-07-04T15:19:16Z
MEMBER
null
null
null
...after last PR errors
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7028/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7028/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7028.diff", "html_url": "https://github.com/huggingface/datasets/pull/7028", "merged_at": "2024-07-04T15:19:16Z", "patch_url": "https://github.com/huggingface/datasets/pull/7028.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7028" }
https://api.github.com/repos/huggingface/datasets/issues/6983
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6983/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6983/comments
https://api.github.com/repos/huggingface/datasets/issues/6983/events
https://github.com/huggingface/datasets/pull/6983
2,361,806,201
PR_kwDODunzps5y7tK7
6,983
Remove metrics
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
null
[]
{ "closed_at": null, "closed_issues": 5, "created_at": "2023-02-13T16:22:42Z", "creator": { "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }, "description": "Next major release", "due_on": null, "html_url": "https://github.com/huggingface/datasets/milestone/10", "id": 9038583, "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/10/labels", "node_id": "MI_kwDODunzps4Aier3", "number": 10, "open_issues": 3, "state": "open", "title": "3.0", "updated_at": "2024-08-21T09:35:06Z", "url": "https://api.github.com/repos/huggingface/datasets/milestones/10" }
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6983). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005566 / 0.011353 (-0.005787) | 0.003977 / 0.011008 (-0.007031) | 0.063250 / 0.038508 (0.024742) | 0.030907 / 0.023109 (0.007798) | 0.244989 / 0.275898 (-0.030909) | 0.272139 / 0.323480 (-0.051341) | 0.004332 / 0.007986 (-0.003653) | 0.002960 / 0.004328 (-0.001368) | 0.050147 / 0.004250 (0.045896) | 0.044740 / 0.037052 (0.007688) | 0.256947 / 0.258489 (-0.001542) | 0.290372 / 0.293841 (-0.003469) | 0.030444 / 0.128546 (-0.098102) | 0.012675 / 0.075646 (-0.062971) | 0.203852 / 0.419271 (-0.215420) | 0.036977 / 0.043533 (-0.006556) | 0.244401 / 0.255139 (-0.010738) | 0.270020 / 0.283200 (-0.013179) | 0.018177 / 0.141683 (-0.123506) | 1.122189 / 1.452155 (-0.329966) | 1.176688 / 1.492716 (-0.316028) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.100721 / 0.018006 (0.082715) | 0.311824 / 0.000490 (0.311335) | 0.000222 / 0.000200 (0.000022) | 0.000043 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.020039 / 0.037411 (-0.017373) | 0.062084 / 0.014526 (0.047558) | 0.074317 / 0.176557 (-0.102240) | 0.123935 / 0.737135 (-0.613200) | 0.076186 / 0.296338 (-0.220153) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.284827 / 0.215209 (0.069618) | 2.782727 / 2.077655 (0.705072) | 1.417624 / 1.504120 (-0.086496) | 1.294476 / 1.541195 (-0.246718) | 1.332658 / 1.468490 (-0.135832) | 0.724820 / 4.584777 (-3.859957) | 2.384546 / 3.745712 (-1.361166) | 2.866759 / 5.269862 (-2.403103) | 1.930756 / 4.565676 (-2.634921) | 0.083090 / 0.424275 (-0.341185) | 0.005566 / 0.007607 (-0.002041) | 0.340117 / 0.226044 (0.114072) | 3.342417 / 2.268929 (1.073488) | 1.807842 / 55.444624 (-53.636782) | 1.511647 / 6.876477 (-5.364830) | 1.653893 / 2.142072 (-0.488179) | 0.803983 / 4.805227 (-4.001244) | 0.136205 / 6.500664 (-6.364459) | 0.042815 / 0.075469 (-0.032654) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.962346 / 1.841788 (-0.879442) | 11.792239 / 8.074308 (3.717931) | 9.236256 / 10.191392 (-0.955136) | 0.143200 / 0.680424 (-0.537224) | 0.015050 / 0.534201 (-0.519151) | 0.304623 / 0.579283 (-0.274660) | 0.266417 / 0.434364 (-0.167947) | 0.341213 / 0.540337 (-0.199124) | 0.454258 / 1.386936 (-0.932678) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005917 / 0.011353 (-0.005436) | 0.004005 / 0.011008 (-0.007003) | 0.049781 / 0.038508 (0.011273) | 0.033310 / 0.023109 (0.010200) | 0.271881 / 0.275898 (-0.004017) | 0.296855 / 0.323480 (-0.026625) | 0.004479 / 0.007986 (-0.003507) | 0.002818 / 0.004328 (-0.001510) | 0.048213 / 0.004250 (0.043962) | 0.043480 / 0.037052 (0.006428) | 0.285963 / 0.258489 (0.027473) | 0.317304 / 0.293841 (0.023463) | 0.031619 / 0.128546 (-0.096928) | 0.012312 / 0.075646 (-0.063335) | 0.059904 / 0.419271 (-0.359368) | 0.033152 / 0.043533 (-0.010381) | 0.274198 / 0.255139 (0.019059) | 0.290469 / 0.283200 (0.007269) | 0.019424 / 0.141683 (-0.122258) | 1.133669 / 1.452155 (-0.318485) | 1.194427 / 1.492716 (-0.298290) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.101561 / 0.018006 (0.083555) | 0.312617 / 0.000490 (0.312127) | 0.000216 / 0.000200 (0.000016) | 0.000045 / 0.000054 (-0.000009) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.023705 / 0.037411 (-0.013706) | 0.076781 / 0.014526 (0.062255) | 0.089922 / 0.176557 (-0.086634) | 0.129182 / 0.737135 (-0.607953) | 0.092022 / 0.296338 (-0.204317) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.300977 / 0.215209 (0.085768) | 2.909088 / 2.077655 (0.831433) | 1.592821 / 1.504120 (0.088701) | 1.466627 / 1.541195 (-0.074568) | 1.497558 / 1.468490 (0.029068) | 0.720986 / 4.584777 (-3.863791) | 0.958039 / 3.745712 (-2.787673) | 3.023413 / 5.269862 (-2.246448) | 1.933245 / 4.565676 (-2.632432) | 0.080500 / 0.424275 (-0.343775) | 0.005243 / 0.007607 (-0.002364) | 0.361259 / 0.226044 (0.135215) | 3.447317 / 2.268929 (1.178389) | 1.938234 / 55.444624 (-53.506390) | 1.671563 / 6.876477 (-5.204913) | 1.674647 / 2.142072 (-0.467425) | 0.790606 / 4.805227 (-4.014621) | 0.133312 / 6.500664 (-6.367352) | 0.041241 / 0.075469 (-0.034228) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.996167 / 1.841788 (-0.845621) | 12.460877 / 8.074308 (4.386569) | 10.608415 / 10.191392 (0.417023) | 0.134076 / 0.680424 (-0.546348) | 0.016166 / 0.534201 (-0.518035) | 0.301218 / 0.579283 (-0.278065) | 0.128979 / 0.434364 (-0.305385) | 0.336453 / 0.540337 (-0.203884) | 0.435561 / 1.386936 (-0.951375) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#70e7355b7125fb792107ef5128ee3ad15cbec26c \"CML watermark\")\n" ]
2024-06-19T09:08:55Z
2024-06-28T06:57:38Z
2024-06-28T06:51:30Z
MEMBER
null
null
null
Remove all metrics, as part of the 3.0 release. Note that they have been deprecated since version 2.5.0.
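For readers migrating off the removed metrics, a minimal sketch of the replacement path, assuming the standalone `evaluate` library (where the Hugging Face metrics now live) is installed:

```python
# Before (deprecated since datasets 2.5.0, removed in 3.0):
#   from datasets import load_metric
#   metric = load_metric("accuracy")

# After: load the same metric from the separate `evaluate` package.
import evaluate

metric = evaluate.load("accuracy")
result = metric.compute(predictions=[0, 1, 1], references=[0, 1, 0])
print(result)  # e.g. {'accuracy': 0.666...}
```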
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6983/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6983/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6983.diff", "html_url": "https://github.com/huggingface/datasets/pull/6983", "merged_at": "2024-06-28T06:51:30Z", "patch_url": "https://github.com/huggingface/datasets/pull/6983.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6983" }
https://api.github.com/repos/huggingface/datasets/issues/5832
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5832/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5832/comments
https://api.github.com/repos/huggingface/datasets/issues/5832/events
https://github.com/huggingface/datasets/issues/5832
1,702,135,336
I_kwDODunzps5ldIYo
5,832
404 Client Error: Not Found for url: https://huggingface.co/api/models/bert-large-cased
{ "avatar_url": "https://avatars.githubusercontent.com/u/51288316?v=4", "events_url": "https://api.github.com/users/varungupta31/events{/privacy}", "followers_url": "https://api.github.com/users/varungupta31/followers", "following_url": "https://api.github.com/users/varungupta31/following{/other_user}", "gists_url": "https://api.github.com/users/varungupta31/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/varungupta31", "id": 51288316, "login": "varungupta31", "node_id": "MDQ6VXNlcjUxMjg4MzE2", "organizations_url": "https://api.github.com/users/varungupta31/orgs", "received_events_url": "https://api.github.com/users/varungupta31/received_events", "repos_url": "https://api.github.com/users/varungupta31/repos", "site_admin": false, "starred_url": "https://api.github.com/users/varungupta31/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/varungupta31/subscriptions", "type": "User", "url": "https://api.github.com/users/varungupta31", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "moved to https://github.com/huggingface/transformers/issues/23233" ]
2023-05-09T14:14:59Z
2023-05-09T14:25:59Z
2023-05-09T14:25:59Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Running the [Bert-Large-Cased](https://huggingface.co/bert-large-cased) model causes an `HTTPError`, with the following traceback: ``` HTTPError Traceback (most recent call last) <ipython-input-6-5c580443a1ad> in <module> ----> 1 tokenizer = BertTokenizer.from_pretrained('bert-large-cased') ~/miniconda3/envs/cmd-chall/lib/python3.7/site-packages/transformers/tokenization_utils_base.py in from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs) 1646 # At this point pretrained_model_name_or_path is either a directory or a model identifier name 1647 fast_tokenizer_file = get_fast_tokenizer_file( -> 1648 pretrained_model_name_or_path, revision=revision, use_auth_token=use_auth_token 1649 ) 1650 additional_files_names = { ~/miniconda3/envs/cmd-chall/lib/python3.7/site-packages/transformers/tokenization_utils_base.py in get_fast_tokenizer_file(path_or_repo, revision, use_auth_token) 3406 """ 3407 # Inspect all files from the repo/folder. -> 3408 all_files = get_list_of_files(path_or_repo, revision=revision, use_auth_token=use_auth_token) 3409 tokenizer_files_map = {} 3410 for file_name in all_files: ~/miniconda3/envs/cmd-chall/lib/python3.7/site-packages/transformers/file_utils.py in get_list_of_files(path_or_repo, revision, use_auth_token) 1685 token = None 1686 model_info = HfApi(endpoint=HUGGINGFACE_CO_RESOLVE_ENDPOINT).model_info( -> 1687 path_or_repo, revision=revision, token=token 1688 ) 1689 return [f.rfilename for f in model_info.siblings] ~/miniconda3/envs/cmd-chall/lib/python3.7/site-packages/huggingface_hub/hf_api.py in model_info(self, repo_id, revision, token) 246 ) 247 r = requests.get(path, headers=headers) --> 248 r.raise_for_status() 249 d = r.json() 250 return ModelInfo(**d) ~/miniconda3/envs/cmd-chall/lib/python3.7/site-packages/requests/models.py in raise_for_status(self) 951 952 if http_error_msg: --> 953 raise HTTPError(http_error_msg, response=self) 954 955 def close(self): HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/api/models/bert-large-cased ``` I have also tried running in offline mode, as [discussed here](https://huggingface.co/docs/transformers/installation#offline-mode): ``` HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 ``` ### Steps to reproduce the bug 1. `from transformers import BertTokenizer, BertModel` 2. `tokenizer = BertTokenizer.from_pretrained('bert-large-cased')` ### Expected behavior Run without the HTTP error. 
### Environment info | # Name | Version | Build | Channel | | |--------------------|------------|-----------------------------|---------|---| | _libgcc_mutex | 0.1 | main | | | | _openmp_mutex | 4.5 | 1_gnu | | | | _pytorch_select | 0.1 | cpu_0 | | | | appdirs | 1.4.4 | pypi_0 | pypi | | | backcall | 0.2.0 | pypi_0 | pypi | | | blas | 1.0 | mkl | | | | bzip2 | 1.0.8 | h7b6447c_0 | | | | ca-certificates | 2021.7.5 | h06a4308_1 | | | | certifi | 2021.5.30 | py37h06a4308_0 | | | | cffi | 1.14.6 | py37h400218f_0 | | | | charset-normalizer | 2.0.3 | pypi_0 | pypi | | | click | 8.0.1 | pypi_0 | pypi | | | colorama | 0.4.4 | pypi_0 | pypi | | | cudatoolkit | 11.1.74 | h6bb024c_0 | nvidia | | | cycler | 0.11.0 | pypi_0 | pypi | | | decorator | 5.0.9 | pypi_0 | pypi | | | docker-pycreds | 0.4.0 | pypi_0 | pypi | | | docopt | 0.6.2 | pypi_0 | pypi | | | dominate | 2.6.0 | pypi_0 | pypi | | | ffmpeg | 4.3 | hf484d3e_0 | pytorch | | | filelock | 3.0.12 | pypi_0 | pypi | | | fonttools | 4.38.0 | pypi_0 | pypi | | | freetype | 2.10.4 | h5ab3b9f_0 | | | | gitdb | 4.0.7 | pypi_0 | pypi | | | gitpython | 3.1.18 | pypi_0 | pypi | | | gmp | 6.2.1 | h2531618_2 | | | | gnutls | 3.6.15 | he1e5248_0 | | | | huggingface-hub | 0.0.12 | pypi_0 | pypi | | | humanize | 3.10.0 | pypi_0 | pypi | | | idna | 3.2 | pypi_0 | pypi | | | importlib-metadata | 4.6.1 | pypi_0 | pypi | | | intel-openmp | 2019.4 | 243 | | | | ipdb | 0.13.9 | pypi_0 | pypi | | | ipython | 7.25.0 | pypi_0 | pypi | | | ipython-genutils | 0.2.0 | pypi_0 | pypi | | | jedi | 0.18.0 | pypi_0 | pypi | | | joblib | 1.0.1 | pypi_0 | pypi | | | jpeg | 9b | h024ee3a_2 | | | | jsonpickle | 1.5.2 | pypi_0 | pypi | | | kiwisolver | 1.4.4 | pypi_0 | pypi | | | lame | 3.100 | h7b6447c_0 | | | | lcms2 | 2.12 | h3be6417_0 | | | | ld_impl_linux-64 | 2.35.1 | h7274673_9 | | | | libffi | 3.3 | he6710b0_2 | | | | libgcc-ng | 9.3.0 | h5101ec6_17 | | | | libgomp | 9.3.0 | h5101ec6_17 | | | | libiconv | 1.15 | h63c8f33_5 | | | | libidn2 | 2.3.2 | h7f8727e_0 | | | | libmklml | 2019.0.5 | 0 | | | | libpng | 1.6.37 | hbc83047_0 | | | | libstdcxx-ng | 9.3.0 | hd4cf53a_17 | | | | libtasn1 | 4.16.0 | h27cfd23_0 | | | | libtiff | 4.2.0 | h85742a9_0 | | | | libunistring | 0.9.10 | h27cfd23_0 | | | | libuv | 1.40.0 | h7b6447c_0 | | | | libwebp-base | 1.2.0 | h27cfd23_0 | | | | lz4-c | 1.9.3 | h2531618_0 | | | | matplotlib | 3.5.3 | pypi_0 | pypi | | | matplotlib-inline | 0.1.2 | pypi_0 | pypi | | | mergedeep | 1.3.4 | pypi_0 | pypi | | | mkl | 2020.2 | 256 | | | | mkl-service | 2.3.0 | py37he8ac12f_0 | | | | mkl_fft | 1.3.0 | py37h54f3939_0 | | | | mkl_random | 1.1.1 | py37h0573a6f_0 | | | | msgpack | 1.0.2 | pypi_0 | pypi | | | munch | 2.5.0 | pypi_0 | pypi | | | ncurses | 6.2 | he6710b0_1 | | | | nettle | 3.7.3 | hbbd107a_1 | | | | ninja | 1.10.2 | hff7bd54_1 | | | | nltk | 3.8.1 | pypi_0 | pypi | | | numpy | 1.19.2 | py37h54aff64_0 | | | | numpy-base | 1.19.2 | py37hfa32c7d_0 | | | | olefile | 0.46 | py37_0 | | | | openh264 | 2.1.0 | hd408876_0 | | | | openjpeg | 2.3.0 | h05c96fa_1 | | | | openssl | 1.1.1k | h27cfd23_0 | | | | packaging | 21.0 | pypi_0 | pypi | | | pandas | 1.3.1 | pypi_0 | pypi | | | parso | 0.8.2 | pypi_0 | pypi | | | pathtools | 0.1.2 | pypi_0 | pypi | | | pexpect | 4.8.0 | pypi_0 | pypi | | | pickleshare | 0.7.5 | pypi_0 | pypi | | | pillow | 8.3.1 | py37h2c7a002_0 | | | | pip | 21.1.3 | py37h06a4308_0 | | | | prompt-toolkit | 3.0.19 | pypi_0 | pypi | | | protobuf | 4.21.12 | pypi_0 | pypi | | | psutil | 5.8.0 | pypi_0 | pypi | | | ptyprocess | 0.7.0 | 
pypi_0 | pypi | | | py-cpuinfo | 8.0.0 | pypi_0 | pypi | | | pycparser | 2.20 | py_2 | | | | pygments | 2.9.0 | pypi_0 | pypi | | | pyparsing | 2.4.7 | pypi_0 | pypi | | | python | 3.7.10 | h12debd9_4 | | | | python-dateutil | 2.8.2 | pypi_0 | pypi | | | pytorch | 1.9.0 | py3.7_cuda11.1_cudnn8.0.5_0 | pytorch | | | pytz | 2021.1 | pypi_0 | pypi | | | pyyaml | 5.4.1 | pypi_0 | pypi | | | readline | 8.1 | h27cfd23_0 | | | | regex | 2022.10.31 | pypi_0 | pypi | | | requests | 2.26.0 | pypi_0 | pypi | | | sacred | 0.8.2 | pypi_0 | pypi | | | sacremoses | 0.0.45 | pypi_0 | pypi | | | scikit-learn | 0.24.2 | pypi_0 | pypi | | | scipy | 1.7.0 | pypi_0 | pypi | | | sentry-sdk | 1.15.0 | pypi_0 | pypi | | | setproctitle | 1.3.2 | pypi_0 | pypi | | | setuptools | 52.0.0 | py37h06a4308_0 | | | | six | 1.16.0 | pyhd3eb1b0_0 | | | | smmap | 4.0.0 | pypi_0 | pypi | | | sqlite | 3.36.0 | hc218d9a_0 | | | | threadpoolctl | 2.2.0 | pypi_0 | pypi | | | tk | 8.6.10 | hbc83047_0 | | | | tokenizers | 0.10.3 | pypi_0 | pypi | | | toml | 0.10.2 | pypi_0 | pypi | | | torchaudio | 0.9.0 | py37 | pytorch | | | torchvision | 0.10.0 | py37_cu111 | pytorch | | | tqdm | 4.61.2 | pypi_0 | pypi | | | traitlets | 5.0.5 | pypi_0 | pypi | | | transformers | 4.9.1 | pypi_0 | pypi | | | typing-extensions | 3.10.0.0 | hd3eb1b0_0 | | | | typing_extensions | 3.10.0.0 | pyh06a4308_0 | | | | urllib3 | 1.26.14 | pypi_0 | pypi | | | wandb | 0.13.10 | pypi_0 | pypi | | | wcwidth | 0.2.5 | pypi_0 | pypi | | | wheel | 0.36.2 | pyhd3eb1b0_0 | | | | wrapt | 1.12.1 | pypi_0 | pypi | | | xz | 5.2.5 | h7b6447c_0 | | | | zipp | 3.5.0 | pypi_0 | pypi | | | zlib | 1.2.11 | h7b6447c_3 | | | | zstd | 1.4.9 | haebb681_0 | | |
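As a side note on the offline-mode attempt above: the environment variables only take effect if they are set before `transformers` is imported, and the requested files must already exist in the local cache. A hedged sketch, assuming the model was downloaded successfully at least once before:

```python
# Sketch: enable offline mode from inside Python. The variables must be set
# before transformers is imported, and the model files must already be cached.
import os

os.environ["TRANSFORMERS_OFFLINE"] = "1"
os.environ["HF_DATASETS_OFFLINE"] = "1"

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-large-cased")  # served from the cache
```

This does not address the underlying 404 (the issue was moved to the transformers repository); it only illustrates the offline-mode mechanics the reporter was experimenting with.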
{ "avatar_url": "https://avatars.githubusercontent.com/u/51288316?v=4", "events_url": "https://api.github.com/users/varungupta31/events{/privacy}", "followers_url": "https://api.github.com/users/varungupta31/followers", "following_url": "https://api.github.com/users/varungupta31/following{/other_user}", "gists_url": "https://api.github.com/users/varungupta31/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/varungupta31", "id": 51288316, "login": "varungupta31", "node_id": "MDQ6VXNlcjUxMjg4MzE2", "organizations_url": "https://api.github.com/users/varungupta31/orgs", "received_events_url": "https://api.github.com/users/varungupta31/received_events", "repos_url": "https://api.github.com/users/varungupta31/repos", "site_admin": false, "starred_url": "https://api.github.com/users/varungupta31/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/varungupta31/subscriptions", "type": "User", "url": "https://api.github.com/users/varungupta31", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5832/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5832/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/6409
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6409/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6409/comments
https://api.github.com/repos/huggingface/datasets/issues/6409/events
https://github.com/huggingface/datasets/issues/6409
1,991,960,865
I_kwDODunzps52uukh
6,409
Using DownloadManager to download from the local filesystem with disable_progress_bar raises an exception
{ "avatar_url": "https://avatars.githubusercontent.com/u/16574677?v=4", "events_url": "https://api.github.com/users/neiblegy/events{/privacy}", "followers_url": "https://api.github.com/users/neiblegy/followers", "following_url": "https://api.github.com/users/neiblegy/following{/other_user}", "gists_url": "https://api.github.com/users/neiblegy/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/neiblegy", "id": 16574677, "login": "neiblegy", "node_id": "MDQ6VXNlcjE2NTc0Njc3", "organizations_url": "https://api.github.com/users/neiblegy/orgs", "received_events_url": "https://api.github.com/users/neiblegy/received_events", "repos_url": "https://api.github.com/users/neiblegy/repos", "site_admin": false, "starred_url": "https://api.github.com/users/neiblegy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/neiblegy/subscriptions", "type": "User", "url": "https://api.github.com/users/neiblegy", "user_view_type": "public" }
[]
closed
false
null
[]
null
[]
2023-11-14T04:21:01Z
2023-11-22T16:42:09Z
2023-11-22T16:42:09Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I'm using `datasets.download.download_manager.DownloadManager` to download files like "file:///a/b/c.txt", with `disable_progress_bar()` called to disable the progress bar. This raises an exception: `AttributeError: 'function' object has no attribute 'close' Exception ignored in: <function TqdmCallback.__del__ at 0x7fa8683d84c0> Traceback (most recent call last): File "/home/protoss.gao/.local/lib/python3.9/site-packages/fsspec/callbacks.py", line 233, in __del__ self.tqdm.close()` Checking the source code at datasets/utils/file_utils.py:348: `TqdmCallback` is defined as deriving from `fsspec.callbacks.TqdmCallback`, but in the newest fsspec code (https://github.com/fsspec/filesystem_spec/blob/master/fsspec/callbacks.py, line 146), `_DEFAULT_CALLBACK` takes effect in this case, while line 234 calls a `close()` method that `_DEFAULT_CALLBACK` does not have. So either the `TqdmCallback` class in datasets/utils/file_utils.py should override `__del__`, or this bug should be reported to fsspec. ### Steps to reproduce the bug As described above. ### Expected behavior No exception. ### Environment info datasets: 2.14.4 python: 3.9 platform: x86_64
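A minimal sketch (not the actual `datasets` implementation) of the `__del__` override the reporter suggests, guarding against the case where no real tqdm bar was ever created; the class name `SafeTqdmCallback` is hypothetical:

```python
import fsspec.callbacks


class SafeTqdmCallback(fsspec.callbacks.TqdmCallback):
    """A callback whose deletion tolerates progress bars being disabled."""

    def __del__(self):
        # Recent fsspec versions call self.tqdm.close() unconditionally on
        # deletion; when progress bars are disabled, self.tqdm may be unset
        # or not a tqdm instance, so only close it when it can be closed.
        bar = getattr(self, "tqdm", None)
        if bar is not None and hasattr(bar, "close"):
            bar.close()
```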
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6409/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6409/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/6777
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6777/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6777/comments
https://api.github.com/repos/huggingface/datasets/issues/6777/events
https://github.com/huggingface/datasets/issues/6777
2,224,611,247
I_kwDODunzps6EmN-v
6,777
.jsonl metadata not detected
{ "avatar_url": "https://avatars.githubusercontent.com/u/81643693?v=4", "events_url": "https://api.github.com/users/nighting0le01/events{/privacy}", "followers_url": "https://api.github.com/users/nighting0le01/followers", "following_url": "https://api.github.com/users/nighting0le01/following{/other_user}", "gists_url": "https://api.github.com/users/nighting0le01/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nighting0le01", "id": 81643693, "login": "nighting0le01", "node_id": "MDQ6VXNlcjgxNjQzNjkz", "organizations_url": "https://api.github.com/users/nighting0le01/orgs", "received_events_url": "https://api.github.com/users/nighting0le01/received_events", "repos_url": "https://api.github.com/users/nighting0le01/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nighting0le01/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nighting0le01/subscriptions", "type": "User", "url": "https://api.github.com/users/nighting0le01", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "Hi! `metadata.jsonl` (or `metadata.csv`) is the only allowed name for the `imagefolder`'s metadata files.", "@mariosasko hey i tried with metadata.jsonl also and it still doesn't get the right columns", "@mariosasko it says metadata.csv not found\r\n<img width=\"1150\" alt=\"image\" src=\"https://github.com/huggingface/datasets/assets/81643693/3754980c-6185-4413-88fa-b499bcdd4195\">\r\n\r\ndataset = load_dataset('/dataset',metadata.csv) \r\n\r\n| workspace\r\n|| source code\r\n| dataset\r\n| |-- images\r\n| |-- metadata.csv\r\n| |-- metadata.jsonl\r\n| |-- padded_images\r\n\r\nExample of metadata.jsonl file\r\n{\"caption\": \"a drawing depicts a full shot of a black t-shirt with a triangular pattern on the front there is a white label on the left side of the triangle\", \"image\": \"images/212734.png\", \"gaussian_padded_image\": \"padded_images/p_212734.png\"}\r\n{\"caption\": \"an eye-level full shot of a large elephant and a baby elephant standing in a watering hole on the left side is a small elephant with its head turned to the right of dry land, trees, and bushes\", \"image\": \"images/212735.png\", \"gaussian_padded_image\": \"padded_images/p_212735.png\"}\r\n", "Loading more than one image per row with `imagefolder` is not supported currently. You can subscribe to https://github.com/huggingface/datasets/issues/5760 to see when it will be.\r\n\r\nInstead, you can load the dataset with `Dataset.from_generator`:\r\n```python\r\nimport json\r\nfrom datasets import Dataset, Value, Image, Features\r\n\r\ndef gen():\r\n with open(\"./dataset/metadata.jsonl\") as f:\r\n for line in f:\r\n line = json.loads(line)\r\n yield {\"caption\": line[\"caption\"], \"image\": os.path.join(\"./dataset\", line[\"image\"], \"gaussian_padded_image\": os.path.join(\"./dataset\", line[\"gaussian_padded_image\"]))}\r\n\r\nfeatures = Features({\"caption\": Value(\"string\"), \"image\": Image(), \"gaussian_padded_image\": Image()})\r\ndataset = Dataset.from_generator(gen, features=features)\r\n```\r\n(E.g., if you want to share this dataset on the Hub, you can call `dataset.push_to_hub(...)` afterward)", "hi Thanks for sharing this, Actually I was trying with a webdataset format of the data as well and it did'nt work. Could you share how i can create Dataset object from webdataset format of this data?" ]
2024-04-04T06:31:53Z
2024-04-05T21:14:48Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Hi, I have the following directory structure: |--dataset | |-- images | |-- metadata1000.csv | |-- metadata1000.jsonl | |-- padded_images Example of metadata1000.jsonl file {"caption": "a drawing depicts a full shot of a black t-shirt with a triangular pattern on the front there is a white label on the left side of the triangle", "image": "images/212734.png", "gaussian_padded_image": "padded_images/p_212734.png"} {"caption": "an eye-level full shot of a large elephant and a baby elephant standing in a watering hole on the left side is a small elephant with its head turned to the right of dry land, trees, and bushes", "image": "images/212735.png", "gaussian_padded_image": "padded_images/p_212735.png"} . . . I'm trying to use dataset = load_dataset("imagefolder", data_dir='/dataset/', split='train') to load the dataset, however it is not able to load according to the fields in metadata1000.jsonl. Please assist with loading the data properly. I also get ``` File "/workspace/train_trans_vae.py", line 1089, in <module> print(get_metadata_patterns('/dataset/')) File "/opt/conda/lib/python3.10/site-packages/datasets/data_files.py", line 499, in get_metadata_patterns raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None FileNotFoundError: The directory at /dataset/ doesn't contain any metadata file ``` when trying ``` from datasets.data_files import get_metadata_patterns print(get_metadata_patterns('/dataset/')) ``` ### Steps to reproduce the bug datasets version: 2.18.0; make a similar jsonl file and a similar directory layout. ### Expected behavior Creates a dataset object with the column names caption, image, gaussian_padded_image. ### Environment info datasets version: 2.18.0
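Two separate problems meet in this report: the `imagefolder` loader only detects metadata files named exactly `metadata.jsonl` or `metadata.csv` (so `metadata1000.jsonl` is invisible to it), and, per the maintainer's comment above, only one image column per row is supported. A hedged sketch of fixing the detection part alone, with paths mirroring the report:

```python
# Sketch: copy the custom-named metadata file to the name imagefolder expects.
# This fixes detection only; loading two image columns per row still requires
# the Dataset.from_generator approach shown in the comments above.
import shutil

from datasets import load_dataset

shutil.copy("/dataset/metadata1000.jsonl", "/dataset/metadata.jsonl")
dataset = load_dataset("imagefolder", data_dir="/dataset/", split="train")
```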
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6777/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6777/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/5703
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5703/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5703/comments
https://api.github.com/repos/huggingface/datasets/issues/5703/events
https://github.com/huggingface/datasets/pull/5703
1,653,158,955
PR_kwDODunzps5NjCCV
5,703
[WIP][Test, Please ignore] Investigate performance impact of using multiprocessing only
{ "avatar_url": "https://avatars.githubusercontent.com/u/1535968?v=4", "events_url": "https://api.github.com/users/hvaara/events{/privacy}", "followers_url": "https://api.github.com/users/hvaara/followers", "following_url": "https://api.github.com/users/hvaara/following{/other_user}", "gists_url": "https://api.github.com/users/hvaara/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hvaara", "id": 1535968, "login": "hvaara", "node_id": "MDQ6VXNlcjE1MzU5Njg=", "organizations_url": "https://api.github.com/users/hvaara/orgs", "received_events_url": "https://api.github.com/users/hvaara/received_events", "repos_url": "https://api.github.com/users/hvaara/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hvaara/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hvaara/subscriptions", "type": "User", "url": "https://api.github.com/users/hvaara", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "`multiprocess` uses `dill` instead of `pickle` for pickling shared objects and, as such, can pickle more types than `multiprocessing`. And I don't think this is something we want to change :).", "That makes sense to me, and I don't think you should merge this change. I was only curious about the performance impact. I saw the benchmarks that was produced in other PRs, and wanted to get a better understanding of it. I created this PR to see if it got automatically added here.\r\n\r\nIs there a way I can generate those benchmarks myself?", "You can find some speed comparisons between dill and pickle on SO if you google \"dill vs pickle speed\".\r\n\r\nAnd for the benchmarks, you can generate them locally with DVC running this code from the repo root: https://github.com/huggingface/datasets/blob/0803a006db1c395ac715662cc6079651f77c11ea/.github/workflows/benchmarks.yaml#L23-L47.", "Thanks for the help @mariosasko!" ]
2023-04-04T04:37:49Z
2023-04-20T03:17:37Z
2023-04-20T03:17:32Z
NONE
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/1535968?v=4", "events_url": "https://api.github.com/users/hvaara/events{/privacy}", "followers_url": "https://api.github.com/users/hvaara/followers", "following_url": "https://api.github.com/users/hvaara/following{/other_user}", "gists_url": "https://api.github.com/users/hvaara/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hvaara", "id": 1535968, "login": "hvaara", "node_id": "MDQ6VXNlcjE1MzU5Njg=", "organizations_url": "https://api.github.com/users/hvaara/orgs", "received_events_url": "https://api.github.com/users/hvaara/received_events", "repos_url": "https://api.github.com/users/hvaara/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hvaara/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hvaara/subscriptions", "type": "User", "url": "https://api.github.com/users/hvaara", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5703/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5703/timeline
null
null
1
{ "diff_url": "https://github.com/huggingface/datasets/pull/5703.diff", "html_url": "https://github.com/huggingface/datasets/pull/5703", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/5703.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5703" }
https://api.github.com/repos/huggingface/datasets/issues/6332
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6332/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6332/comments
https://api.github.com/repos/huggingface/datasets/issues/6332/events
https://github.com/huggingface/datasets/pull/6332
1,956,697,328
PR_kwDODunzps5dgW3w
6,332
Replace deprecated license_file in setup.cfg
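For context, a hedged sketch of the setuptools change this PR title refers to: the singular `license_file` key in setup.cfg was deprecated in favor of the plural `license_files`.

```ini
; setup.cfg — before (deprecated):
;   [metadata]
;   license_file = LICENSE

; after:
[metadata]
license_files = LICENSE
```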
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006884 / 0.011353 (-0.004469) | 0.004132 / 0.011008 (-0.006877) | 0.085993 / 0.038508 (0.047485) | 0.084049 / 0.023109 (0.060940) | 0.346194 / 0.275898 (0.070296) | 0.386999 / 0.323480 (0.063519) | 0.004185 / 0.007986 (-0.003801) | 0.004354 / 0.004328 (0.000026) | 0.065137 / 0.004250 (0.060886) | 0.057629 / 0.037052 (0.020577) | 0.353639 / 0.258489 (0.095150) | 0.400815 / 0.293841 (0.106974) | 0.031370 / 0.128546 (-0.097176) | 0.008719 / 0.075646 (-0.066927) | 0.289579 / 0.419271 (-0.129693) | 0.052826 / 0.043533 (0.009293) | 0.351110 / 0.255139 (0.095971) | 0.375663 / 0.283200 (0.092464) | 0.025892 / 0.141683 (-0.115791) | 1.481943 / 1.452155 (0.029789) | 1.541494 / 1.492716 (0.048778) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.240007 / 0.018006 (0.222000) | 0.456216 / 0.000490 (0.455726) | 0.009348 / 0.000200 (0.009148) | 0.000370 / 0.000054 (0.000315) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.029541 / 0.037411 (-0.007870) | 0.088394 / 0.014526 (0.073868) | 0.098460 / 0.176557 (-0.078096) | 0.154053 / 0.737135 (-0.583083) | 0.098821 / 0.296338 (-0.197518) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.386751 / 0.215209 (0.171542) | 3.809818 / 2.077655 (1.732164) | 1.833439 
/ 1.504120 (0.329319) | 1.686924 / 1.541195 (0.145729) | 1.796882 / 1.468490 (0.328392) | 0.488853 / 4.584777 (-4.095924) | 3.606369 / 3.745712 (-0.139343) | 3.460003 / 5.269862 (-1.809858) | 2.087493 / 4.565676 (-2.478184) | 0.056838 / 0.424275 (-0.367437) | 0.007679 / 0.007607 (0.000072) | 0.455080 / 0.226044 (0.229036) | 4.539227 / 2.268929 (2.270299) | 2.337245 / 55.444624 (-53.107379) | 1.988195 / 6.876477 (-4.888281) | 2.067473 / 2.142072 (-0.074600) | 0.576640 / 4.805227 (-4.228587) | 0.132140 / 6.500664 (-6.368525) | 0.060737 / 0.075469 (-0.014732) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.268866 / 1.841788 (-0.572922) | 19.695296 / 8.074308 (11.620988) | 14.431254 / 10.191392 (4.239862) | 0.166779 / 0.680424 (-0.513645) | 0.018262 / 0.534201 (-0.515939) | 0.390406 / 0.579283 (-0.188877) | 0.411284 / 0.434364 (-0.023080) | 0.456696 / 0.540337 (-0.083642) | 0.629660 / 1.386936 (-0.757276) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007210 / 0.011353 (-0.004143) | 0.004124 / 0.011008 (-0.006884) | 0.065877 / 0.038508 (0.027368) | 0.086242 / 0.023109 (0.063133) | 0.420087 / 0.275898 (0.144189) | 0.454327 / 0.323480 (0.130847) | 0.005586 / 0.007986 (-0.002399) | 0.003465 / 0.004328 (-0.000863) | 0.065153 / 0.004250 (0.060902) | 0.059337 / 0.037052 (0.022285) | 0.420913 / 0.258489 (0.162424) | 0.458552 / 0.293841 (0.164711) | 0.032335 / 0.128546 (-0.096211) | 0.008672 / 0.075646 (-0.066974) | 0.072029 / 0.419271 (-0.347242) | 0.048148 / 0.043533 (0.004615) | 0.423334 / 0.255139 (0.168196) | 0.440616 / 0.283200 (0.157416) | 0.023761 / 0.141683 (-0.117922) | 1.487022 / 1.452155 (0.034868) | 1.554028 / 1.492716 (0.061312) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.216693 / 0.018006 (0.198687) | 0.446359 / 0.000490 (0.445869) | 0.005294 / 0.000200 (0.005094) | 0.000100 / 0.000054 (0.000045) |\n\n### Benchmark: 
benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.034655 / 0.037411 (-0.002756) | 0.099479 / 0.014526 (0.084953) | 0.111822 / 0.176557 (-0.064735) | 0.160675 / 0.737135 (-0.576461) | 0.108718 / 0.296338 (-0.187621) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.440270 / 0.215209 (0.225061) | 4.389013 / 2.077655 (2.311358) | 2.408007 / 1.504120 (0.903887) | 2.237233 / 1.541195 (0.696038) | 2.344131 / 1.468490 (0.875641) | 0.493143 / 4.584777 (-4.091634) | 3.620024 / 3.745712 (-0.125688) | 3.335810 / 5.269862 (-1.934052) | 2.079256 / 4.565676 (-2.486420) | 0.058324 / 0.424275 (-0.365951) | 0.007410 / 0.007607 (-0.000197) | 0.512057 / 0.226044 (0.286013) | 5.120629 / 2.268929 (2.851701) | 2.913268 / 55.444624 (-52.531356) | 2.558214 / 6.876477 (-4.318262) | 2.784146 / 2.142072 (0.642074) | 0.593308 / 4.805227 (-4.211920) | 0.134941 / 6.500664 (-6.365723) | 0.062292 / 0.075469 (-0.013177) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.351795 / 1.841788 (-0.489993) | 20.489559 / 8.074308 (12.415251) | 15.046116 / 10.191392 (4.854724) | 0.166339 / 0.680424 (-0.514085) | 0.020449 / 0.534201 (-0.513752) | 0.406570 / 0.579283 (-0.172713) | 0.423405 / 0.434364 (-0.010959) | 0.474541 / 0.540337 (-0.065796) | 0.653280 / 1.386936 (-0.733656) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#3bde0f0f0e556e55b95c72b0f83bdcf7145c813c \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after 
write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006362 / 0.011353 (-0.004991) | 0.003990 / 0.011008 (-0.007018) | 0.084020 / 0.038508 (0.045512) | 0.072198 / 0.023109 (0.049089) | 0.335992 / 0.275898 (0.060094) | 0.362056 / 0.323480 (0.038576) | 0.005298 / 0.007986 (-0.002688) | 0.003421 / 0.004328 (-0.000908) | 0.065343 / 0.004250 (0.061092) | 0.053310 / 0.037052 (0.016258) | 0.344855 / 0.258489 (0.086366) | 0.385524 / 0.293841 (0.091683) | 0.030209 / 0.128546 (-0.098337) | 0.008465 / 0.075646 (-0.067181) | 0.287359 / 0.419271 (-0.131912) | 0.051371 / 0.043533 (0.007838) | 0.338716 / 0.255139 (0.083577) | 0.351730 / 0.283200 (0.068530) | 0.023581 / 0.141683 (-0.118102) | 1.473772 / 1.452155 (0.021617) | 1.560594 / 1.492716 (0.067878) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.309019 / 0.018006 (0.291013) | 0.561428 / 0.000490 (0.560939) | 0.007237 / 0.000200 (0.007038) | 0.000266 / 0.000054 (0.000212) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.028172 / 0.037411 (-0.009239) | 0.081050 / 0.014526 (0.066524) | 0.095952 / 0.176557 (-0.080604) | 0.151796 / 0.737135 (-0.585340) | 0.096132 / 0.296338 (-0.200206) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.384287 / 0.215209 (0.169078) | 3.840797 / 2.077655 (1.763142) | 1.891120 / 1.504120 (0.387000) | 1.743498 / 1.541195 (0.202303) | 1.821037 / 1.468490 (0.352547) | 0.484946 / 4.584777 (-4.099831) | 3.586053 / 3.745712 (-0.159659) | 3.446215 / 5.269862 (-1.823647) | 2.054352 / 4.565676 (-2.511325) | 0.057315 / 0.424275 (-0.366960) | 0.007541 / 0.007607 (-0.000066) | 0.464088 / 0.226044 (0.238044) | 4.634005 / 2.268929 (2.365076) | 2.355818 / 55.444624 (-53.088806) | 2.045584 / 6.876477 (-4.830893) | 2.039455 / 2.142072 (-0.102617) | 0.576137 / 4.805227 (-4.229090) | 0.132071 / 6.500664 (-6.368593) | 0.059611 / 0.075469 (-0.015858) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.280078 / 1.841788 (-0.561710) | 19.054079 / 8.074308 (10.979771) | 14.291090 / 10.191392 (4.099698) | 0.170607 / 0.680424 (-0.509817) | 0.018489 / 0.534201 (-0.515712) | 0.391802 / 0.579283 (-0.187481) | 0.418945 / 0.434364 (-0.015419) | 0.464084 / 0.540337 
(-0.076254) | 0.638099 / 1.386936 (-0.748837) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006735 / 0.011353 (-0.004618) | 0.004133 / 0.011008 (-0.006876) | 0.064620 / 0.038508 (0.026112) | 0.076395 / 0.023109 (0.053286) | 0.399659 / 0.275898 (0.123761) | 0.426821 / 0.323480 (0.103341) | 0.006407 / 0.007986 (-0.001578) | 0.003472 / 0.004328 (-0.000857) | 0.064922 / 0.004250 (0.060671) | 0.058312 / 0.037052 (0.021260) | 0.403286 / 0.258489 (0.144797) | 0.437772 / 0.293841 (0.143931) | 0.032323 / 0.128546 (-0.096223) | 0.008727 / 0.075646 (-0.066919) | 0.071344 / 0.419271 (-0.347927) | 0.048673 / 0.043533 (0.005141) | 0.400693 / 0.255139 (0.145554) | 0.418668 / 0.283200 (0.135468) | 0.022871 / 0.141683 (-0.118812) | 1.517691 / 1.452155 (0.065536) | 1.552021 / 1.492716 (0.059305) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.305279 / 0.018006 (0.287272) | 0.520054 / 0.000490 (0.519564) | 0.007247 / 0.000200 (0.007047) | 0.000098 / 0.000054 (0.000044) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.032001 / 0.037411 (-0.005410) | 0.091273 / 0.014526 (0.076747) | 0.106480 / 0.176557 (-0.070077) | 0.163122 / 0.737135 (-0.574014) | 0.105244 / 0.296338 (-0.191094) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.432207 / 0.215209 (0.216998) | 4.304856 / 2.077655 (2.227202) | 2.326790 / 1.504120 (0.822670) | 2.150081 / 1.541195 (0.608886) | 2.150558 
/ 1.468490 (0.682068) | 0.488808 / 4.584777 (-4.095969) | 3.690435 / 3.745712 (-0.055277) | 3.302625 / 5.269862 (-1.967236) | 2.044193 / 4.565676 (-2.521483) | 0.057520 / 0.424275 (-0.366755) | 0.007281 / 0.007607 (-0.000326) | 0.521078 / 0.226044 (0.295034) | 5.162620 / 2.268929 (2.893691) | 2.744041 / 55.444624 (-52.700583) | 2.407211 / 6.876477 (-4.469266) | 2.606290 / 2.142072 (0.464217) | 0.586412 / 4.805227 (-4.218815) | 0.132152 / 6.500664 (-6.368512) | 0.059424 / 0.075469 (-0.016045) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.351879 / 1.841788 (-0.489908) | 19.460608 / 8.074308 (11.386299) | 14.643413 / 10.191392 (4.452021) | 0.168062 / 0.680424 (-0.512362) | 0.020396 / 0.534201 (-0.513805) | 0.395885 / 0.579283 (-0.183398) | 0.439551 / 0.434364 (0.005187) | 0.473051 / 0.540337 (-0.067286) | 0.644614 / 1.386936 (-0.742322) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#732b2ed47728fffc8d74f92691c21de8ac7423fe \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.014708 / 0.011353 (0.003355) | 0.008309 / 0.011008 (-0.002699) | 0.138986 / 0.038508 (0.100478) | 0.121781 / 0.023109 (0.098671) | 0.495536 / 0.275898 (0.219637) | 0.565195 / 0.323480 (0.241715) | 0.008018 / 0.007986 (0.000032) | 0.004904 / 0.004328 (0.000575) | 0.080622 / 0.004250 (0.076371) | 0.078917 / 0.037052 (0.041865) | 0.489424 / 0.258489 (0.230935) | 0.540496 / 0.293841 (0.246656) | 0.061110 / 0.128546 (-0.067437) | 0.021443 / 0.075646 (-0.054203) | 0.395789 / 0.419271 (-0.023482) | 0.076727 / 0.043533 (0.033194) | 0.427808 / 0.255139 (0.172669) | 0.519672 / 0.283200 (0.236473) | 0.041607 / 0.141683 (-0.100076) | 2.098675 / 1.452155 (0.646520) | 2.175123 / 1.492716 (0.682407) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.275784 / 0.018006 (0.257777) | 0.707103 / 0.000490 (0.706613) | 0.011524 / 0.000200 
(0.011324) | 0.000390 / 0.000054 (0.000336) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.032897 / 0.037411 (-0.004514) | 0.123239 / 0.014526 (0.108713) | 0.151815 / 0.176557 (-0.024741) | 0.214790 / 0.737135 (-0.522345) | 0.139166 / 0.296338 (-0.157173) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.740662 / 0.215209 (0.525453) | 7.540376 / 2.077655 (5.462721) | 3.168207 / 1.504120 (1.664087) | 2.745663 / 1.541195 (1.204468) | 2.714020 / 1.468490 (1.245530) | 1.182632 / 4.584777 (-3.402145) | 6.365807 / 3.745712 (2.620095) | 6.317228 / 5.269862 (1.047366) | 4.061107 / 4.565676 (-0.504569) | 0.146939 / 0.424275 (-0.277336) | 0.011765 / 0.007607 (0.004158) | 0.910564 / 0.226044 (0.684519) | 9.020618 / 2.268929 (6.751689) | 4.180748 / 55.444624 (-51.263876) | 3.290257 / 6.876477 (-3.586220) | 3.363172 / 2.142072 (1.221099) | 1.239142 / 4.805227 (-3.566086) | 0.294965 / 6.500664 (-6.205699) | 0.088520 / 0.075469 (0.013051) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.867528 / 1.841788 (0.025741) | 29.494058 / 8.074308 (21.419750) | 31.386703 / 10.191392 (21.195311) | 0.302488 / 0.680424 (-0.377936) | 0.036116 / 0.534201 (-0.498085) | 0.622112 / 0.579283 (0.042829) | 0.775658 / 0.434364 (0.341294) | 0.632452 / 0.540337 (0.092115) | 0.909424 / 1.386936 (-0.477512) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.016002 / 0.011353 (0.004649) | 0.007007 / 0.011008 (-0.004002) | 0.100463 / 0.038508 (0.061955) | 0.124423 / 0.023109 (0.101314) | 0.556014 / 0.275898 (0.280116) | 0.600909 / 0.323480 (0.277429) | 0.007272 / 0.007986 (-0.000714) | 0.006743 / 0.004328 (0.002415) | 0.088575 / 0.004250 (0.084324) | 0.066003 / 0.037052 (0.028951) | 0.580080 / 0.258489 (0.321591) | 0.655567 / 0.293841 (0.361726) | 0.065295 / 0.128546 (-0.063252) | 0.021105 / 0.075646 (-0.054541) | 0.120044 / 0.419271 (-0.299227) | 0.081133 / 0.043533 (0.037600) | 0.570322 / 0.255139 (0.315183) | 0.581134 / 0.283200 (0.297934) | 0.046298 / 0.141683 (-0.095385) | 2.113200 / 1.452155 (0.661045) | 2.344187 / 1.492716 (0.851471) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.284517 / 0.018006 (0.266511) | 0.611834 / 0.000490 (0.611345) | 0.005581 / 0.000200 (0.005381) | 0.000153 / 0.000054 (0.000098) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.042162 / 0.037411 (0.004750) | 0.114496 / 0.014526 (0.099970) | 0.134034 / 0.176557 (-0.042523) | 0.201649 / 0.737135 (-0.535486) | 0.143235 / 0.296338 (-0.153103) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.764863 / 0.215209 (0.549654) | 7.603076 / 2.077655 (5.525421) | 3.318911 / 1.504120 (1.814791) | 2.939815 / 1.541195 (1.398620) | 2.870911 / 1.468490 (1.402421) | 1.171978 / 4.584777 (-3.412799) | 6.479933 / 3.745712 (2.734221) | 5.944387 / 5.269862 (0.674526) | 4.282625 / 4.565676 (-0.283051) | 0.123672 / 0.424275 (-0.300603) | 0.009666 / 0.007607 (0.002059) | 0.870683 / 0.226044 (0.644638) | 9.187788 / 2.268929 (6.918859) | 4.431818 / 55.444624 (-51.012807) | 3.460457 / 6.876477 (-3.416020) | 3.708198 / 2.142072 (1.566126) | 1.353673 / 4.805227 (-3.451554) | 0.264274 / 6.500664 (-6.236390) | 0.074943 / 0.075469 (-0.000526) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 2.073810 / 1.841788 (0.232023) | 29.182464 / 8.074308 (21.108156) | 30.527040 / 10.191392 (20.335648) | 0.307561 / 0.680424 (-0.372863) | 0.047384 / 0.534201 (-0.486817) | 0.662760 / 0.579283 (0.083477) | 0.768321 / 0.434364 (0.333957) | 0.692296 / 0.540337 (0.151959) | 0.955197 / 1.386936 (-0.431739) 
|\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#1e82d6f017c7fc0ab6b65847c1e34772c880d3b7 \"CML watermark\")\n" ]
2023-10-23T09:05:26Z
2023-11-07T08:23:10Z
2023-11-07T08:09:06Z
MEMBER
null
null
null
Replace deprecated license_file in `setup.cfg`. See: https://github.com/huggingface/datasets/actions/runs/6610930650/job/17953825724?pr=6331 ``` /tmp/pip-build-env-a51hls20/overlay/lib/python3.8/site-packages/setuptools/config/setupcfg.py:293: _DeprecatedConfig: Deprecated config in `setup.cfg` !! ******************************************************************************** The license_file parameter is deprecated, use license_files instead. By 2023-Oct-30, you need to update your project and remove deprecated calls or your builds will no longer be supported. See https://setuptools.pypa.io/en/latest/userguide/declarative_config.html for details. ******************************************************************************** !! ``` A sketch of the corresponding `setup.cfg` change is shown after this record.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6332/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6332/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6332.diff", "html_url": "https://github.com/huggingface/datasets/pull/6332", "merged_at": "2023-11-07T08:09:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/6332.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6332" }
https://api.github.com/repos/huggingface/datasets/issues/4718
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4718/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4718/comments
https://api.github.com/repos/huggingface/datasets/issues/4718/events
https://github.com/huggingface/datasets/pull/4718
1,309,520,453
PR_kwDODunzps47prWR
4,718
Make Extractor accept Path as input
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-07-19T13:25:06Z
2022-07-22T13:42:27Z
2022-07-22T13:29:43Z
MEMBER
null
null
null
This PR: - Makes `Extractor` accept instances of `Path` as input - Removes unnecessary casting of `Path` to `str`. An illustrative sketch of the pattern follows this record.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4718/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4718/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4718.diff", "html_url": "https://github.com/huggingface/datasets/pull/4718", "merged_at": "2022-07-22T13:29:43Z", "patch_url": "https://github.com/huggingface/datasets/pull/4718.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4718" }
https://api.github.com/repos/huggingface/datasets/issues/7394
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7394/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7394/comments
https://api.github.com/repos/huggingface/datasets/issues/7394/events
https://github.com/huggingface/datasets/issues/7394
2,847,172,115
I_kwDODunzps6ptGYT
7,394
Using load_dataset with data_files and split arguments yields an error
{ "avatar_url": "https://avatars.githubusercontent.com/u/61103399?v=4", "events_url": "https://api.github.com/users/devon-research/events{/privacy}", "followers_url": "https://api.github.com/users/devon-research/followers", "following_url": "https://api.github.com/users/devon-research/following{/other_user}", "gists_url": "https://api.github.com/users/devon-research/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/devon-research", "id": 61103399, "login": "devon-research", "node_id": "MDQ6VXNlcjYxMTAzMzk5", "organizations_url": "https://api.github.com/users/devon-research/orgs", "received_events_url": "https://api.github.com/users/devon-research/received_events", "repos_url": "https://api.github.com/users/devon-research/repos", "site_admin": false, "starred_url": "https://api.github.com/users/devon-research/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/devon-research/subscriptions", "type": "User", "url": "https://api.github.com/users/devon-research", "user_view_type": "public" }
[]
open
false
null
[]
null
[]
2025-02-12T04:50:11Z
2025-02-12T04:50:11Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug It seems the list of valid splits recorded by the package is incorrectly overwritten when the `data_files` argument is used. If I run ```python from datasets import load_dataset load_dataset("allenai/super", split="all_examples", data_files="tasks/expert.jsonl") ``` then I get the error ``` ValueError: Unknown split "all_examples". Should be one of ['train']. ``` However, if I run ```python from datasets import load_dataset load_dataset("allenai/super", split="train", name="Expert") ``` then I get ``` ValueError: Unknown split "train". Should be one of ['all_examples']. ``` ### Steps to reproduce the bug Run ```python from datasets import load_dataset load_dataset("allenai/super", split="all_examples", data_files="tasks/expert.jsonl") ``` ### Expected behavior No error. ### Environment info Python = 3.12 datasets = 3.2.0 A possible workaround is sketched after this record.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7394/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7394/timeline
null
null
null
null
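One possible workaround for the issue above while the split-name mismatch stands (untested against this exact dataset, so treat it as a sketch): load without the `split` argument and index into the returned `DatasetDict`, whose keys show the split names that were actually registered.

```python
from datasets import load_dataset

# Loading without `split` returns a DatasetDict keyed by the split
# names that were actually registered.
ds_dict = load_dataset("allenai/super", data_files="tasks/expert.jsonl")
print(list(ds_dict.keys()))  # inspect which split names exist

# Index into whichever split name is present instead of guessing it
# up front in the `split` argument.
ds = ds_dict[next(iter(ds_dict))]
```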
https://api.github.com/repos/huggingface/datasets/issues/5845
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5845/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5845/comments
https://api.github.com/repos/huggingface/datasets/issues/5845/events
https://github.com/huggingface/datasets/pull/5845
1,706,253,251
PR_kwDODunzps5QUMjS
5,845
Add `date_format` param to the CSV reader
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007592 / 0.011353 (-0.003761) | 0.005223 / 0.011008 (-0.005786) | 0.110218 / 0.038508 (0.071710) | 0.027644 / 0.023109 (0.004534) | 0.335063 / 0.275898 (0.059165) | 0.347102 / 0.323480 (0.023623) | 0.005107 / 0.007986 (-0.002878) | 0.003932 / 0.004328 (-0.000396) | 0.086095 / 0.004250 (0.081845) | 0.034735 / 0.037052 (-0.002317) | 0.329029 / 0.258489 (0.070540) | 0.370282 / 0.293841 (0.076441) | 0.043040 / 0.128546 (-0.085507) | 0.019626 / 0.075646 (-0.056021) | 0.336452 / 0.419271 (-0.082819) | 0.070365 / 0.043533 (0.026832) | 0.326881 / 0.255139 (0.071742) | 0.354984 / 0.283200 (0.071785) | 0.102605 / 0.141683 (-0.039077) | 1.459161 / 1.452155 (0.007007) | 1.453599 / 1.492716 (-0.039117) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.201021 / 0.018006 (0.183015) | 0.456415 / 0.000490 (0.455926) | 0.012349 / 0.000200 (0.012149) | 0.000115 / 0.000054 (0.000061) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.025199 / 0.037411 (-0.012213) | 0.098536 / 0.014526 (0.084010) | 0.107528 / 0.176557 (-0.069028) | 0.160492 / 0.737135 (-0.576643) | 0.108660 / 0.296338 (-0.187679) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.527020 / 0.215209 (0.311811) | 5.357635 / 2.077655 (3.279980) | 
2.062930 / 1.504120 (0.558811) | 1.783009 / 1.541195 (0.241815) | 1.840225 / 1.468490 (0.371735) | 1.074278 / 4.584777 (-3.510499) | 4.710533 / 3.745712 (0.964821) | 2.611202 / 5.269862 (-2.658660) | 1.885487 / 4.565676 (-2.680189) | 0.123201 / 0.424275 (-0.301074) | 0.013880 / 0.007607 (0.006273) | 0.636511 / 0.226044 (0.410467) | 6.516075 / 2.268929 (4.247146) | 2.710138 / 55.444624 (-52.734486) | 2.046606 / 6.876477 (-4.829871) | 2.085907 / 2.142072 (-0.056166) | 1.199489 / 4.805227 (-3.605738) | 0.211668 / 6.500664 (-6.288996) | 0.075436 / 0.075469 (-0.000033) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.219771 / 1.841788 (-0.622016) | 14.276215 / 8.074308 (6.201907) | 16.611529 / 10.191392 (6.420137) | 0.221091 / 0.680424 (-0.459333) | 0.024922 / 0.534201 (-0.509279) | 0.431906 / 0.579283 (-0.147377) | 0.518863 / 0.434364 (0.084499) | 0.515366 / 0.540337 (-0.024971) | 0.640411 / 1.386936 (-0.746525) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007955 / 0.011353 (-0.003398) | 0.004813 / 0.011008 (-0.006196) | 0.076508 / 0.038508 (0.038000) | 0.028137 / 0.023109 (0.005028) | 0.349609 / 0.275898 (0.073711) | 0.403588 / 0.323480 (0.080109) | 0.005456 / 0.007986 (-0.002530) | 0.005677 / 0.004328 (0.001349) | 0.076882 / 0.004250 (0.072632) | 0.039832 / 0.037052 (0.002779) | 0.351930 / 0.258489 (0.093440) | 0.390492 / 0.293841 (0.096651) | 0.045199 / 0.128546 (-0.083347) | 0.023945 / 0.075646 (-0.051701) | 0.091140 / 0.419271 (-0.328132) | 0.057728 / 0.043533 (0.014195) | 0.370663 / 0.255139 (0.115524) | 0.380649 / 0.283200 (0.097449) | 0.097017 / 0.141683 (-0.044666) | 1.362248 / 1.452155 (-0.089907) | 1.445699 / 1.492716 (-0.047018) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.204207 / 0.018006 (0.186201) | 0.474471 / 0.000490 (0.473981) | 0.012187 / 0.000200 (0.011987) | 0.000151 / 0.000054 (0.000096) |\n\n### Benchmark: 
benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.023123 / 0.037411 (-0.014288) | 0.097547 / 0.014526 (0.083021) | 0.113877 / 0.176557 (-0.062679) | 0.158307 / 0.737135 (-0.578828) | 0.113876 / 0.296338 (-0.182462) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.519920 / 0.215209 (0.304711) | 5.384371 / 2.077655 (3.306716) | 2.263276 / 1.504120 (0.759156) | 1.960604 / 1.541195 (0.419409) | 2.022864 / 1.468490 (0.554374) | 1.015430 / 4.584777 (-3.569347) | 4.774426 / 3.745712 (1.028714) | 4.549598 / 5.269862 (-0.720264) | 2.412638 / 4.565676 (-2.153039) | 0.117983 / 0.424275 (-0.306292) | 0.013340 / 0.007607 (0.005733) | 0.639826 / 0.226044 (0.413782) | 6.491622 / 2.268929 (4.222693) | 2.946892 / 55.444624 (-52.497732) | 2.376393 / 6.876477 (-4.500084) | 2.285592 / 2.142072 (0.143519) | 1.185049 / 4.805227 (-3.620178) | 0.204127 / 6.500664 (-6.296537) | 0.070285 / 0.075469 (-0.005184) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.439736 / 1.841788 (-0.402052) | 14.852087 / 8.074308 (6.777779) | 15.675742 / 10.191392 (5.484350) | 0.206577 / 0.680424 (-0.473846) | 0.031688 / 0.534201 (-0.502513) | 0.471003 / 0.579283 (-0.108280) | 0.505449 / 0.434364 (0.071085) | 0.506114 / 0.540337 (-0.034224) | 0.583752 / 1.386936 (-0.803184) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#d6fcff8a031db39cb31079bc1fa62ded6e35218c \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after 
write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.012965 / 0.011353 (0.001612) | 0.006660 / 0.011008 (-0.004348) | 0.126060 / 0.038508 (0.087551) | 0.041154 / 0.023109 (0.018045) | 0.413428 / 0.275898 (0.137530) | 0.429035 / 0.323480 (0.105555) | 0.006680 / 0.007986 (-0.001305) | 0.005063 / 0.004328 (0.000734) | 0.092161 / 0.004250 (0.087911) | 0.056092 / 0.037052 (0.019039) | 0.421460 / 0.258489 (0.162971) | 0.450291 / 0.293841 (0.156450) | 0.050820 / 0.128546 (-0.077726) | 0.021392 / 0.075646 (-0.054255) | 0.426915 / 0.419271 (0.007643) | 0.064908 / 0.043533 (0.021375) | 0.406769 / 0.255139 (0.151630) | 0.434344 / 0.283200 (0.151144) | 0.127967 / 0.141683 (-0.013716) | 1.922414 / 1.452155 (0.470260) | 1.940717 / 1.492716 (0.448000) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.288024 / 0.018006 (0.270017) | 0.615859 / 0.000490 (0.615369) | 0.007095 / 0.000200 (0.006895) | 0.000160 / 0.000054 (0.000106) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.028182 / 0.037411 (-0.009230) | 0.126277 / 0.014526 (0.111752) | 0.131687 / 0.176557 (-0.044870) | 0.206191 / 0.737135 (-0.530944) | 0.141799 / 0.296338 (-0.154539) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.631580 / 0.215209 (0.416371) | 6.141942 / 2.077655 (4.064287) | 2.476721 / 1.504120 (0.972602) | 2.128850 / 1.541195 (0.587655) | 2.236468 / 1.468490 (0.767978) | 1.188665 / 4.584777 (-3.396112) | 5.481179 / 3.745712 (1.735467) | 3.120333 / 5.269862 (-2.149529) | 2.365889 / 4.565676 (-2.199787) | 0.145081 / 0.424275 (-0.279194) | 0.015866 / 0.007607 (0.008259) | 0.795650 / 0.226044 (0.569605) | 7.595289 / 2.268929 (5.326361) | 3.174418 / 55.444624 (-52.270207) | 2.905207 / 6.876477 (-3.971270) | 2.428263 / 2.142072 (0.286191) | 1.408900 / 4.805227 (-3.396328) | 0.265485 / 6.500664 (-6.235179) | 0.083882 / 0.075469 (0.008413) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.517025 / 1.841788 (-0.324762) | 18.110288 / 8.074308 (10.035980) | 20.810003 / 10.191392 (10.618611) | 0.210380 / 0.680424 (-0.470044) | 0.030180 / 0.534201 (-0.504021) | 0.523453 / 0.579283 (-0.055830) | 0.603896 / 0.434364 (0.169532) | 0.622554 / 0.540337 (0.082216) 
| 0.737973 / 1.386936 (-0.648963) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.009795 / 0.011353 (-0.001558) | 0.006269 / 0.011008 (-0.004739) | 0.099938 / 0.038508 (0.061430) | 0.035162 / 0.023109 (0.012052) | 0.506353 / 0.275898 (0.230455) | 0.527804 / 0.323480 (0.204324) | 0.007211 / 0.007986 (-0.000775) | 0.005498 / 0.004328 (0.001169) | 0.098325 / 0.004250 (0.094075) | 0.054513 / 0.037052 (0.017461) | 0.525764 / 0.258489 (0.267274) | 0.576699 / 0.293841 (0.282858) | 0.052800 / 0.128546 (-0.075747) | 0.021192 / 0.075646 (-0.054454) | 0.117676 / 0.419271 (-0.301596) | 0.055415 / 0.043533 (0.011882) | 0.516746 / 0.255139 (0.261607) | 0.528417 / 0.283200 (0.245217) | 0.116947 / 0.141683 (-0.024735) | 1.757864 / 1.452155 (0.305709) | 2.043632 / 1.492716 (0.550916) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.284018 / 0.018006 (0.266011) | 0.595086 / 0.000490 (0.594596) | 0.001945 / 0.000200 (0.001745) | 0.000127 / 0.000054 (0.000073) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.032255 / 0.037411 (-0.005157) | 0.128201 / 0.014526 (0.113676) | 0.139189 / 0.176557 (-0.037367) | 0.199750 / 0.737135 (-0.537385) | 0.149406 / 0.296338 (-0.146933) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.652184 / 0.215209 (0.436975) | 6.453319 / 2.077655 (4.375664) | 2.831566 / 1.504120 (1.327446) | 2.453064 / 1.541195 (0.911869) | 2.622056 / 1.468490 
(1.153566) | 1.191279 / 4.584777 (-3.393498) | 5.504720 / 3.745712 (1.759007) | 5.916900 / 5.269862 (0.647038) | 2.974400 / 4.565676 (-1.591277) | 0.142851 / 0.424275 (-0.281424) | 0.015241 / 0.007607 (0.007634) | 0.917537 / 0.226044 (0.691493) | 8.277645 / 2.268929 (6.008717) | 3.700495 / 55.444624 (-51.744130) | 3.047127 / 6.876477 (-3.829350) | 3.093216 / 2.142072 (0.951143) | 1.413529 / 4.805227 (-3.391698) | 0.259395 / 6.500664 (-6.241270) | 0.083144 / 0.075469 (0.007675) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.632240 / 1.841788 (-0.209548) | 18.687403 / 8.074308 (10.613095) | 20.134091 / 10.191392 (9.942699) | 0.238792 / 0.680424 (-0.441632) | 0.027645 / 0.534201 (-0.506556) | 0.518200 / 0.579283 (-0.061083) | 0.613535 / 0.434364 (0.179171) | 0.631414 / 0.540337 (0.091076) | 0.724658 / 1.386936 (-0.662278) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#ac7caa5e195ad76c7e8ef98914813383f4f668cf \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006228 / 0.011353 (-0.005125) | 0.004517 / 0.011008 (-0.006492) | 0.097998 / 0.038508 (0.059490) | 0.027903 / 0.023109 (0.004793) | 0.309789 / 0.275898 (0.033891) | 0.332784 / 0.323480 (0.009304) | 0.004757 / 0.007986 (-0.003228) | 0.003348 / 0.004328 (-0.000981) | 0.075193 / 0.004250 (0.070942) | 0.037382 / 0.037052 (0.000330) | 0.306929 / 0.258489 (0.048440) | 0.347304 / 0.293841 (0.053463) | 0.030235 / 0.128546 (-0.098312) | 0.011516 / 0.075646 (-0.064131) | 0.322249 / 0.419271 (-0.097023) | 0.044125 / 0.043533 (0.000592) | 0.303874 / 0.255139 (0.048735) | 0.326808 / 0.283200 (0.043608) | 0.088137 / 0.141683 (-0.053546) | 1.521426 / 1.452155 (0.069272) | 1.573823 / 1.492716 (0.081107) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.203204 / 0.018006 (0.185197) | 0.402247 / 0.000490 (0.401757) | 0.003146 / 0.000200 (0.002946) | 
0.000088 / 0.000054 (0.000034) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022955 / 0.037411 (-0.014456) | 0.096059 / 0.014526 (0.081533) | 0.105552 / 0.176557 (-0.071004) | 0.167459 / 0.737135 (-0.569676) | 0.106723 / 0.296338 (-0.189615) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.454626 / 0.215209 (0.239417) | 4.556346 / 2.077655 (2.478691) | 2.220349 / 1.504120 (0.716229) | 2.011820 / 1.541195 (0.470625) | 2.048149 / 1.468490 (0.579659) | 0.697583 / 4.584777 (-3.887194) | 3.428394 / 3.745712 (-0.317318) | 1.863872 / 5.269862 (-3.405989) | 1.159691 / 4.565676 (-3.405985) | 0.082598 / 0.424275 (-0.341677) | 0.012202 / 0.007607 (0.004594) | 0.555617 / 0.226044 (0.329572) | 5.545481 / 2.268929 (3.276553) | 2.650850 / 55.444624 (-52.793775) | 2.305864 / 6.876477 (-4.570613) | 2.392252 / 2.142072 (0.250179) | 0.808512 / 4.805227 (-3.996716) | 0.152086 / 6.500664 (-6.348578) | 0.066440 / 0.075469 (-0.009029) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.211789 / 1.841788 (-0.629999) | 13.515546 / 8.074308 (5.441238) | 13.859870 / 10.191392 (3.668478) | 0.150335 / 0.680424 (-0.530088) | 0.016578 / 0.534201 (-0.517623) | 0.379145 / 0.579283 (-0.200138) | 0.393735 / 0.434364 (-0.040628) | 0.460219 / 0.540337 (-0.080118) | 0.555896 / 1.386936 (-0.831040) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006402 / 0.011353 (-0.004950) | 0.004558 / 0.011008 (-0.006450) | 0.077332 / 0.038508 (0.038824) | 0.027955 / 0.023109 (0.004846) | 0.407877 / 0.275898 (0.131979) | 0.432552 / 0.323480 (0.109072) | 0.004850 / 0.007986 (-0.003135) | 0.003329 / 0.004328 (-0.000999) | 0.075767 / 0.004250 (0.071517) | 0.035940 / 0.037052 (-0.001112) | 0.419544 / 0.258489 (0.161055) | 0.454672 / 0.293841 (0.160831) | 0.030461 / 0.128546 (-0.098085) | 0.011536 / 0.075646 (-0.064111) | 0.085774 / 0.419271 (-0.333498) | 0.039408 / 0.043533 (-0.004125) | 0.389909 / 0.255139 (0.134770) | 0.403287 / 0.283200 (0.120088) | 0.088385 / 0.141683 (-0.053298) | 1.596840 / 1.452155 (0.144686) | 1.659296 / 1.492716 (0.166580) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.216349 / 0.018006 (0.198342) | 0.394969 / 0.000490 (0.394479) | 0.000408 / 0.000200 (0.000208) | 0.000059 / 0.000054 (0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.024346 / 0.037411 (-0.013066) | 0.099609 / 0.014526 (0.085084) | 0.106779 / 0.176557 (-0.069778) | 0.156889 / 0.737135 (-0.580247) | 0.110625 / 0.296338 (-0.185714) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.443809 / 0.215209 (0.228600) | 4.450524 / 2.077655 (2.372870) | 2.151694 / 1.504120 (0.647574) | 1.952521 / 1.541195 (0.411326) | 1.963320 / 1.468490 (0.494830) | 0.709291 / 4.584777 (-3.875486) | 3.415708 / 3.745712 (-0.330005) | 1.850498 / 5.269862 (-3.419363) | 1.164355 / 4.565676 (-3.401321) | 0.084977 / 0.424275 (-0.339298) | 0.013284 / 0.007607 (0.005677) | 0.555103 / 0.226044 (0.329059) | 5.583587 / 2.268929 (3.314658) | 2.608754 / 55.444624 (-52.835870) | 2.264079 / 6.876477 (-4.612398) | 2.272455 / 2.142072 (0.130382) | 0.820849 / 4.805227 (-3.984379) | 0.155063 / 6.500664 (-6.345601) | 0.069709 / 0.075469 (-0.005760) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.293285 / 1.841788 (-0.548503) | 14.181867 / 8.074308 (6.107559) | 13.021280 / 10.191392 (2.829888) | 0.130101 / 0.680424 (-0.550323) | 0.016461 / 0.534201 (-0.517740) | 0.383651 / 0.579283 (-0.195632) | 0.387353 / 0.434364 (-0.047011) | 0.443351 / 0.540337 (-0.096986) | 0.529448 / 1.386936 (-0.857488) 
|\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#05145d50b5bb1b7b42b76516cd6492d4868c46ba \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007513 / 0.011353 (-0.003840) | 0.005328 / 0.011008 (-0.005680) | 0.096937 / 0.038508 (0.058429) | 0.036230 / 0.023109 (0.013121) | 0.325808 / 0.275898 (0.049910) | 0.363601 / 0.323480 (0.040121) | 0.006130 / 0.007986 (-0.001855) | 0.004352 / 0.004328 (0.000023) | 0.073543 / 0.004250 (0.069293) | 0.054114 / 0.037052 (0.017062) | 0.328952 / 0.258489 (0.070463) | 0.366943 / 0.293841 (0.073102) | 0.035768 / 0.128546 (-0.092778) | 0.012505 / 0.075646 (-0.063142) | 0.332260 / 0.419271 (-0.087012) | 0.066673 / 0.043533 (0.023140) | 0.323866 / 0.255139 (0.068727) | 0.341311 / 0.283200 (0.058112) | 0.129898 / 0.141683 (-0.011785) | 1.456890 / 1.452155 (0.004735) | 1.546933 / 1.492716 (0.054217) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.299236 / 0.018006 (0.281229) | 0.496134 / 0.000490 (0.495645) | 0.004233 / 0.000200 (0.004033) | 0.000081 / 0.000054 (0.000027) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.028089 / 0.037411 (-0.009322) | 0.104723 / 0.014526 (0.090197) | 0.121032 / 0.176557 (-0.055525) | 0.179916 / 0.737135 (-0.557220) | 0.126628 / 0.296338 (-0.169711) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.403497 / 0.215209 
(0.188288) | 4.052481 / 2.077655 (1.974827) | 1.804419 / 1.504120 (0.300299) | 1.619833 / 1.541195 (0.078638) | 1.732438 / 1.468490 (0.263948) | 0.702474 / 4.584777 (-3.882303) | 3.808973 / 3.745712 (0.063261) | 3.682764 / 5.269862 (-1.587098) | 1.919184 / 4.565676 (-2.646493) | 0.086638 / 0.424275 (-0.337637) | 0.012265 / 0.007607 (0.004658) | 0.501273 / 0.226044 (0.275229) | 5.010918 / 2.268929 (2.741989) | 2.278114 / 55.444624 (-53.166510) | 1.942266 / 6.876477 (-4.934211) | 2.101982 / 2.142072 (-0.040091) | 0.847622 / 4.805227 (-3.957606) | 0.172973 / 6.500664 (-6.327691) | 0.066884 / 0.075469 (-0.008586) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.187609 / 1.841788 (-0.654179) | 15.089485 / 8.074308 (7.015177) | 14.787398 / 10.191392 (4.596006) | 0.168254 / 0.680424 (-0.512170) | 0.018266 / 0.534201 (-0.515935) | 0.423204 / 0.579283 (-0.156079) | 0.435238 / 0.434364 (0.000874) | 0.512473 / 0.540337 (-0.027864) | 0.618091 / 1.386936 (-0.768845) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007249 / 0.011353 (-0.004104) | 0.005297 / 0.011008 (-0.005711) | 0.076428 / 0.038508 (0.037920) | 0.033565 / 0.023109 (0.010456) | 0.373756 / 0.275898 (0.097858) | 0.407405 / 0.323480 (0.083925) | 0.006100 / 0.007986 (-0.001886) | 0.006482 / 0.004328 (0.002153) | 0.075884 / 0.004250 (0.071633) | 0.055338 / 0.037052 (0.018286) | 0.378721 / 0.258489 (0.120232) | 0.427065 / 0.293841 (0.133224) | 0.036285 / 0.128546 (-0.092261) | 0.012460 / 0.075646 (-0.063186) | 0.087641 / 0.419271 (-0.331630) | 0.048199 / 0.043533 (0.004666) | 0.386785 / 0.255139 (0.131646) | 0.386702 / 0.283200 (0.103503) | 0.110087 / 0.141683 (-0.031596) | 1.511204 / 1.452155 (0.059050) | 1.585671 / 1.492716 (0.092954) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.313558 / 0.018006 (0.295552) | 0.496991 / 0.000490 (0.496501) | 0.001492 / 0.000200 (0.001292) | 0.000093 / 0.000054 
(0.000038) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.031814 / 0.037411 (-0.005597) | 0.113486 / 0.014526 (0.098960) | 0.125208 / 0.176557 (-0.051348) | 0.174469 / 0.737135 (-0.562666) | 0.131095 / 0.296338 (-0.165244) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.439282 / 0.215209 (0.224073) | 4.362286 / 2.077655 (2.284631) | 2.153271 / 1.504120 (0.649151) | 1.990482 / 1.541195 (0.449288) | 2.103322 / 1.468490 (0.634831) | 0.692522 / 4.584777 (-3.892254) | 3.861931 / 3.745712 (0.116219) | 3.686294 / 5.269862 (-1.583567) | 1.734525 / 4.565676 (-2.831152) | 0.085057 / 0.424275 (-0.339218) | 0.012116 / 0.007607 (0.004509) | 0.547996 / 0.226044 (0.321952) | 5.513835 / 2.268929 (3.244906) | 2.723829 / 55.444624 (-52.720795) | 2.404715 / 6.876477 (-4.471761) | 2.514768 / 2.142072 (0.372696) | 0.834972 / 4.805227 (-3.970255) | 0.168261 / 6.500664 (-6.332403) | 0.066464 / 0.075469 (-0.009005) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.259923 / 1.841788 (-0.581865) | 15.646277 / 8.074308 (7.571969) | 13.097598 / 10.191392 (2.906206) | 0.187991 / 0.680424 (-0.492433) | 0.017358 / 0.534201 (-0.516843) | 0.427979 / 0.579283 (-0.151304) | 0.425747 / 0.434364 (-0.008617) | 0.501907 / 0.540337 (-0.038431) | 0.595106 / 1.386936 (-0.791830) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#db56f7f0d2f0b99af4da17d388c205152504c7d9 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | 
read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.009378 / 0.011353 (-0.001975) | 0.006434 / 0.011008 (-0.004574) | 0.120603 / 0.038508 (0.082095) | 0.042929 / 0.023109 (0.019820) | 0.366853 / 0.275898 (0.090955) | 0.436795 / 0.323480 (0.113315) | 0.007730 / 0.007986 (-0.000256) | 0.004842 / 0.004328 (0.000513) | 0.091058 / 0.004250 (0.086808) | 0.058256 / 0.037052 (0.021203) | 0.378692 / 0.258489 (0.120203) | 0.467384 / 0.293841 (0.173543) | 0.042948 / 0.128546 (-0.085598) | 0.015172 / 0.075646 (-0.060475) | 0.409225 / 0.419271 (-0.010046) | 0.083672 / 0.043533 (0.040140) | 0.390088 / 0.255139 (0.134949) | 0.406965 / 0.283200 (0.123765) | 0.142132 / 0.141683 (0.000449) | 1.765737 / 1.452155 (0.313582) | 1.895419 / 1.492716 (0.402703) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.244052 / 0.018006 (0.226046) | 0.553383 / 0.000490 (0.552893) | 0.006798 / 0.000200 (0.006598) | 0.000227 / 0.000054 (0.000173) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.032032 / 0.037411 (-0.005380) | 0.129990 / 0.014526 (0.115464) | 0.140338 / 0.176557 (-0.036219) | 0.212155 / 0.737135 (-0.524980) | 0.147395 / 0.296338 (-0.148943) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.478760 / 0.215209 (0.263551) | 4.751335 / 2.077655 (2.673680) | 2.164755 / 1.504120 (0.660635) | 1.944288 / 1.541195 (0.403094) | 2.077657 / 1.468490 (0.609167) | 0.818519 / 4.584777 (-3.766258) | 4.689013 / 3.745712 (0.943301) | 2.484079 / 5.269862 (-2.785782) | 1.788632 / 4.565676 (-2.777044) | 0.100484 / 0.424275 (-0.323791) | 0.013838 / 0.007607 (0.006231) | 0.589650 / 0.226044 (0.363605) | 5.859461 / 2.268929 (3.590533) | 2.670025 / 55.444624 (-52.774599) | 2.688709 / 6.876477 (-4.187768) | 2.408060 / 2.142072 (0.265988) | 0.972107 / 4.805227 (-3.833120) | 0.194425 / 6.500664 (-6.306239) | 0.076077 / 0.075469 (0.000608) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.430150 / 1.841788 (-0.411638) | 17.710507 / 8.074308 (9.636199) | 16.210789 / 10.191392 (6.019397) | 0.163940 / 0.680424 (-0.516484) | 0.020295 / 0.534201 (-0.513906) | 0.472596 / 0.579283 (-0.106687) | 0.483107 / 0.434364 (0.048743) | 0.585269 
/ 0.540337 (0.044931) | 0.705526 / 1.386936 (-0.681410) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.008864 / 0.011353 (-0.002489) | 0.006095 / 0.011008 (-0.004913) | 0.088702 / 0.038508 (0.050194) | 0.041596 / 0.023109 (0.018486) | 0.453515 / 0.275898 (0.177617) | 0.476217 / 0.323480 (0.152737) | 0.007574 / 0.007986 (-0.000412) | 0.004727 / 0.004328 (0.000398) | 0.087271 / 0.004250 (0.083021) | 0.059631 / 0.037052 (0.022578) | 0.449379 / 0.258489 (0.190890) | 0.494436 / 0.293841 (0.200595) | 0.043448 / 0.128546 (-0.085098) | 0.014580 / 0.075646 (-0.061067) | 0.103836 / 0.419271 (-0.315435) | 0.057537 / 0.043533 (0.014004) | 0.449359 / 0.255139 (0.194220) | 0.447577 / 0.283200 (0.164377) | 0.123600 / 0.141683 (-0.018083) | 1.748448 / 1.452155 (0.296294) | 1.902116 / 1.492716 (0.409399) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.237214 / 0.018006 (0.219207) | 0.497648 / 0.000490 (0.497158) | 0.003519 / 0.000200 (0.003319) | 0.000112 / 0.000054 (0.000058) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.034477 / 0.037411 (-0.002934) | 0.132627 / 0.014526 (0.118101) | 0.139721 / 0.176557 (-0.036836) | 0.195705 / 0.737135 (-0.541430) | 0.150762 / 0.296338 (-0.145577) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.521306 / 0.215209 (0.306097) | 5.184982 / 2.077655 (3.107328) | 2.503979 / 1.504120 (0.999859) | 2.301054 / 1.541195 (0.759860) | 
2.352713 / 1.468490 (0.884222) | 0.819804 / 4.584777 (-3.764973) | 4.584011 / 3.745712 (0.838299) | 2.497311 / 5.269862 (-2.772550) | 1.561262 / 4.565676 (-3.004414) | 0.101814 / 0.424275 (-0.322461) | 0.014078 / 0.007607 (0.006471) | 0.666564 / 0.226044 (0.440520) | 6.616379 / 2.268929 (4.347450) | 3.263892 / 55.444624 (-52.180732) | 2.891774 / 6.876477 (-3.984703) | 2.945260 / 2.142072 (0.803188) | 1.014379 / 4.805227 (-3.790848) | 0.201762 / 6.500664 (-6.298902) | 0.078012 / 0.075469 (0.002543) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.567808 / 1.841788 (-0.273980) | 19.096552 / 8.074308 (11.022244) | 15.522285 / 10.191392 (5.330893) | 0.226568 / 0.680424 (-0.453856) | 0.021078 / 0.534201 (-0.513123) | 0.501686 / 0.579283 (-0.077597) | 0.517575 / 0.434364 (0.083211) | 0.589685 / 0.540337 (0.049348) | 0.705053 / 1.386936 (-0.681883) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#db56f7f0d2f0b99af4da17d388c205152504c7d9 \"CML watermark\")\n" ]
2023-05-11T17:29:57Z
2023-05-15T07:39:13Z
2023-05-12T15:14:48Z
COLLABORATOR
null
null
null
Adds the `date_format` param introduced in Pandas 2.0 to the CSV reader and improves its type hints (see the usage sketch after this record).
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5845/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5845/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5845.diff", "html_url": "https://github.com/huggingface/datasets/pull/5845", "merged_at": "2023-05-12T15:14:48Z", "patch_url": "https://github.com/huggingface/datasets/pull/5845.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5845" }
https://api.github.com/repos/huggingface/datasets/issues/5974
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5974/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5974/comments
https://api.github.com/repos/huggingface/datasets/issues/5974/events
https://github.com/huggingface/datasets/pull/5974
1,767,981,231
PR_kwDODunzps5TkXCb
5,974
Deprecate `errors` param in favor of `encoding_errors` in text builder
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006518 / 0.011353 (-0.004835) | 0.004121 / 0.011008 (-0.006887) | 0.103350 / 0.038508 (0.064842) | 0.045030 / 0.023109 (0.021920) | 0.351670 / 0.275898 (0.075772) | 0.408110 / 0.323480 (0.084630) | 0.003883 / 0.007986 (-0.004102) | 0.003352 / 0.004328 (-0.000977) | 0.078786 / 0.004250 (0.074535) | 0.063977 / 0.037052 (0.026925) | 0.369759 / 0.258489 (0.111270) | 0.415103 / 0.293841 (0.121262) | 0.033069 / 0.128546 (-0.095477) | 0.008863 / 0.075646 (-0.066783) | 0.353660 / 0.419271 (-0.065611) | 0.055714 / 0.043533 (0.012181) | 0.350458 / 0.255139 (0.095319) | 0.369505 / 0.283200 (0.086305) | 0.022822 / 0.141683 (-0.118861) | 1.537588 / 1.452155 (0.085433) | 1.590569 / 1.492716 (0.097853) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.206826 / 0.018006 (0.188819) | 0.471625 / 0.000490 (0.471135) | 0.005188 / 0.000200 (0.004988) | 0.000316 / 0.000054 (0.000261) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.028148 / 0.037411 (-0.009263) | 0.111941 / 0.014526 (0.097415) | 0.122106 / 0.176557 (-0.054451) | 0.181127 / 0.737135 (-0.556009) | 0.127534 / 0.296338 (-0.168805) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.409520 / 0.215209 (0.194311) | 4.098455 / 2.077655 (2.020800) | 
1.852447 / 1.504120 (0.348327) | 1.657036 / 1.541195 (0.115842) | 1.709624 / 1.468490 (0.241134) | 0.542806 / 4.584777 (-4.041970) | 3.809352 / 3.745712 (0.063640) | 1.855412 / 5.269862 (-3.414449) | 1.109180 / 4.565676 (-3.456497) | 0.066801 / 0.424275 (-0.357474) | 0.011832 / 0.007607 (0.004225) | 0.518338 / 0.226044 (0.292293) | 5.190108 / 2.268929 (2.921179) | 2.320602 / 55.444624 (-53.124023) | 1.991416 / 6.876477 (-4.885060) | 2.106989 / 2.142072 (-0.035084) | 0.668914 / 4.805227 (-4.136313) | 0.145325 / 6.500664 (-6.355340) | 0.065145 / 0.075469 (-0.010324) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.254706 / 1.841788 (-0.587082) | 14.707264 / 8.074308 (6.632956) | 14.615423 / 10.191392 (4.424031) | 0.170764 / 0.680424 (-0.509659) | 0.017905 / 0.534201 (-0.516296) | 0.435606 / 0.579283 (-0.143677) | 0.434648 / 0.434364 (0.000284) | 0.520813 / 0.540337 (-0.019524) | 0.633902 / 1.386936 (-0.753034) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007212 / 0.011353 (-0.004141) | 0.004301 / 0.011008 (-0.006707) | 0.080767 / 0.038508 (0.042258) | 0.051949 / 0.023109 (0.028840) | 0.398473 / 0.275898 (0.122575) | 0.465038 / 0.323480 (0.141558) | 0.005580 / 0.007986 (-0.002406) | 0.003556 / 0.004328 (-0.000773) | 0.080682 / 0.004250 (0.076431) | 0.059517 / 0.037052 (0.022464) | 0.421171 / 0.258489 (0.162682) | 0.459752 / 0.293841 (0.165911) | 0.032960 / 0.128546 (-0.095586) | 0.009107 / 0.075646 (-0.066539) | 0.086382 / 0.419271 (-0.332889) | 0.056053 / 0.043533 (0.012520) | 0.393357 / 0.255139 (0.138218) | 0.412972 / 0.283200 (0.129772) | 0.031115 / 0.141683 (-0.110568) | 1.576961 / 1.452155 (0.124806) | 1.627249 / 1.492716 (0.134533) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.227618 / 0.018006 (0.209612) | 0.444640 / 0.000490 (0.444150) | 0.004376 / 0.000200 (0.004176) | 0.000092 / 0.000054 (0.000038) |\n\n### Benchmark: 
benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.030826 / 0.037411 (-0.006586) | 0.117587 / 0.014526 (0.103062) | 0.127467 / 0.176557 (-0.049089) | 0.184440 / 0.737135 (-0.552695) | 0.133664 / 0.296338 (-0.162675) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.443183 / 0.215209 (0.227974) | 4.408312 / 2.077655 (2.330658) | 2.132487 / 1.504120 (0.628367) | 1.923632 / 1.541195 (0.382438) | 1.967882 / 1.468490 (0.499392) | 0.552954 / 4.584777 (-4.031823) | 3.777701 / 3.745712 (0.031989) | 1.857686 / 5.269862 (-3.412176) | 1.104847 / 4.565676 (-3.460829) | 0.068350 / 0.424275 (-0.355925) | 0.012437 / 0.007607 (0.004830) | 0.559258 / 0.226044 (0.333214) | 5.593258 / 2.268929 (3.324330) | 2.648059 / 55.444624 (-52.796565) | 2.277428 / 6.876477 (-4.599049) | 2.351685 / 2.142072 (0.209612) | 0.678750 / 4.805227 (-4.126477) | 0.145550 / 6.500664 (-6.355114) | 0.066556 / 0.075469 (-0.008913) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.327128 / 1.841788 (-0.514659) | 15.649079 / 8.074308 (7.574771) | 14.478659 / 10.191392 (4.287267) | 0.147633 / 0.680424 (-0.532791) | 0.018502 / 0.534201 (-0.515699) | 0.438556 / 0.579283 (-0.140727) | 0.433381 / 0.434364 (-0.000983) | 0.514367 / 0.540337 (-0.025970) | 0.618347 / 1.386936 (-0.768589) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#16aa1c886c5b499641a4bb3d8ce4a4f7de8244b7 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after 
write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006078 / 0.011353 (-0.005275) | 0.003914 / 0.011008 (-0.007095) | 0.102039 / 0.038508 (0.063531) | 0.037660 / 0.023109 (0.014551) | 0.348963 / 0.275898 (0.073065) | 0.407284 / 0.323480 (0.083804) | 0.004661 / 0.007986 (-0.003324) | 0.003253 / 0.004328 (-0.001076) | 0.078276 / 0.004250 (0.074025) | 0.054144 / 0.037052 (0.017091) | 0.376715 / 0.258489 (0.118225) | 0.418499 / 0.293841 (0.124658) | 0.027627 / 0.128546 (-0.100919) | 0.008494 / 0.075646 (-0.067152) | 0.316894 / 0.419271 (-0.102377) | 0.046560 / 0.043533 (0.003027) | 0.339835 / 0.255139 (0.084696) | 0.374628 / 0.283200 (0.091428) | 0.020729 / 0.141683 (-0.120954) | 1.502769 / 1.452155 (0.050615) | 1.548756 / 1.492716 (0.056040) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.229192 / 0.018006 (0.211186) | 0.426245 / 0.000490 (0.425756) | 0.005190 / 0.000200 (0.004990) | 0.000081 / 0.000054 (0.000026) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.024271 / 0.037411 (-0.013140) | 0.098869 / 0.014526 (0.084343) | 0.105079 / 0.176557 (-0.071477) | 0.164707 / 0.737135 (-0.572428) | 0.110337 / 0.296338 (-0.186002) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.426593 / 0.215209 (0.211383) | 4.293977 / 2.077655 (2.216323) | 1.928502 / 1.504120 (0.424382) | 1.728623 / 1.541195 (0.187428) | 1.792084 / 1.468490 (0.323594) | 0.568737 / 4.584777 (-4.016040) | 3.438534 / 3.745712 (-0.307178) | 1.797798 / 5.269862 (-3.472063) | 1.054078 / 4.565676 (-3.511598) | 0.068711 / 0.424275 (-0.355564) | 0.011250 / 0.007607 (0.003643) | 0.529299 / 0.226044 (0.303255) | 5.283965 / 2.268929 (3.015037) | 2.358274 / 55.444624 (-53.086350) | 2.012818 / 6.876477 (-4.863659) | 2.109923 / 2.142072 (-0.032149) | 0.679556 / 4.805227 (-4.125671) | 0.138346 / 6.500664 (-6.362318) | 0.066349 / 0.075469 (-0.009120) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.193994 / 1.841788 (-0.647794) | 14.073158 / 8.074308 (5.998850) | 13.488525 / 10.191392 (3.297133) | 0.144536 / 0.680424 (-0.535888) | 0.016748 / 0.534201 (-0.517453) | 0.362703 / 0.579283 (-0.216580) | 0.389511 / 0.434364 (-0.044853) | 0.427296 / 0.540337 
(-0.113041) | 0.513227 / 1.386936 (-0.873709) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006215 / 0.011353 (-0.005138) | 0.003834 / 0.011008 (-0.007174) | 0.078001 / 0.038508 (0.039493) | 0.036537 / 0.023109 (0.013428) | 0.369724 / 0.275898 (0.093826) | 0.426761 / 0.323480 (0.103281) | 0.003602 / 0.007986 (-0.004383) | 0.003001 / 0.004328 (-0.001327) | 0.075989 / 0.004250 (0.071739) | 0.048618 / 0.037052 (0.011566) | 0.374296 / 0.258489 (0.115807) | 0.430330 / 0.293841 (0.136489) | 0.028299 / 0.128546 (-0.100247) | 0.008537 / 0.075646 (-0.067109) | 0.083275 / 0.419271 (-0.335997) | 0.043136 / 0.043533 (-0.000397) | 0.359072 / 0.255139 (0.103933) | 0.387391 / 0.283200 (0.104192) | 0.021202 / 0.141683 (-0.120481) | 1.520832 / 1.452155 (0.068677) | 1.567030 / 1.492716 (0.074313) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.230944 / 0.018006 (0.212938) | 0.422159 / 0.000490 (0.421669) | 0.003447 / 0.000200 (0.003247) | 0.000125 / 0.000054 (0.000071) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.025442 / 0.037411 (-0.011969) | 0.103944 / 0.014526 (0.089418) | 0.110577 / 0.176557 (-0.065979) | 0.161393 / 0.737135 (-0.575743) | 0.113482 / 0.296338 (-0.182857) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.485765 / 0.215209 (0.270556) | 4.845737 / 2.077655 (2.768083) | 2.556732 / 1.504120 (1.052612) | 2.348638 / 1.541195 (0.807443) | 2.379289 
/ 1.468490 (0.910799) | 0.561261 / 4.584777 (-4.023516) | 3.482468 / 3.745712 (-0.263244) | 3.061319 / 5.269862 (-2.208543) | 1.483938 / 4.565676 (-3.081738) | 0.067584 / 0.424275 (-0.356691) | 0.011333 / 0.007607 (0.003726) | 0.594342 / 0.226044 (0.368297) | 5.935477 / 2.268929 (3.666548) | 3.025029 / 55.444624 (-52.419595) | 2.687032 / 6.876477 (-4.189445) | 2.752470 / 2.142072 (0.610398) | 0.674470 / 4.805227 (-4.130757) | 0.136777 / 6.500664 (-6.363887) | 0.068335 / 0.075469 (-0.007134) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.336456 / 1.841788 (-0.505332) | 14.376007 / 8.074308 (6.301699) | 14.171375 / 10.191392 (3.979983) | 0.159620 / 0.680424 (-0.520804) | 0.016685 / 0.534201 (-0.517516) | 0.364344 / 0.579283 (-0.214939) | 0.395358 / 0.434364 (-0.039006) | 0.424876 / 0.540337 (-0.115461) | 0.513267 / 1.386936 (-0.873669) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#6ed837325cb539a5deb99129e5ad181d0269e050 \"CML watermark\")\n" ]
2023-06-21T16:31:38Z
2023-06-26T10:34:43Z
2023-06-26T10:27:40Z
COLLABORATOR
null
null
null
For consistency with the JSON builder and Pandas (see the usage sketch after this record).
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5974/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5974/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5974.diff", "html_url": "https://github.com/huggingface/datasets/pull/5974", "merged_at": "2023-06-26T10:27:40Z", "patch_url": "https://github.com/huggingface/datasets/pull/5974.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5974" }
https://api.github.com/repos/huggingface/datasets/issues/4824
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4824/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4824/comments
https://api.github.com/repos/huggingface/datasets/issues/4824/events
https://github.com/huggingface/datasets/pull/4824
1,335,826,639
PR_kwDODunzps49BR5H
4,824
Fix titles in dataset cards
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "The non-passing tests are caused by other missing information in the dataset cards." ]
2022-08-11T11:27:48Z
2022-08-11T13:46:11Z
2022-08-11T12:56:49Z
MEMBER
null
null
null
Fix all the titles in the dataset cards so that they conform to the required format.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4824/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4824/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4824.diff", "html_url": "https://github.com/huggingface/datasets/pull/4824", "merged_at": "2022-08-11T12:56:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/4824.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4824" }
https://api.github.com/repos/huggingface/datasets/issues/6426
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6426/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6426/comments
https://api.github.com/repos/huggingface/datasets/issues/6426/events
https://github.com/huggingface/datasets/pull/6426
1,995,363,264
PR_kwDODunzps5fjOEK
6,426
More robust temporary directory deletion
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6426). All of your documentation changes will be reflected on that endpoint.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004750 / 0.011353 (-0.006603) | 0.002928 / 0.011008 (-0.008080) | 0.061962 / 0.038508 (0.023454) | 0.029878 / 0.023109 (0.006768) | 0.233380 / 0.275898 (-0.042518) | 0.262221 / 0.323480 (-0.061259) | 0.002982 / 0.007986 (-0.005004) | 0.003698 / 0.004328 (-0.000630) | 0.048565 / 0.004250 (0.044314) | 0.046107 / 0.037052 (0.009055) | 0.240090 / 0.258489 (-0.018399) | 0.267294 / 0.293841 (-0.026547) | 0.023335 / 0.128546 (-0.105211) | 0.007221 / 0.075646 (-0.068425) | 0.200903 / 0.419271 (-0.218369) | 0.059237 / 0.043533 (0.015705) | 0.234929 / 0.255139 (-0.020210) | 0.256326 / 0.283200 (-0.026874) | 0.018549 / 0.141683 (-0.123134) | 1.103519 / 1.452155 (-0.348635) | 1.156573 / 1.492716 (-0.336143) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091205 / 0.018006 (0.073199) | 0.303533 / 0.000490 (0.303043) | 0.000204 / 0.000200 (0.000004) | 0.000042 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018572 / 0.037411 (-0.018839) | 0.062323 / 0.014526 (0.047797) | 0.074528 / 0.176557 (-0.102029) | 0.120295 / 0.737135 (-0.616841) | 0.076786 / 0.296338 (-0.219552) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.278814 / 0.215209 (0.063605) | 2.745483 / 2.077655 (0.667829) | 1.486073 / 1.504120 (-0.018047) | 1.385334 / 1.541195 (-0.155861) | 1.386351 / 1.468490 (-0.082139) | 0.395545 / 4.584777 (-4.189232) | 2.409468 / 3.745712 (-1.336244) | 2.670702 / 5.269862 (-2.599159) | 1.629245 / 4.565676 (-2.936432) | 0.045990 / 0.424275 (-0.378286) | 0.004782 / 0.007607 (-0.002825) | 0.332912 / 0.226044 (0.106867) | 3.249277 / 2.268929 (0.980349) | 1.888690 / 55.444624 (-53.555934) | 1.533462 / 6.876477 (-5.343015) | 1.576045 / 2.142072 (-0.566027) | 0.473090 / 4.805227 (-4.332138) | 0.099448 / 6.500664 (-6.401216) | 0.042613 / 0.075469 (-0.032857) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.944229 / 1.841788 (-0.897559) | 12.103621 / 8.074308 (4.029313) | 10.643471 / 10.191392 (0.452079) | 0.143004 / 0.680424 (-0.537420) | 0.013872 / 0.534201 (-0.520329) | 0.272026 / 0.579283 (-0.307257) | 0.298701 / 0.434364 (-0.135663) | 0.310299 / 0.540337 (-0.230038) | 0.420934 / 1.386936 (-0.966002) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004904 / 0.011353 (-0.006449) | 0.003064 / 0.011008 (-0.007945) | 0.047982 / 0.038508 (0.009474) | 0.056354 / 0.023109 (0.033245) | 0.292893 / 0.275898 (0.016995) | 0.348744 / 0.323480 (0.025264) | 0.003988 / 0.007986 (-0.003997) | 0.002431 / 0.004328 (-0.001898) | 0.049108 / 0.004250 (0.044857) | 0.039055 / 0.037052 (0.002002) | 0.278129 / 0.258489 (0.019640) | 0.318547 / 0.293841 (0.024706) | 0.025040 / 0.128546 (-0.103507) | 0.007166 / 0.075646 (-0.068480) | 0.053967 / 0.419271 (-0.365305) | 0.033128 / 0.043533 (-0.010405) | 0.272849 / 0.255139 (0.017710) | 0.312143 / 0.283200 (0.028943) | 0.017942 / 0.141683 (-0.123741) | 1.192297 / 1.452155 (-0.259857) | 1.328102 / 1.492716 (-0.164615) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.090903 / 0.018006 (0.072896) | 0.301260 / 0.000490 (0.300770) | 0.000215 / 0.000200 (0.000015) | 0.000044 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021112 / 0.037411 (-0.016300) | 0.070181 / 0.014526 (0.055656) | 0.082431 / 0.176557 (-0.094126) | 0.121973 / 0.737135 (-0.615163) | 0.083617 / 0.296338 (-0.212721) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289587 / 0.215209 (0.074378) | 2.877895 / 2.077655 (0.800240) | 1.721417 / 1.504120 (0.217297) | 1.536023 / 1.541195 (-0.005171) | 1.550917 / 1.468490 (0.082427) | 0.402978 / 4.584777 (-4.181799) | 2.431767 / 3.745712 (-1.313946) | 2.544419 / 5.269862 (-2.725442) | 1.554562 / 4.565676 (-3.011115) | 0.046260 / 0.424275 (-0.378015) | 0.004923 / 0.007607 (-0.002684) | 0.341584 / 0.226044 (0.115540) | 3.362133 / 2.268929 (1.093205) | 1.928741 / 55.444624 (-53.515884) | 1.654798 / 6.876477 (-5.221679) | 1.715111 / 2.142072 (-0.426962) | 0.471029 / 4.805227 (-4.334198) | 0.098912 / 6.500664 (-6.401752) | 0.041018 / 0.075469 (-0.034451) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.992880 / 1.841788 (-0.848907) | 12.083890 / 8.074308 (4.009582) | 11.023833 / 10.191392 (0.832441) | 0.139217 / 0.680424 (-0.541207) | 0.015183 / 0.534201 (-0.519018) | 0.271637 / 0.579283 (-0.307646) | 0.278910 / 0.434364 (-0.155454) | 0.306891 / 0.540337 (-0.233447) | 0.424412 / 1.386936 (-0.962524) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#2d51f37eb9996d4c52250ee6e987ccce0d74f2f4 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | 
read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004545 / 0.011353 (-0.006808) | 0.002955 / 0.011008 (-0.008054) | 0.062119 / 0.038508 (0.023611) | 0.029357 / 0.023109 (0.006248) | 0.240068 / 0.275898 (-0.035830) | 0.273376 / 0.323480 (-0.050104) | 0.003884 / 0.007986 (-0.004102) | 0.002390 / 0.004328 (-0.001938) | 0.048621 / 0.004250 (0.044371) | 0.043867 / 0.037052 (0.006815) | 0.247240 / 0.258489 (-0.011249) | 0.279187 / 0.293841 (-0.014654) | 0.023377 / 0.128546 (-0.105169) | 0.007261 / 0.075646 (-0.068385) | 0.201913 / 0.419271 (-0.217359) | 0.057063 / 0.043533 (0.013530) | 0.245698 / 0.255139 (-0.009441) | 0.265644 / 0.283200 (-0.017556) | 0.018077 / 0.141683 (-0.123606) | 1.133225 / 1.452155 (-0.318930) | 1.186380 / 1.492716 (-0.306336) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.089639 / 0.018006 (0.071632) | 0.298918 / 0.000490 (0.298428) | 0.000198 / 0.000200 (-0.000002) | 0.000043 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.019037 / 0.037411 (-0.018374) | 0.062580 / 0.014526 (0.048055) | 0.072974 / 0.176557 (-0.103582) | 0.119909 / 0.737135 (-0.617226) | 0.075021 / 0.296338 (-0.221317) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.276561 / 0.215209 (0.061352) | 2.697281 / 2.077655 (0.619626) | 1.419772 / 1.504120 (-0.084348) | 1.302079 / 1.541195 (-0.239115) | 1.329143 / 1.468490 (-0.139347) | 0.395528 / 4.584777 (-4.189249) | 2.365788 / 3.745712 (-1.379925) | 2.583802 / 5.269862 (-2.686059) | 1.561983 / 4.565676 (-3.003694) | 0.045269 / 0.424275 (-0.379006) | 0.004826 / 0.007607 (-0.002781) | 0.331041 / 0.226044 (0.104996) | 3.292523 / 2.268929 (1.023595) | 1.797865 / 55.444624 (-53.646759) | 1.509229 / 6.876477 (-5.367248) | 1.498884 / 2.142072 (-0.643188) | 0.458518 / 4.805227 (-4.346709) | 0.098076 / 6.500664 (-6.402588) | 0.042290 / 0.075469 (-0.033179) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.922331 / 1.841788 (-0.919457) | 11.605041 / 8.074308 (3.530732) | 10.471664 / 
10.191392 (0.280272) | 0.130325 / 0.680424 (-0.550098) | 0.014084 / 0.534201 (-0.520117) | 0.278877 / 0.579283 (-0.300406) | 0.263104 / 0.434364 (-0.171259) | 0.306723 / 0.540337 (-0.233615) | 0.416238 / 1.386936 (-0.970698) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005094 / 0.011353 (-0.006259) | 0.002794 / 0.011008 (-0.008214) | 0.048189 / 0.038508 (0.009680) | 0.050409 / 0.023109 (0.027300) | 0.272618 / 0.275898 (-0.003280) | 0.293589 / 0.323480 (-0.029891) | 0.003995 / 0.007986 (-0.003991) | 0.002373 / 0.004328 (-0.001956) | 0.048269 / 0.004250 (0.044018) | 0.038751 / 0.037052 (0.001698) | 0.273495 / 0.258489 (0.015006) | 0.309244 / 0.293841 (0.015403) | 0.024681 / 0.128546 (-0.103866) | 0.007390 / 0.075646 (-0.068256) | 0.053844 / 0.419271 (-0.365427) | 0.032395 / 0.043533 (-0.011137) | 0.271963 / 0.255139 (0.016824) | 0.289557 / 0.283200 (0.006357) | 0.018659 / 0.141683 (-0.123024) | 1.154478 / 1.452155 (-0.297676) | 1.199772 / 1.492716 (-0.292944) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.089771 / 0.018006 (0.071764) | 0.299468 / 0.000490 (0.298978) | 0.000219 / 0.000200 (0.000020) | 0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021854 / 0.037411 (-0.015558) | 0.070280 / 0.014526 (0.055754) | 0.080956 / 0.176557 (-0.095600) | 0.119430 / 0.737135 (-0.617705) | 0.082778 / 0.296338 (-0.213561) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.304273 / 0.215209 (0.089064) | 2.968264 / 2.077655 (0.890609) | 1.592363 / 1.504120 (0.088243) | 1.460795 / 1.541195 (-0.080400) | 1.501545 / 1.468490 (0.033055) | 0.411001 / 4.584777 (-4.173776) | 2.464273 / 3.745712 (-1.281439) | 2.524585 / 5.269862 (-2.745277) | 1.537443 / 4.565676 (-3.028234) | 0.046163 / 0.424275 (-0.378112) | 0.004783 / 0.007607 (-0.002824) | 0.354251 / 0.226044 (0.128206) | 3.512087 / 2.268929 (1.243158) | 1.968156 / 55.444624 (-53.476468) | 1.664966 / 6.876477 (-5.211510) | 1.685013 / 2.142072 (-0.457060) | 0.485793 / 4.805227 (-4.319435) | 0.099789 / 6.500664 (-6.400875) | 0.040705 / 0.075469 (-0.034764) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.966570 / 1.841788 (-0.875218) | 12.023188 / 8.074308 (3.948880) | 11.122602 / 10.191392 (0.931210) | 0.141002 / 0.680424 (-0.539422) | 0.015955 / 0.534201 (-0.518246) | 0.270293 / 0.579283 (-0.308990) | 0.281839 / 0.434364 (-0.152525) | 0.307279 / 0.540337 (-0.233058) | 0.434687 / 1.386936 (-0.952249) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#7eaad71464e85c7358eaa36494227a43257ffcd8 \"CML watermark\")\n", "What would be the impact for non-windows users ?\r\n\r\nAlso I wonder if a gc.collect() after the `del` could help to remove the PermissionError ? Or register the dataset for deletion on copy/pickle maybe ?", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004973 / 0.011353 (-0.006380) | 0.002753 / 0.011008 (-0.008256) | 0.061489 / 0.038508 (0.022981) | 0.051122 / 0.023109 (0.028012) | 0.228783 / 0.275898 (-0.047115) | 0.256982 / 0.323480 (-0.066498) | 0.002873 / 0.007986 (-0.005112) | 0.003544 / 0.004328 (-0.000784) | 0.048721 / 0.004250 (0.044471) | 0.039137 / 0.037052 (0.002085) | 0.244988 / 0.258489 (-0.013501) | 0.275230 / 0.293841 (-0.018611) | 0.023034 / 0.128546 (-0.105513) | 0.006988 / 0.075646 (-0.068658) | 0.202780 / 0.419271 (-0.216492) | 0.035325 / 0.043533 
(-0.008207) | 0.241722 / 0.255139 (-0.013417) | 0.259671 / 0.283200 (-0.023528) | 0.019875 / 0.141683 (-0.121808) | 1.098667 / 1.452155 (-0.353488) | 1.161444 / 1.492716 (-0.331272) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093591 / 0.018006 (0.075585) | 0.298703 / 0.000490 (0.298213) | 0.000219 / 0.000200 (0.000019) | 0.000043 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018319 / 0.037411 (-0.019092) | 0.062993 / 0.014526 (0.048467) | 0.074313 / 0.176557 (-0.102244) | 0.123089 / 0.737135 (-0.614046) | 0.075177 / 0.296338 (-0.221162) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.268584 / 0.215209 (0.053375) | 2.633116 / 2.077655 (0.555461) | 1.390743 / 1.504120 (-0.113377) | 1.277385 / 1.541195 (-0.263810) | 1.287934 / 1.468490 (-0.180556) | 0.387934 / 4.584777 (-4.196843) | 2.345819 / 3.745712 (-1.399893) | 2.558169 / 5.269862 (-2.711693) | 1.569812 / 4.565676 (-2.995865) | 0.045297 / 0.424275 (-0.378978) | 0.005238 / 0.007607 (-0.002369) | 0.359704 / 0.226044 (0.133659) | 3.204688 / 2.268929 (0.935759) | 1.753321 / 55.444624 (-53.691303) | 1.492223 / 6.876477 (-5.384254) | 1.498207 / 2.142072 (-0.643865) | 0.459830 / 4.805227 (-4.345397) | 0.098194 / 6.500664 (-6.402470) | 0.042632 / 0.075469 (-0.032837) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.963020 / 1.841788 (-0.878768) | 11.500470 / 8.074308 (3.426161) | 10.451882 / 10.191392 (0.260490) | 0.127706 / 0.680424 (-0.552718) | 0.014084 / 0.534201 (-0.520117) | 0.269728 / 0.579283 (-0.309555) | 0.260283 / 0.434364 (-0.174080) | 0.303717 / 0.540337 (-0.236620) | 0.397028 / 1.386936 (-0.989908) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after 
write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004823 / 0.011353 (-0.006529) | 0.002751 / 0.011008 (-0.008257) | 0.048719 / 0.038508 (0.010211) | 0.051409 / 0.023109 (0.028300) | 0.267139 / 0.275898 (-0.008759) | 0.287659 / 0.323480 (-0.035821) | 0.003959 / 0.007986 (-0.004027) | 0.002376 / 0.004328 (-0.001953) | 0.047942 / 0.004250 (0.043692) | 0.039742 / 0.037052 (0.002690) | 0.268348 / 0.258489 (0.009859) | 0.297201 / 0.293841 (0.003360) | 0.024226 / 0.128546 (-0.104320) | 0.007103 / 0.075646 (-0.068544) | 0.053310 / 0.419271 (-0.365961) | 0.032716 / 0.043533 (-0.010816) | 0.269469 / 0.255139 (0.014330) | 0.287752 / 0.283200 (0.004553) | 0.018191 / 0.141683 (-0.123492) | 1.114086 / 1.452155 (-0.338069) | 1.188054 / 1.492716 (-0.304662) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091072 / 0.018006 (0.073066) | 0.300367 / 0.000490 (0.299877) | 0.000218 / 0.000200 (0.000018) | 0.000044 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.020970 / 0.037411 (-0.016441) | 0.070356 / 0.014526 (0.055830) | 0.081339 / 0.176557 (-0.095218) | 0.120741 / 0.737135 (-0.616394) | 0.081677 / 0.296338 (-0.214662) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.290405 / 0.215209 (0.075196) | 2.863877 / 2.077655 (0.786222) | 1.524603 / 1.504120 (0.020483) | 1.397917 / 1.541195 (-0.143278) | 1.402635 / 1.468490 (-0.065855) | 0.405525 / 4.584777 (-4.179252) | 2.432474 / 3.745712 (-1.313239) | 2.446277 / 5.269862 (-2.823585) | 1.550300 / 4.565676 (-3.015377) | 0.046545 / 0.424275 (-0.377730) | 0.004824 / 0.007607 (-0.002783) | 0.343578 / 0.226044 (0.117534) | 3.436850 / 2.268929 (1.167922) | 1.897200 / 55.444624 (-53.547425) | 1.625222 / 6.876477 (-5.251255) | 1.730488 / 2.142072 (-0.411585) | 0.482099 / 4.805227 (-4.323129) | 0.097828 / 6.500664 (-6.402836) | 0.040385 / 0.075469 (-0.035084) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | 
map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.950975 / 1.841788 (-0.890812) | 11.875024 / 8.074308 (3.800715) | 10.430301 / 10.191392 (0.238909) | 0.130546 / 0.680424 (-0.549878) | 0.015423 / 0.534201 (-0.518778) | 0.269592 / 0.579283 (-0.309691) | 0.282505 / 0.434364 (-0.151859) | 0.305567 / 0.540337 (-0.234771) | 0.522142 / 1.386936 (-0.864794) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#c166692aa955528180dd4d55474a984f6044896d \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004983 / 0.011353 (-0.006369) | 0.003346 / 0.011008 (-0.007662) | 0.062233 / 0.038508 (0.023725) | 0.050246 / 0.023109 (0.027137) | 0.305738 / 0.275898 (0.029839) | 0.321863 / 0.323480 (-0.001617) | 0.003870 / 0.007986 (-0.004116) | 0.002610 / 0.004328 (-0.001718) | 0.047734 / 0.004250 (0.043483) | 0.037611 / 0.037052 (0.000559) | 0.299121 / 0.258489 (0.040632) | 0.327370 / 0.293841 (0.033529) | 0.027009 / 0.128546 (-0.101537) | 0.010816 / 0.075646 (-0.064830) | 0.204627 / 0.419271 (-0.214645) | 0.035708 / 0.043533 (-0.007825) | 0.291837 / 0.255139 (0.036698) | 0.313646 / 0.283200 (0.030447) | 0.017277 / 0.141683 (-0.124405) | 1.097907 / 1.452155 (-0.354248) | 1.163203 / 1.492716 (-0.329513) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091933 / 0.018006 (0.073926) | 0.298787 / 0.000490 (0.298297) | 0.000204 / 0.000200 (0.000004) | 0.000051 / 0.000054 (-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018349 / 0.037411 (-0.019062) | 0.061520 / 0.014526 (0.046994) | 0.073159 / 0.176557 (-0.103397) | 0.118657 / 0.737135 (-0.618478) | 0.073601 / 0.296338 (-0.222737) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | 
read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.276297 / 0.215209 (0.061088) | 2.725668 / 2.077655 (0.648013) | 1.458079 / 1.504120 (-0.046041) | 1.331236 / 1.541195 (-0.209959) | 1.347919 / 1.468490 (-0.120571) | 0.565954 / 4.584777 (-4.018823) | 2.380883 / 3.745712 (-1.364829) | 2.800533 / 5.269862 (-2.469329) | 1.740534 / 4.565676 (-2.825142) | 0.065617 / 0.424275 (-0.358658) | 0.004907 / 0.007607 (-0.002700) | 0.335973 / 0.226044 (0.109929) | 3.337405 / 2.268929 (1.068476) | 1.819852 / 55.444624 (-53.624772) | 1.542724 / 6.876477 (-5.333752) | 1.509508 / 2.142072 (-0.632565) | 0.648618 / 4.805227 (-4.156609) | 0.116812 / 6.500664 (-6.383852) | 0.041561 / 0.075469 (-0.033909) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.943488 / 1.841788 (-0.898299) | 11.184770 / 8.074308 (3.110462) | 10.406311 / 10.191392 (0.214919) | 0.129841 / 0.680424 (-0.550583) | 0.013736 / 0.534201 (-0.520465) | 0.287281 / 0.579283 (-0.292002) | 0.267403 / 0.434364 (-0.166961) | 0.325319 / 0.540337 (-0.215019) | 0.454207 / 1.386936 (-0.932729) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005169 / 0.011353 (-0.006183) | 0.003155 / 0.011008 (-0.007854) | 0.048101 / 0.038508 (0.009593) | 0.048726 / 0.023109 (0.025617) | 0.275768 / 0.275898 (-0.000130) | 0.291209 / 0.323480 (-0.032271) | 0.003984 / 0.007986 (-0.004001) | 0.002586 / 0.004328 (-0.001742) | 0.047751 / 0.004250 (0.043500) | 0.040176 / 0.037052 (0.003124) | 0.279161 / 0.258489 (0.020672) | 0.297371 / 0.293841 (0.003530) | 0.028502 / 0.128546 (-0.100044) | 0.010103 / 0.075646 (-0.065544) | 0.056920 / 0.419271 (-0.362351) | 0.032174 / 0.043533 (-0.011359) | 0.271925 / 0.255139 (0.016786) | 0.289572 / 
0.283200 (0.006372) | 0.017981 / 0.141683 (-0.123702) | 1.192972 / 1.452155 (-0.259183) | 1.223231 / 1.492716 (-0.269485) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091363 / 0.018006 (0.073356) | 0.298106 / 0.000490 (0.297616) | 0.000216 / 0.000200 (0.000016) | 0.000044 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021509 / 0.037411 (-0.015902) | 0.068377 / 0.014526 (0.053851) | 0.079798 / 0.176557 (-0.096759) | 0.120546 / 0.737135 (-0.616589) | 0.080602 / 0.296338 (-0.215737) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.300809 / 0.215209 (0.085600) | 2.921144 / 2.077655 (0.843489) | 1.621096 / 1.504120 (0.116976) | 1.504265 / 1.541195 (-0.036930) | 1.508050 / 1.468490 (0.039560) | 0.554291 / 4.584777 (-4.030486) | 2.418798 / 3.745712 (-1.326914) | 2.768088 / 5.269862 (-2.501773) | 1.728267 / 4.565676 (-2.837410) | 0.062943 / 0.424275 (-0.361332) | 0.004891 / 0.007607 (-0.002716) | 0.350298 / 0.226044 (0.124254) | 3.442782 / 2.268929 (1.173853) | 1.960163 / 55.444624 (-53.484461) | 1.682000 / 6.876477 (-5.194477) | 1.680311 / 2.142072 (-0.461761) | 0.631201 / 4.805227 (-4.174026) | 0.115211 / 6.500664 (-6.385453) | 0.041279 / 0.075469 (-0.034190) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.962478 / 1.841788 (-0.879310) | 11.671463 / 8.074308 (3.597155) | 10.640129 / 10.191392 (0.448737) | 0.130649 / 0.680424 (-0.549775) | 0.016169 / 0.534201 (-0.518032) | 0.286894 / 0.579283 (-0.292389) | 0.269319 / 0.434364 (-0.165045) | 0.324512 / 0.540337 (-0.215825) | 0.550874 / 1.386936 (-0.836062) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#69f135121beb1616f1d7c7584b317d4e41e21275 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence 
| read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005078 / 0.011353 (-0.006275) | 0.003950 / 0.011008 (-0.007058) | 0.063345 / 0.038508 (0.024837) | 0.054486 / 0.023109 (0.031377) | 0.243213 / 0.275898 (-0.032685) | 0.264079 / 0.323480 (-0.059401) | 0.003922 / 0.007986 (-0.004064) | 0.002631 / 0.004328 (-0.001698) | 0.048660 / 0.004250 (0.044409) | 0.037205 / 0.037052 (0.000153) | 0.244577 / 0.258489 (-0.013912) | 0.276025 / 0.293841 (-0.017816) | 0.027134 / 0.128546 (-0.101412) | 0.010921 / 0.075646 (-0.064726) | 0.209792 / 0.419271 (-0.209479) | 0.035999 / 0.043533 (-0.007534) | 0.245671 / 0.255139 (-0.009468) | 0.262807 / 0.283200 (-0.020393) | 0.018173 / 0.141683 (-0.123510) | 1.084417 / 1.452155 (-0.367738) | 1.148284 / 1.492716 (-0.344432) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093128 / 0.018006 (0.075122) | 0.301606 / 0.000490 (0.301117) | 0.000221 / 0.000200 (0.000021) | 0.000050 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018718 / 0.037411 (-0.018693) | 0.060819 / 0.014526 (0.046293) | 0.073050 / 0.176557 (-0.103507) | 0.120043 / 0.737135 (-0.617092) | 0.075374 / 0.296338 (-0.220965) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291080 / 0.215209 (0.075871) | 2.808802 / 2.077655 (0.731148) | 1.485686 / 1.504120 (-0.018434) | 1.354356 / 1.541195 (-0.186839) | 1.347863 / 1.468490 (-0.120627) | 0.571501 / 4.584777 (-4.013276) | 2.377960 / 3.745712 (-1.367752) | 2.768023 / 5.269862 (-2.501839) | 1.754360 / 4.565676 (-2.811316) | 0.063115 / 0.424275 (-0.361160) | 0.004941 / 0.007607 (-0.002666) | 0.338281 / 0.226044 (0.112237) | 3.340587 / 2.268929 (1.071658) | 1.849479 / 55.444624 (-53.595145) | 1.551846 / 6.876477 (-5.324631) | 1.539090 / 2.142072 (-0.602983) | 0.644522 / 4.805227 (-4.160705) | 0.117398 / 6.500664 (-6.383266) | 0.042239 / 0.075469 (-0.033230) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map 
identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.949496 / 1.841788 (-0.892291) | 11.548352 / 8.074308 (3.474044) | 10.478065 / 10.191392 (0.286673) | 0.129534 / 0.680424 (-0.550890) | 0.015378 / 0.534201 (-0.518822) | 0.287221 / 0.579283 (-0.292062) | 0.262944 / 0.434364 (-0.171419) | 0.321727 / 0.540337 (-0.218611) | 0.432354 / 1.386936 (-0.954582) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005256 / 0.011353 (-0.006097) | 0.003491 / 0.011008 (-0.007517) | 0.048647 / 0.038508 (0.010139) | 0.054011 / 0.023109 (0.030901) | 0.271786 / 0.275898 (-0.004112) | 0.291964 / 0.323480 (-0.031516) | 0.004035 / 0.007986 (-0.003950) | 0.002671 / 0.004328 (-0.001657) | 0.048108 / 0.004250 (0.043857) | 0.040421 / 0.037052 (0.003368) | 0.278594 / 0.258489 (0.020105) | 0.300707 / 0.293841 (0.006867) | 0.028924 / 0.128546 (-0.099623) | 0.010600 / 0.075646 (-0.065047) | 0.057649 / 0.419271 (-0.361623) | 0.034221 / 0.043533 (-0.009312) | 0.276692 / 0.255139 (0.021553) | 0.293545 / 0.283200 (0.010345) | 0.017908 / 0.141683 (-0.123775) | 1.135108 / 1.452155 (-0.317047) | 1.190823 / 1.492716 (-0.301893) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095243 / 0.018006 (0.077237) | 0.301885 / 0.000490 (0.301396) | 0.000235 / 0.000200 (0.000035) | 0.000056 / 0.000054 (0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021561 / 0.037411 (-0.015850) | 0.069054 / 0.014526 (0.054529) | 0.080466 / 0.176557 (-0.096091) | 0.121323 / 0.737135 (-0.615812) | 0.081891 / 0.296338 (-0.214448) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 
50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.293957 / 0.215209 (0.078748) | 2.869035 / 2.077655 (0.791380) | 1.608837 / 1.504120 (0.104717) | 1.440594 / 1.541195 (-0.100601) | 1.464775 / 1.468490 (-0.003715) | 0.565663 / 4.584777 (-4.019114) | 2.439456 / 3.745712 (-1.306256) | 2.794775 / 5.269862 (-2.475087) | 1.750026 / 4.565676 (-2.815651) | 0.063291 / 0.424275 (-0.360984) | 0.004930 / 0.007607 (-0.002677) | 0.347169 / 0.226044 (0.121125) | 3.408260 / 2.268929 (1.139331) | 1.920933 / 55.444624 (-53.523691) | 1.648821 / 6.876477 (-5.227656) | 1.639022 / 2.142072 (-0.503051) | 0.642870 / 4.805227 (-4.162357) | 0.117077 / 6.500664 (-6.383587) | 0.040784 / 0.075469 (-0.034685) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.993501 / 1.841788 (-0.848287) | 12.012423 / 8.074308 (3.938115) | 10.740932 / 10.191392 (0.549540) | 0.132409 / 0.680424 (-0.548015) | 0.015294 / 0.534201 (-0.518907) | 0.287902 / 0.579283 (-0.291381) | 0.281350 / 0.434364 (-0.153014) | 0.329201 / 0.540337 (-0.211137) | 0.553199 / 1.386936 (-0.833737) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#ecd3a22c5dec2133491a320515e12956512439eb \"CML watermark\")\n" ]
2023-11-15T19:06:42Z
2023-12-01T15:37:32Z
2023-12-01T15:31:19Z
COLLABORATOR
null
null
null
While fixing the Windows errors in #6362, I noticed that `PermissionError` can still easily be thrown at session exit by the temporary cache directory's finalizer (we would also have to keep track of intermediate datasets, copies, etc.). ~~Due to the low usage of `datasets` on Windows, this PR takes a simpler approach to the issue than https://github.com/huggingface/datasets/pull/2403 - it tries to delete the temporary cache directory and, if this fails, logs a warning message about using a `delete-temp-cache` CLI command to delete it manually. The problematic references are freed after the session exits, so the CLI command should then succeed.~~ This PR implements `Dataset.__setstate__` to register datasets with temporary cache files for deletion.
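For context, the registration idea can be sketched in a few lines. This is only a minimal illustration of the pattern, not the actual `datasets` implementation: `_TempCacheRegistry`, its methods, and the stripped-down `Dataset` class are hypothetical names invented for this sketch.

```python
import atexit
import shutil
import tempfile


class _TempCacheRegistry:
    """Tracks temporary cache directories and removes them at interpreter exit."""

    def __init__(self):
        self._paths = set()
        atexit.register(self._cleanup)

    def register(self, path):
        self._paths.add(path)

    def _cleanup(self):
        for path in self._paths:
            # Best effort: on Windows this can still hit PermissionError if a
            # file handle is open, hence ignore_errors=True.
            shutil.rmtree(path, ignore_errors=True)


_REGISTRY = _TempCacheRegistry()


class Dataset:
    """Stripped-down stand-in for the real class, for illustration only."""

    def __init__(self):
        self._cache_dir = tempfile.mkdtemp()
        _REGISTRY.register(self._cache_dir)

    def __setstate__(self, state):
        # Copies and pickle round-trips bypass __init__, so re-register here to
        # make sure the copy's temporary cache files are scheduled for deletion.
        self.__dict__.update(state)
        if state.get("_cache_dir"):
            _REGISTRY.register(state["_cache_dir"])
```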
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6426/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6426/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6426.diff", "html_url": "https://github.com/huggingface/datasets/pull/6426", "merged_at": "2023-12-01T15:31:19Z", "patch_url": "https://github.com/huggingface/datasets/pull/6426.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6426" }
https://api.github.com/repos/huggingface/datasets/issues/6014
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6014/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6014/comments
https://api.github.com/repos/huggingface/datasets/issues/6014/events
https://github.com/huggingface/datasets/issues/6014
1,798,213,816
I_kwDODunzps5rLpC4
6,014
Request to Share/Update Dataset Viewer Code
{ "avatar_url": "https://avatars.githubusercontent.com/u/105081034?v=4", "events_url": "https://api.github.com/users/lilyorlilypad/events{/privacy}", "followers_url": "https://api.github.com/users/lilyorlilypad/followers", "following_url": "https://api.github.com/users/lilyorlilypad/following{/other_user}", "gists_url": "https://api.github.com/users/lilyorlilypad/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lilyorlilypad", "id": 105081034, "login": "lilyorlilypad", "node_id": "U_kgDOBkNoyg", "organizations_url": "https://api.github.com/users/lilyorlilypad/orgs", "received_events_url": "https://api.github.com/users/lilyorlilypad/received_events", "repos_url": "https://api.github.com/users/lilyorlilypad/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lilyorlilypad/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lilyorlilypad/subscriptions", "type": "User", "url": "https://api.github.com/users/lilyorlilypad", "user_view_type": "public" }
[ { "color": "cfd3d7", "default": true, "description": "This issue or pull request already exists", "id": 1935892865, "name": "duplicate", "node_id": "MDU6TGFiZWwxOTM1ODkyODY1", "url": "https://api.github.com/repos/huggingface/datasets/labels/duplicate" } ]
closed
false
null
[]
null
[ "Hi ! The huggingface/dataset-viewer code was not maintained anymore because we switched to a new dataset viewer that is deployed available for each dataset the Hugging Face website.\r\n\r\nWhat are you using this old repository for ?", "I think these parts are outdated:\r\n\r\n* https://github.com/huggingface/datasets-viewer/blob/8efad8eae313a891f713469983bf4c744786f26e/run.py#L126-L131\r\n* https://github.com/huggingface/datasets-viewer/blob/8efad8eae313a891f713469983bf4c744786f26e/run.py#L145-L150\r\n\r\nTo make the viewer work, the first one should be replaced with the following:\r\n```python\r\ndataset_module = datasets.load.dataset_module_factory(path)\r\nbuilder_cls = datasets.load.import_main_class(dataset_module.module_path)\r\nconfs = builder_cls.BUILDER_CONFIGS\r\n```\r\nAnd the second one:\r\n```python\r\ndataset_module = datasets.load.dataset_module_factory(path)\r\nbuilder_cls = datasets.load.import_main_class(dataset_module.module_path)\r\nif conf:\r\n builder_instance = builder_cls(name=conf, cache_dir=path if path_to_datasets is not None else None)\r\nelse:\r\n builder_instance = builder_cls(cache_dir=path if path_to_datasets is not None else None)\r\n```\r\n\r\nBut as @lhoestq suggested, it's better to use the `datasets-server` API nowadays to [fetch the rows](https://huggingface.co/docs/datasets-server/rows).", "> The dataset viewer on the Hugging Face website is incredibly useful\r\n\r\n@mariosasko i think @lilyorlilypad wants to run the new dataset-viewer, not the old one", "> wants to run the new dataset-viewer, not the old one\r\n\r\nThanks for the clarification for me. I do want to run the new dataset-viewer. ", "It should be possible to run it locally using the HF datasets-server API (docs [here](https://huggingface.co/docs/datasets-server)) but the front end part is not open source (yet ?)\r\n\r\nThe back-end is open source though if you're interested: https://github.com/huggingface/datasets-server\r\nIt automatically converts datasets on HF to Parquet, which is the format we use to power the viewer.", "the new frontend would probably be hard to open source, as is, as it's quite intertwined with the Hub's code.\r\n\r\nHowever, at some point it would be amazing to have a community-driven open source implementation of a frontend to datasets-server! ", "For the frontend viewer, see https://github.com/huggingface/datasets/issues/6139.\r\n\r\nAlso mentioned in https://github.com/huggingface/datasets-server/issues/213 and https://github.com/huggingface/datasets-server/issues/441\r\n\r\nClosing as a duplicate of https://github.com/huggingface/datasets/issues/6139", "Hi team,\r\n\r\nI'm currently researching the Dataset Viewer project and would like to understand more about the frontend technologies used. Specifically, I'm interested in knowing:\r\n\r\nWhich frontend framework is being utilized (e.g., React, Vue, etc.)?\r\nAre there any specific libraries or components being used for UI (e.g., Material-UI, Ant Design)?\r\nAny other notable frontend tools or technologies that are part of this project?\r\nYour assistance in providing these details would be greatly appreciated. Thank you for your time and effort!\r\n\r\nBest regards", "@jacob-rodgers-max we use https://svelte.dev/", "> @jacob-rodgers-max we use https://svelte.dev/\r\n\r\nThank you very much for your prompt and detailed response!" ]
2023-07-11T06:36:09Z
2024-07-20T07:29:08Z
2023-09-25T12:01:17Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
Overview: The repository (huggingface/datasets-viewer) was recently archived, and when I tried to run the code I got the error message "AttributeError: module 'datasets.load' has no attribute 'prepare_module'". I could not resolve the issue myself due to the lack of documentation for that attribute. Request: I kindly request the sharing of the code responsible for the dataset preview functionality, or help with resolving the error. The dataset viewer on the Hugging Face website is incredibly useful since it is compatible with different types of inputs, and it allows users to find datasets that meet their needs more efficiently. If needed, I am willing to contribute to the project by testing, documenting, and providing feedback on the dataset viewer code. Thank you for considering this request, and I look forward to your response.
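As suggested in the comments above, row contents can be fetched from the datasets-server API instead of running the archived viewer. A minimal sketch, assuming the documented `/rows` endpoint (https://huggingface.co/docs/datasets-server/rows); the dataset, config, and split values are placeholders:

```python
import requests

# Page through a dataset with the datasets-server /rows endpoint.
API_URL = "https://datasets-server.huggingface.co/rows"
params = {
    "dataset": "glue",  # placeholder values
    "config": "cola",
    "split": "train",
    "offset": 0,
    "length": 10,
}
response = requests.get(API_URL, params=params)
response.raise_for_status()
for item in response.json()["rows"]:
    print(item["row"])  # the raw row contents as a dict
```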
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6014/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6014/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/6668
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6668/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6668/comments
https://api.github.com/repos/huggingface/datasets/issues/6668/events
https://github.com/huggingface/datasets/issues/6668
2,137,859,935
I_kwDODunzps5_bSdf
6,668
Chapter 6 - Issue Loading `cnn_dailymail` dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/34660389?v=4", "events_url": "https://api.github.com/users/hariravichandran/events{/privacy}", "followers_url": "https://api.github.com/users/hariravichandran/followers", "following_url": "https://api.github.com/users/hariravichandran/following{/other_user}", "gists_url": "https://api.github.com/users/hariravichandran/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hariravichandran", "id": 34660389, "login": "hariravichandran", "node_id": "MDQ6VXNlcjM0NjYwMzg5", "organizations_url": "https://api.github.com/users/hariravichandran/orgs", "received_events_url": "https://api.github.com/users/hariravichandran/received_events", "repos_url": "https://api.github.com/users/hariravichandran/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hariravichandran/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hariravichandran/subscriptions", "type": "User", "url": "https://api.github.com/users/hariravichandran", "user_view_type": "public" }
[]
open
false
null
[]
null
[]
2024-02-16T04:40:56Z
2024-02-16T04:40:56Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug So I am getting this bug when I try to run cell 4 of the Chapter 6 notebook code: `dataset = load_dataset("ccdv/cnn_dailymail", version="3.0.0")` Error Message: ``` --------------------------------------------------------------------------- ValueError Traceback (most recent call last) Cell In[4], line 4 1 #hide_output 2 from datasets import load_dataset ----> 4 dataset = load_dataset("ccdv/cnn_dailymail", version="3.0.0") 7 # dataset = load_dataset("ccdv/cnn_dailymail", version="3.0.0", trust_remote_code=True) 8 print(f"Features: {dataset['train'].column_names}") File ~\anaconda3\envs\nlp-transformers\lib\site-packages\datasets\load.py:2587, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs) 2583 # Build dataset for splits 2584 keep_in_memory = ( 2585 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size) 2586 ) -> 2587 ds = builder_instance.as_dataset(split=split, verification_mode=verification_mode, in_memory=keep_in_memory) 2588 # Rename and cast features to match task schema 2589 if task is not None: 2590 # To avoid issuing the same warning twice File ~\anaconda3\envs\nlp-transformers\lib\site-packages\datasets\builder.py:1244, in DatasetBuilder.as_dataset(self, split, run_post_process, verification_mode, ignore_verifications, in_memory) 1241 verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS) 1243 # Create a dataset for each of the given splits -> 1244 datasets = map_nested( 1245 partial( 1246 self._build_single_dataset, 1247 run_post_process=run_post_process, 1248 verification_mode=verification_mode, 1249 in_memory=in_memory, 1250 ), 1251 split, 1252 map_tuple=True, 1253 disable_tqdm=True, 1254 ) 1255 if isinstance(datasets, dict): 1256 datasets = DatasetDict(datasets) File ~\anaconda3\envs\nlp-transformers\lib\site-packages\datasets\utils\py_utils.py:477, in map_nested(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, parallel_min_length, types, disable_tqdm, desc) 466 mapped = [ 467 map_nested( 468 function=function, (...) 474 for obj in iterable 475 ] 476 elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length: --> 477 mapped = [ 478 _single_map_nested((function, obj, types, None, True, None)) 479 for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc) 480 ] 481 else: 482 with warnings.catch_warnings(): File ~\anaconda3\envs\nlp-transformers\lib\site-packages\datasets\utils\py_utils.py:478, in <listcomp>(.0) 466 mapped = [ 467 map_nested( 468 function=function, (...) 
474 for obj in iterable 475 ] 476 elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length: 477 mapped = [ --> 478 _single_map_nested((function, obj, types, None, True, None)) 479 for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc) 480 ] 481 else: 482 with warnings.catch_warnings(): File ~\anaconda3\envs\nlp-transformers\lib\site-packages\datasets\utils\py_utils.py:370, in _single_map_nested(args) 368 # Singleton first to spare some computation 369 if not isinstance(data_struct, dict) and not isinstance(data_struct, types): --> 370 return function(data_struct) 372 # Reduce logging to keep things readable in multiprocessing with tqdm 373 if rank is not None and logging.get_verbosity() < logging.WARNING: File ~\anaconda3\envs\nlp-transformers\lib\site-packages\datasets\builder.py:1274, in DatasetBuilder._build_single_dataset(self, split, run_post_process, verification_mode, in_memory) 1271 split = Split(split) 1273 # Build base dataset -> 1274 ds = self._as_dataset( 1275 split=split, 1276 in_memory=in_memory, 1277 ) 1278 if run_post_process: 1279 for resource_file_name in self._post_processing_resources(split).values(): File ~\anaconda3\envs\nlp-transformers\lib\site-packages\datasets\builder.py:1348, in DatasetBuilder._as_dataset(self, split, in_memory) 1346 if self._check_legacy_cache(): 1347 dataset_name = self.name -> 1348 dataset_kwargs = ArrowReader(cache_dir, self.info).read( 1349 name=dataset_name, 1350 instructions=split, 1351 split_infos=self.info.splits.values(), 1352 in_memory=in_memory, 1353 ) 1354 fingerprint = self._get_dataset_fingerprint(split) 1355 return Dataset(fingerprint=fingerprint, **dataset_kwargs) File ~\anaconda3\envs\nlp-transformers\lib\site-packages\datasets\arrow_reader.py:254, in BaseReader.read(self, name, instructions, split_infos, in_memory) 252 if not files: 253 msg = f'Instruction "{instructions}" corresponds to no data!' --> 254 raise ValueError(msg) 255 return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory) **ValueError: Instruction "validation" corresponds to no data!** ```` Looks like the data is not being loaded. Any advice would be appreciated. Thanks! ### Steps to reproduce the bug Run all cells of Chapter 6 notebook. ### Expected behavior Data should load correctly without any errors. ### Environment info - `datasets` version: 2.17.0 - Platform: Windows-10-10.0.19045-SP0 - Python version: 3.9.18 - `huggingface_hub` version: 0.20.3 - PyArrow version: 15.0.0 - Pandas version: 2.2.0 - `fsspec` version: 2023.10.0
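No fix is recorded in this thread, so the following is only a hedged suggestion based on how `load_dataset` resolves configurations: pass "3.0.0" as the config name (second positional argument) rather than as a `version=` keyword, and force a redownload in case a stale or partially written cache is what makes the "validation" split come up empty. Whether this resolves the error for this dataset is an assumption, not something confirmed above:

```python
from datasets import load_dataset

# "3.0.0" is treated as the config name here, passed positionally (an
# assumption based on the dataset's configurations, not confirmed in the thread).
dataset = load_dataset(
    "ccdv/cnn_dailymail",
    "3.0.0",
    download_mode="force_redownload",  # bypass a possibly corrupted local cache
    trust_remote_code=True,            # this dataset runs a loading script
)
print(f"Features: {dataset['train'].column_names}")
```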
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6668/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6668/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/5123
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5123/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5123/comments
https://api.github.com/repos/huggingface/datasets/issues/5123/events
https://github.com/huggingface/datasets/issues/5123
1,410,828,756
I_kwDODunzps5UF4nU
5,123
datasets freezes with streaming mode on multiple GPUs
{ "avatar_url": "https://avatars.githubusercontent.com/u/59409879?v=4", "events_url": "https://api.github.com/users/jackfeinmann5/events{/privacy}", "followers_url": "https://api.github.com/users/jackfeinmann5/followers", "following_url": "https://api.github.com/users/jackfeinmann5/following{/other_user}", "gists_url": "https://api.github.com/users/jackfeinmann5/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jackfeinmann5", "id": 59409879, "login": "jackfeinmann5", "node_id": "MDQ6VXNlcjU5NDA5ODc5", "organizations_url": "https://api.github.com/users/jackfeinmann5/orgs", "received_events_url": "https://api.github.com/users/jackfeinmann5/received_events", "repos_url": "https://api.github.com/users/jackfeinmann5/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jackfeinmann5/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jackfeinmann5/subscriptions", "type": "User", "url": "https://api.github.com/users/jackfeinmann5", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
[ "@lhoestq I tested the script without accelerator, and I confirm this is due to datasets part as this gets similar results without accelerator.", "Hi ! You said it works on 1 GPU but doesn't wortk without accelerator - what's the difference between running on 1 GPU and running without accelerator in your case ?", "Hi @lhoestq \r\nthanks for coming back to me. Sorry for the confusion I made. I meant this works fine on 1 GPU, but on multi-gpu it is freezing. \"accelerator\" is not an issue as if you adapt the code without accelerator this still gets the same issue.\r\nIn order to test it. Please run \"accelerate config\", then use the setup for multi-gpu in one node.\r\nAfter that run \"accelerate launch code.py\" and then you would see the freezing occurs.", "Hi @lhoestq \r\ncould you have the chance to reproduce the error by running the minimal example shared?\r\nthanks", "I think you need to do `train_dataset = train_dataset.with_format(\"torch\")` to work with the DataLoader in a multiprocessing setup :)\r\n\r\nThe hang is probably caused by our streamign lib `fsspec` which doesn't work in multiprocessing out of the box - but we made it work with the PyTorch DataLoader when the dataset format is set to \"torch\"", "Hi @lhoestq \r\nthanks for the response. I added the line suggested right before calling `with accelerator.main_process_first():` in the code above and I confirm this also freezes. to reproduce it please run \"accelerate launch code.py\". I was wondering if you could have more suggestions for me? I do not have an idea how to fix this or debug this freezing. many thanks.", "Maybe the `fsspec` stuff need to be clearer even before - can you try to run this function at the very beginning of your script ?\r\n```python\r\nimport fsspec\r\n\r\ndef _set_fsspec_for_multiprocess() -> None:\r\n \"\"\"\r\n Clear reference to the loop and thread.\r\n This is necessary otherwise HTTPFileSystem hangs in the ML training loop.\r\n Only required for fsspec >= 0.9.0\r\n See https://github.com/fsspec/gcsfs/issues/379\r\n \"\"\"\r\n fsspec.asyn.iothread[0] = None\r\n fsspec.asyn.loop[0] = None\r\n\r\n_set_fsspec_for_multiprocess()\r\n```", "Hi @lhoestq \r\nthank you. I tried it, I am getting `AttributeError: module 'fsspec' has no attribute 'asyn'`. which version of fsspect do you use?\r\nI am using \r\n```fsspec 2022.8.2 pypi_0 pypi```\r\nthank you.", "Hi @lhoestq \r\nI solved `fsspec` error with this hack for now https://discuss.huggingface.co/t/attributeerror-module-fsspec-has-no-attribute-asyn/19255 but this is still freezing, I greatly appreciate if you could run this script on your side. 
Many thanks.\r\n\r\n```\r\nimport fsspec\r\n\r\ndef _set_fsspec_for_multiprocess() -> None:\r\n \"\"\"\r\n Clear reference to the loop and thread.\r\n This is necessary otherwise HTTPFileSystem hangs in the ML training loop.\r\n Only required for fsspec >= 0.9.0\r\n See https://github.com/fsspec/gcsfs/issues/379\r\n \"\"\"\r\n fsspec.asyn.iothread[0] = None\r\n fsspec.asyn.loop[0] = None\r\n\r\n\r\n_set_fsspec_for_multiprocess()\r\n\r\nfrom accelerate import Accelerator\r\nfrom accelerate.logging import get_logger\r\nfrom datasets import load_dataset\r\nfrom torch.utils.data.dataloader import DataLoader\r\nimport torch\r\nfrom datasets import load_dataset\r\nfrom transformers import AutoTokenizer\r\nimport torch\r\nfrom accelerate.logging import get_logger\r\nfrom torch.utils.data import IterableDataset\r\nfrom torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe\r\n\r\n\r\nlogger = get_logger(__name__)\r\n\r\n\r\nclass ConstantLengthDataset(IterableDataset):\r\n \"\"\"\r\n Iterable dataset that returns constant length chunks of tokens from stream of text files.\r\n Args:\r\n tokenizer (Tokenizer): The processor used for proccessing the data.\r\n dataset (dataset.Dataset): Dataset with text files.\r\n infinite (bool): If True the iterator is reset after dataset reaches end else stops.\r\n max_seq_length (int): Length of token sequences to return.\r\n num_of_sequences (int): Number of token sequences to keep in buffer.\r\n chars_per_token (int): Number of characters per token used to estimate number of tokens in text buffer.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n tokenizer,\r\n dataset,\r\n infinite=False,\r\n max_seq_length=1024,\r\n num_of_sequences=1024,\r\n chars_per_token=3.6,\r\n ):\r\n self.tokenizer = tokenizer\r\n # self.concat_token_id = tokenizer.bos_token_id\r\n self.dataset = dataset\r\n self.max_seq_length = max_seq_length\r\n self.epoch = 0\r\n self.infinite = infinite\r\n self.current_size = 0\r\n self.max_buffer_size = max_seq_length * chars_per_token * num_of_sequences\r\n self.content_field = \"text\"\r\n\r\n def __iter__(self):\r\n iterator = iter(self.dataset)\r\n more_examples = True\r\n while more_examples:\r\n buffer, buffer_len = [], 0\r\n while True:\r\n if buffer_len >= self.max_buffer_size:\r\n break\r\n try:\r\n buffer.append(next(iterator)[self.content_field])\r\n buffer_len += len(buffer[-1])\r\n except StopIteration:\r\n if self.infinite:\r\n iterator = iter(self.dataset)\r\n self.epoch += 1\r\n logger.info(f\"Dataset epoch: {self.epoch}\")\r\n else:\r\n more_examples = False\r\n break\r\n tokenized_inputs = self.tokenizer(buffer, truncation=False)[\"input_ids\"]\r\n all_token_ids = []\r\n for tokenized_input in tokenized_inputs:\r\n all_token_ids.extend(tokenized_input)\r\n for i in range(0, len(all_token_ids), self.max_seq_length):\r\n input_ids = all_token_ids[i : i + self.max_seq_length]\r\n if len(input_ids) == self.max_seq_length:\r\n self.current_size += 1\r\n yield torch.tensor(input_ids)\r\n\r\n def shuffle(self, buffer_size=1000):\r\n return ShufflerIterDataPipe(self, buffer_size=buffer_size)\r\n\r\n\r\ndef create_dataloaders(tokenizer, accelerator):\r\n ds_kwargs = {\"streaming\": True}\r\n # In distributed training, the load_dataset function gaurantees that only one process\r\n # can concurrently download the dataset.\r\n datasets = load_dataset(\r\n \"c4\",\r\n \"en\",\r\n cache_dir=\"cache_dir\",\r\n **ds_kwargs,\r\n )\r\n train_data, valid_data = datasets[\"train\"], datasets[\"validation\"]\r\n with 
accelerator.main_process_first():\r\n train_data = train_data.shuffle(buffer_size=10000, seed=None)\r\n train_dataset = ConstantLengthDataset(\r\n tokenizer,\r\n train_data,\r\n infinite=True,\r\n max_seq_length=256,\r\n )\r\n valid_dataset = ConstantLengthDataset(\r\n tokenizer,\r\n valid_data,\r\n infinite=False,\r\n max_seq_length=256,\r\n )\r\n train_dataset = train_dataset.shuffle(buffer_size=10000)\r\n train_dataloader = DataLoader(train_dataset, batch_size=160, shuffle=True)\r\n eval_dataloader = DataLoader(valid_dataset, batch_size=160)\r\n return train_dataloader, eval_dataloader\r\n\r\n\r\ndef main():\r\n # Accelerator.\r\n logging_dir = \"data_save_dir/log\"\r\n accelerator = Accelerator(\r\n gradient_accumulation_steps=1,\r\n mixed_precision=\"bf16\",\r\n log_with=\"tensorboard\",\r\n logging_dir=logging_dir,\r\n )\r\n # We need to initialize the trackers we use, and also store our configuration.\r\n # The trackers initializes automatically on the main process.\r\n if accelerator.is_main_process:\r\n accelerator.init_trackers(\"test\")\r\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-uncased\")\r\n\r\n # Load datasets and create dataloaders.\r\n train_dataloader, _ = create_dataloaders(tokenizer, accelerator)\r\n\r\n train_dataloader = accelerator.prepare(train_dataloader)\r\n for step, batch in enumerate(train_dataloader, start=1):\r\n print(step)\r\n accelerator.end_training()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n```", "Are you using `Pytorch 1.11`? Otherwise the script freezes because of the shuffling in this line: \r\n```\r\n return ShufflerIterDataPipe(self, buffer_size=buffer_size)\r\n```\r\n`ShufflerIterDataPipe` behavior must have changed for newer Pytorch versions. But this doesn't change whether you're using streaming or not in `datasets`, so probably not the same issue, but something to try.", "> Are you using `Pytorch 1.11`? Otherwise the script freezes because of the shuffling in this line:\r\n> \r\n> ```\r\n> return ShufflerIterDataPipe(self, buffer_size=buffer_size)\r\n> ```\r\n> \r\n> `ShufflerIterDataPipe` behavior must have changed for newer Pytorch versions. But this doesn't change whether you're using streaming or not in `datasets`, so probably not the same issue, but something to try.\r\n\r\nI met the same issue for pytorch 1.12 and 1.13, is there a way to work around for this function for newer pytorch versions?" ]
2022-10-17T03:28:16Z
2023-05-14T06:55:20Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
## Describe the bug Hi. I am using this dataloader, which processes large datasets in streaming mode and is mentioned in one of the Hugging Face examples. I am using it to read c4: https://github.com/huggingface/transformers/blob/b48ac1a094e572d6076b46a9e4ed3e0ebe978afc/examples/research_projects/codeparrot/scripts/codeparrot_training.py#L22 When using multiple GPUs with accelerate on one node, the code freezes, but it works with 1 GPU: ``` 10/16/2022 14:18:46 - INFO - datasets.info - Loading Dataset Infos from /home/jack/.cache/huggingface/modules/datasets_modules/datasets/c4/df532b158939272d032cc63ef19cd5b83e9b4d00c922b833e4cb18b2e9869b01 Steps: 0%| | 0/400000 [00:00<?, ?it/s]10/16/2022 14:18:47 - INFO - torch.utils.data.dataloader - Shared seed (135290893754684706) sent to store on rank 0 ``` # Code to reproduce Please run this code with `accelerate launch code.py` ``` from accelerate import Accelerator from accelerate.logging import get_logger from datasets import load_dataset from torch.utils.data.dataloader import DataLoader import torch from transformers import AutoTokenizer from torch.utils.data import IterableDataset from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe logger = get_logger(__name__) class ConstantLengthDataset(IterableDataset): """ Iterable dataset that returns constant length chunks of tokens from a stream of text files. Args: tokenizer (Tokenizer): The processor used for processing the data. dataset (dataset.Dataset): Dataset with text files. infinite (bool): If True the iterator is reset after the dataset reaches its end, else it stops. max_seq_length (int): Length of token sequences to return. num_of_sequences (int): Number of token sequences to keep in buffer. chars_per_token (int): Number of characters per token used to estimate number of tokens in text buffer. """ def __init__( self, tokenizer, dataset, infinite=False, max_seq_length=1024, num_of_sequences=1024, chars_per_token=3.6, ): self.tokenizer = tokenizer # self.concat_token_id = tokenizer.bos_token_id self.dataset = dataset self.max_seq_length = max_seq_length self.epoch = 0 self.infinite = infinite self.current_size = 0 self.max_buffer_size = max_seq_length * chars_per_token * num_of_sequences self.content_field = "text" def __iter__(self): iterator = iter(self.dataset) more_examples = True while more_examples: buffer, buffer_len = [], 0 while True: if buffer_len >= self.max_buffer_size: break try: buffer.append(next(iterator)[self.content_field]) buffer_len += len(buffer[-1]) except StopIteration: if self.infinite: iterator = iter(self.dataset) self.epoch += 1 logger.info(f"Dataset epoch: {self.epoch}") else: more_examples = False break tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"] all_token_ids = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input) for i in range(0, len(all_token_ids), self.max_seq_length): input_ids = all_token_ids[i : i + self.max_seq_length] if len(input_ids) == self.max_seq_length: self.current_size += 1 yield torch.tensor(input_ids) def shuffle(self, buffer_size=1000): return ShufflerIterDataPipe(self, buffer_size=buffer_size) def create_dataloaders(tokenizer, accelerator): ds_kwargs = {"streaming": True} # In distributed training, the load_dataset function guarantees that only one process # can concurrently download the dataset. 
datasets = load_dataset( "c4", "en", cache_dir="cache_dir", **ds_kwargs, ) train_data, valid_data = datasets["train"], datasets["validation"] with accelerator.main_process_first(): train_data = train_data.shuffle(buffer_size=10000, seed=None) train_dataset = ConstantLengthDataset( tokenizer, train_data, infinite=True, max_seq_length=256, ) valid_dataset = ConstantLengthDataset( tokenizer, valid_data, infinite=False, max_seq_length=256, ) train_dataset = train_dataset.shuffle(buffer_size=10000) train_dataloader = DataLoader(train_dataset, batch_size=160, shuffle=True) eval_dataloader = DataLoader(valid_dataset, batch_size=160) return train_dataloader, eval_dataloader def main(): # Accelerator. logging_dir = "data_save_dir/log" accelerator = Accelerator( gradient_accumulation_steps=1, mixed_precision="bf16", log_with="tensorboard", logging_dir=logging_dir, ) # We need to initialize the trackers we use, and also store our configuration. # The trackers initialize automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("test") tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") # Load datasets and create dataloaders. train_dataloader, _ = create_dataloaders(tokenizer, accelerator) train_dataloader = accelerator.prepare(train_dataloader) for step, batch in enumerate(train_dataloader, start=1): print(step) accelerator.end_training() if __name__ == "__main__": main() ``` ## Expected results Being able to run the code for streaming datasets with multi-GPU. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.5.2 - Platform: linux - Python version: 3.9.12 - PyArrow version: 9.0.0 @lhoestq I have no idea why this freeze happens; when I removed the streaming mode everything worked fine, so I know the issue is caused by the streaming mode of the dataloader not working well with the multi-GPU setting. Since the datasets are large, I hope to keep the streaming mode. I very much appreciate your help.
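For reference, a commonly suggested pattern for streaming datasets with multiple GPUs is to shard the stream per process so that each rank iterates only its own slice. A minimal sketch, assuming `datasets>=2.8` (where `datasets.distributed.split_dataset_by_node` is available); the `RANK`/`WORLD_SIZE` environment variables stand in for the values `accelerate launch` exports:

```python
import os

from datasets import load_dataset
from datasets.distributed import split_dataset_by_node

# Hypothetical sketch, not the reporter's code: requires datasets>=2.8.
rank = int(os.environ.get("RANK", 0))
world_size = int(os.environ.get("WORLD_SIZE", 1))

train_data = load_dataset("c4", "en", split="train", streaming=True)
# Each process keeps a disjoint shard of the stream, so no rank waits
# on examples that another rank is supposed to consume.
train_data = split_dataset_by_node(train_data, rank=rank, world_size=world_size)

for i, example in enumerate(train_data):
    if i >= 3:
        break
    print(rank, example["url"])
```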
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/5123/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5123/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/5712
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5712/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5712/comments
https://api.github.com/repos/huggingface/datasets/issues/5712/events
https://github.com/huggingface/datasets/issues/5712
1,655,972,106
I_kwDODunzps5itCEK
5,712
load_dataset in v2.11.0 raises "ValueError: seek of closed file" in np.load()
{ "avatar_url": "https://avatars.githubusercontent.com/u/1219084?v=4", "events_url": "https://api.github.com/users/rcasero/events{/privacy}", "followers_url": "https://api.github.com/users/rcasero/followers", "following_url": "https://api.github.com/users/rcasero/following{/other_user}", "gists_url": "https://api.github.com/users/rcasero/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rcasero", "id": 1219084, "login": "rcasero", "node_id": "MDQ6VXNlcjEyMTkwODQ=", "organizations_url": "https://api.github.com/users/rcasero/orgs", "received_events_url": "https://api.github.com/users/rcasero/received_events", "repos_url": "https://api.github.com/users/rcasero/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rcasero/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rcasero/subscriptions", "type": "User", "url": "https://api.github.com/users/rcasero", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "Closing since this is a duplicate of #5711", "> Closing since this is a duplicate of #5711\r\n\r\nSorry @mariosasko , my internet went down went submitting the issue, and somehow it ended up creating a duplicate" ]
2023-04-05T16:47:10Z
2023-04-06T08:32:37Z
2023-04-05T17:17:44Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Hi, I have some `load_dataset()` code for a custom offline dataset that works with datasets v2.10.1. ```python ds = datasets.load_dataset(path=dataset_dir, name=configuration, data_dir=dataset_dir, cache_dir=cache_dir, aux_dir=aux_dir, # download_mode=datasets.DownloadMode.FORCE_REDOWNLOAD, num_proc=18) ``` After upgrading datasets to 2.11.0, it fails with the error ``` Traceback (most recent call last): File "<string>", line 2, in <module> File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/site-packages/datasets/load.py", line 1791, in load_dataset builder_instance.download_and_prepare( File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/site-packages/datasets/builder.py", line 891, in download_and_prepare self._download_and_prepare( File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/site-packages/datasets/builder.py", line 1651, in _download_and_prepare super()._download_and_prepare( File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/site-packages/datasets/builder.py", line 964, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/home/ramon.casero/.cache/huggingface/modules/datasets_modules/datasets/71f67f69e6e00e139903a121f96b71f39b65a6b6aaeb0862e6a5da3a3f565b4c/mydataset.py", line 682, in _split_generators self.some_function() File "/home/ramon.casero/.cache/huggingface/modules/datasets_modules/datasets/71f67f69e6e00e139903a121f96b71f39b65a6b6aaeb0862e6a5da3a3f565b4c/mydataset.py", line 1314, in some_function() x_df = pd.DataFrame({'cell_type_descriptor': fp['x'].tolist()}) File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/site-packages/numpy/lib/npyio.py", line 248, in __getitem__ bytes = self.zip.open(key) File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/zipfile.py", line 1530, in open fheader = zef_file.read(sizeFileHeader) File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/zipfile.py", line 744, in read self._file.seek(self._pos) ValueError: seek of closed file ``` ### Steps to reproduce the bug Sorry, I cannot share the data or code because they are not mine to share, but the point of failure is a call in `some_function()` ```python with np.load(filename) as fp: x_df = pd.DataFrame({'feature': fp['x'].tolist()}) ``` I'll try to generate a short snippet that reproduces the error. ### Expected behavior I would expect `load_dataset` to work with the custom dataset generation script in v2.11.0 the same way it does in v2.10.1, without `np.load()` raising `ValueError: seek of closed file`. ### Environment info - `datasets` version: 2.11.0 - Platform: Linux-4.18.0-483.el8.x86_64-x86_64-with-glibc2.28 - Python version: 3.10.8 - Huggingface_hub version: 0.12.0 - PyArrow version: 11.0.0 - Pandas version: 1.5.2 - numpy: 1.24.2 - This is an offline dataset that uses `datasets.config.HF_DATASETS_OFFLINE = True` in the generation script.
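For context, the failing pattern in isolation looks like the sketch below; run standalone it completes cleanly, and the `ValueError` only surfaces when the same `np.load` context manager executes inside a dataset script under v2.11.0. The file name and array key here are hypothetical stand-ins for the private data:

```python
import numpy as np
import pandas as pd

# Hypothetical stand-in for the private data: a tiny .npz archive.
np.savez("toy.npz", x=np.arange(6).reshape(2, 3))

# Same access pattern as the failing dataset script: open the archive,
# pull one array out, and build a DataFrame from it.
with np.load("toy.npz") as fp:
    x_df = pd.DataFrame({"feature": fp["x"].tolist()})

print(x_df)
```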
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5712/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5712/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/5559
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5559/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5559/comments
https://api.github.com/repos/huggingface/datasets/issues/5559/events
https://github.com/huggingface/datasets/pull/5559
1,593,676,489
PR_kwDODunzps5KcKSb
5,559
Fix map suffix_template
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==6.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.011596 / 0.011353 (0.000244) | 0.005845 / 0.011008 (-0.005164) | 0.121302 / 0.038508 (0.082794) | 0.034306 / 0.023109 (0.011196) | 0.355973 / 0.275898 (0.080075) | 0.419903 / 0.323480 (0.096423) | 0.009049 / 0.007986 (0.001064) | 0.004245 / 0.004328 (-0.000084) | 0.092004 / 0.004250 (0.087753) | 0.042782 / 0.037052 (0.005730) | 0.355805 / 0.258489 (0.097316) | 0.407298 / 0.293841 (0.113457) | 0.052481 / 0.128546 (-0.076066) | 0.020880 / 0.075646 (-0.054766) | 0.379948 / 0.419271 (-0.039324) | 0.061337 / 0.043533 (0.017804) | 0.359829 / 0.255139 (0.104690) | 0.379244 / 0.283200 (0.096044) | 0.116692 / 0.141683 (-0.024990) | 1.733717 / 1.452155 (0.281562) | 1.700246 / 1.492716 (0.207530) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.014622 / 0.018006 (-0.003384) | 0.518777 / 0.000490 (0.518288) | 0.004086 / 0.000200 (0.003886) | 0.000136 / 0.000054 (0.000082) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.031208 / 0.037411 (-0.006204) | 0.143003 / 0.014526 (0.128477) | 0.132625 / 0.176557 (-0.043932) | 0.187681 / 0.737135 (-0.549455) | 0.136576 / 0.296338 (-0.159763) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.626516 / 0.215209 (0.411307) | 6.282558 / 2.077655 (4.204904) | 2.702686 
/ 1.504120 (1.198566) | 2.287445 / 1.541195 (0.746250) | 2.333014 / 1.468490 (0.864524) | 1.227815 / 4.584777 (-3.356962) | 5.545640 / 3.745712 (1.799928) | 4.953226 / 5.269862 (-0.316635) | 2.774549 / 4.565676 (-1.791128) | 0.145257 / 0.424275 (-0.279018) | 0.014887 / 0.007607 (0.007280) | 0.812226 / 0.226044 (0.586182) | 8.002727 / 2.268929 (5.733798) | 3.314852 / 55.444624 (-52.129773) | 2.602348 / 6.876477 (-4.274128) | 2.593511 / 2.142072 (0.451438) | 1.440498 / 4.805227 (-3.364730) | 0.254849 / 6.500664 (-6.245815) | 0.077020 / 0.075469 (0.001551) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.487633 / 1.841788 (-0.354155) | 17.385773 / 8.074308 (9.311465) | 21.775511 / 10.191392 (11.584118) | 0.273514 / 0.680424 (-0.406910) | 0.059644 / 0.534201 (-0.474557) | 0.578710 / 0.579283 (-0.000573) | 0.630221 / 0.434364 (0.195857) | 0.632089 / 0.540337 (0.091752) | 0.762367 / 1.386936 (-0.624569) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.009513 / 0.011353 (-0.001840) | 0.006009 / 0.011008 (-0.004999) | 0.087589 / 0.038508 (0.049081) | 0.037487 / 0.023109 (0.014378) | 0.397660 / 0.275898 (0.121762) | 0.474438 / 0.323480 (0.150958) | 0.007373 / 0.007986 (-0.000613) | 0.005839 / 0.004328 (0.001511) | 0.092759 / 0.004250 (0.088509) | 0.052128 / 0.037052 (0.015075) | 0.382378 / 0.258489 (0.123889) | 0.458244 / 0.293841 (0.164403) | 0.057232 / 0.128546 (-0.071314) | 0.020662 / 0.075646 (-0.054984) | 0.110314 / 0.419271 (-0.308957) | 0.063014 / 0.043533 (0.019481) | 0.386020 / 0.255139 (0.130881) | 0.476169 / 0.283200 (0.192970) | 0.118081 / 0.141683 (-0.023602) | 1.724158 / 1.452155 (0.272003) | 1.862257 / 1.492716 (0.369541) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.224288 / 0.018006 (0.206281) | 0.523631 / 0.000490 (0.523141) | 0.004420 / 0.000200 (0.004220) | 0.000127 / 0.000054 (0.000073) |\n\n### Benchmark: 
benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.032359 / 0.037411 (-0.005052) | 0.140045 / 0.014526 (0.125519) | 0.138164 / 0.176557 (-0.038393) | 0.181068 / 0.737135 (-0.556067) | 0.143965 / 0.296338 (-0.152374) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.573809 / 0.215209 (0.358600) | 6.083247 / 2.077655 (4.005592) | 2.671258 / 1.504120 (1.167138) | 2.277062 / 1.541195 (0.735868) | 2.299544 / 1.468490 (0.831054) | 1.267351 / 4.584777 (-3.317425) | 5.494461 / 3.745712 (1.748749) | 5.083169 / 5.269862 (-0.186692) | 2.531738 / 4.565676 (-2.033938) | 0.151834 / 0.424275 (-0.272441) | 0.014123 / 0.007607 (0.006516) | 0.800222 / 0.226044 (0.574177) | 7.637624 / 2.268929 (5.368695) | 3.325574 / 55.444624 (-52.119050) | 2.563008 / 6.876477 (-4.313468) | 2.596259 / 2.142072 (0.454187) | 1.459206 / 4.805227 (-3.346021) | 0.237771 / 6.500664 (-6.262893) | 0.071854 / 0.075469 (-0.003615) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.605504 / 1.841788 (-0.236284) | 17.593594 / 8.074308 (9.519285) | 20.618005 / 10.191392 (10.426612) | 0.270938 / 0.680424 (-0.409486) | 0.026205 / 0.534201 (-0.507996) | 0.562223 / 0.579283 (-0.017060) | 0.617571 / 0.434364 (0.183207) | 0.616398 / 0.540337 (0.076060) | 0.715293 / 1.386936 (-0.671643) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#673dc0dd7d063b2313f7adcc9e0be53d4718f5cf \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==6.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after 
write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.013213 / 0.011353 (0.001860) | 0.006253 / 0.011008 (-0.004756) | 0.125175 / 0.038508 (0.086667) | 0.037491 / 0.023109 (0.014382) | 0.401379 / 0.275898 (0.125481) | 0.395826 / 0.323480 (0.072346) | 0.009224 / 0.007986 (0.001238) | 0.005163 / 0.004328 (0.000835) | 0.096490 / 0.004250 (0.092239) | 0.042473 / 0.037052 (0.005420) | 0.383713 / 0.258489 (0.125224) | 0.429234 / 0.293841 (0.135393) | 0.063261 / 0.128546 (-0.065285) | 0.020114 / 0.075646 (-0.055532) | 0.401687 / 0.419271 (-0.017585) | 0.062831 / 0.043533 (0.019298) | 0.405211 / 0.255139 (0.150072) | 0.380810 / 0.283200 (0.097610) | 0.109166 / 0.141683 (-0.032517) | 1.869580 / 1.452155 (0.417426) | 1.949947 / 1.492716 (0.457231) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.207481 / 0.018006 (0.189475) | 0.504161 / 0.000490 (0.503671) | 0.008429 / 0.000200 (0.008229) | 0.000101 / 0.000054 (0.000047) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.029182 / 0.037411 (-0.008229) | 0.126284 / 0.014526 (0.111758) | 0.140381 / 0.176557 (-0.036175) | 0.175878 / 0.737135 (-0.561257) | 0.138824 / 0.296338 (-0.157514) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.643658 / 0.215209 (0.428449) | 6.396224 / 2.077655 (4.318569) | 2.600702 / 1.504120 (1.096582) | 2.176721 / 1.541195 (0.635526) | 2.216116 / 1.468490 (0.747626) | 1.235069 / 4.584777 (-3.349708) | 5.457228 / 3.745712 (1.711516) | 3.060455 / 5.269862 (-2.209407) | 2.028123 / 4.565676 (-2.537554) | 0.141617 / 0.424275 (-0.282658) | 0.016596 / 0.007607 (0.008989) | 0.804915 / 0.226044 (0.578870) | 7.968821 / 2.268929 (5.699893) | 3.340650 / 55.444624 (-52.103974) | 2.533620 / 6.876477 (-4.342856) | 2.457388 / 2.142072 (0.315315) | 1.486527 / 4.805227 (-3.318700) | 0.253767 / 6.500664 (-6.246897) | 0.082192 / 0.075469 (0.006723) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.470896 / 1.841788 (-0.370892) | 17.566637 / 8.074308 (9.492329) | 23.144148 / 10.191392 (12.952756) | 0.235510 / 0.680424 (-0.444913) | 0.046051 / 0.534201 (-0.488150) | 0.559954 / 0.579283 (-0.019329) | 0.645390 / 0.434364 (0.211026) | 0.690983 / 0.540337 (0.150646) 
| 0.776252 / 1.386936 (-0.610684) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.010564 / 0.011353 (-0.000789) | 0.006150 / 0.011008 (-0.004858) | 0.100030 / 0.038508 (0.061522) | 0.036873 / 0.023109 (0.013764) | 0.448508 / 0.275898 (0.172610) | 0.492593 / 0.323480 (0.169113) | 0.007337 / 0.007986 (-0.000648) | 0.004804 / 0.004328 (0.000475) | 0.099218 / 0.004250 (0.094967) | 0.055513 / 0.037052 (0.018461) | 0.462147 / 0.258489 (0.203658) | 0.510229 / 0.293841 (0.216388) | 0.055307 / 0.128546 (-0.073239) | 0.021989 / 0.075646 (-0.053657) | 0.118487 / 0.419271 (-0.300785) | 0.071752 / 0.043533 (0.028219) | 0.456572 / 0.255139 (0.201433) | 0.475160 / 0.283200 (0.191961) | 0.117472 / 0.141683 (-0.024211) | 1.813212 / 1.452155 (0.361058) | 1.908413 / 1.492716 (0.415696) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.352929 / 0.018006 (0.334923) | 0.543874 / 0.000490 (0.543384) | 0.078529 / 0.000200 (0.078329) | 0.000669 / 0.000054 (0.000614) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.033157 / 0.037411 (-0.004254) | 0.162503 / 0.014526 (0.147977) | 0.146424 / 0.176557 (-0.030132) | 0.201781 / 0.737135 (-0.535354) | 0.168110 / 0.296338 (-0.128229) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.644205 / 0.215209 (0.428996) | 6.327519 / 2.077655 (4.249865) | 2.728102 / 1.504120 (1.223982) | 2.306426 / 1.541195 (0.765232) | 2.373125 / 1.468490 
(0.904635) | 1.350649 / 4.584777 (-3.234128) | 5.652714 / 3.745712 (1.907002) | 3.175335 / 5.269862 (-2.094526) | 2.222902 / 4.565676 (-2.342775) | 0.160609 / 0.424275 (-0.263666) | 0.015596 / 0.007607 (0.007989) | 0.790357 / 0.226044 (0.564313) | 8.289758 / 2.268929 (6.020830) | 3.479215 / 55.444624 (-51.965410) | 2.860063 / 6.876477 (-4.016413) | 2.806720 / 2.142072 (0.664648) | 1.639046 / 4.805227 (-3.166181) | 0.267017 / 6.500664 (-6.233648) | 0.083990 / 0.075469 (0.008521) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.632262 / 1.841788 (-0.209525) | 17.794357 / 8.074308 (9.720049) | 21.203547 / 10.191392 (11.012155) | 0.250899 / 0.680424 (-0.429525) | 0.024502 / 0.534201 (-0.509699) | 0.519960 / 0.579283 (-0.059323) | 0.615412 / 0.434364 (0.181048) | 0.641914 / 0.540337 (0.101577) | 0.772355 / 1.386936 (-0.614581) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#32cc4d10243b0feb69650f007d010971fd861dc1 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==6.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.009501 / 0.011353 (-0.001852) | 0.005262 / 0.011008 (-0.005747) | 0.100809 / 0.038508 (0.062301) | 0.036601 / 0.023109 (0.013492) | 0.299612 / 0.275898 (0.023714) | 0.366970 / 0.323480 (0.043490) | 0.007879 / 0.007986 (-0.000107) | 0.004216 / 0.004328 (-0.000113) | 0.076749 / 0.004250 (0.072498) | 0.042081 / 0.037052 (0.005029) | 0.299572 / 0.258489 (0.041083) | 0.339687 / 0.293841 (0.045846) | 0.038706 / 0.128546 (-0.089840) | 0.012295 / 0.075646 (-0.063352) | 0.336172 / 0.419271 (-0.083100) | 0.047524 / 0.043533 (0.003992) | 0.296800 / 0.255139 (0.041661) | 0.331592 / 0.283200 (0.048393) | 0.101191 / 0.141683 (-0.040491) | 1.486200 / 1.452155 (0.034046) | 1.509955 / 1.492716 (0.017239) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.204735 / 0.018006 (0.186728) | 0.446381 / 0.000490 (0.445891) | 0.005177 / 0.000200 (0.004977) 
| 0.000099 / 0.000054 (0.000045) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.028655 / 0.037411 (-0.008756) | 0.116559 / 0.014526 (0.102033) | 0.122551 / 0.176557 (-0.054006) | 0.189764 / 0.737135 (-0.547372) | 0.126446 / 0.296338 (-0.169892) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.400104 / 0.215209 (0.184895) | 4.001524 / 2.077655 (1.923869) | 1.779267 / 1.504120 (0.275147) | 1.580168 / 1.541195 (0.038974) | 1.684100 / 1.468490 (0.215610) | 0.703354 / 4.584777 (-3.881423) | 3.828131 / 3.745712 (0.082419) | 2.098500 / 5.269862 (-3.171362) | 1.331161 / 4.565676 (-3.234516) | 0.085417 / 0.424275 (-0.338858) | 0.012380 / 0.007607 (0.004772) | 0.504189 / 0.226044 (0.278144) | 5.094672 / 2.268929 (2.825743) | 2.264352 / 55.444624 (-53.180272) | 1.909573 / 6.876477 (-4.966904) | 2.005425 / 2.142072 (-0.136648) | 0.840893 / 4.805227 (-3.964335) | 0.164689 / 6.500664 (-6.335975) | 0.062754 / 0.075469 (-0.012715) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.250001 / 1.841788 (-0.591786) | 14.993313 / 8.074308 (6.919005) | 14.880601 / 10.191392 (4.689209) | 0.175141 / 0.680424 (-0.505283) | 0.028952 / 0.534201 (-0.505249) | 0.447073 / 0.579283 (-0.132210) | 0.445993 / 0.434364 (0.011629) | 0.525527 / 0.540337 (-0.014811) | 0.613156 / 1.386936 (-0.773780) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007796 / 0.011353 (-0.003557) | 0.005399 / 0.011008 (-0.005609) | 0.078240 / 0.038508 (0.039732) | 0.035303 / 0.023109 (0.012193) | 0.364603 / 0.275898 (0.088705) | 0.400794 / 0.323480 (0.077314) | 0.006152 / 0.007986 (-0.001834) | 0.004324 / 0.004328 (-0.000004) | 0.074949 / 0.004250 (0.070698) | 0.051939 / 0.037052 (0.014887) | 0.377079 / 0.258489 (0.118590) | 0.413630 / 0.293841 (0.119789) | 0.037567 / 0.128546 (-0.090979) | 0.012793 / 0.075646 (-0.062854) | 0.089013 / 0.419271 (-0.330258) | 0.050748 / 0.043533 (0.007215) | 0.370100 / 0.255139 (0.114961) | 0.384838 / 0.283200 (0.101638) | 0.105840 / 0.141683 (-0.035843) | 1.476490 / 1.452155 (0.024335) | 1.544688 / 1.492716 (0.051972) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.220987 / 0.018006 (0.202981) | 0.443801 / 0.000490 (0.443311) | 0.005747 / 0.000200 (0.005547) | 0.000106 / 0.000054 (0.000051) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.030187 / 0.037411 (-0.007225) | 0.118230 / 0.014526 (0.103704) | 0.126810 / 0.176557 (-0.049746) | 0.200482 / 0.737135 (-0.536654) | 0.130831 / 0.296338 (-0.165507) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.423231 / 0.215209 (0.208022) | 4.196576 / 2.077655 (2.118921) | 1.992919 / 1.504120 (0.488799) | 1.809172 / 1.541195 (0.267977) | 1.932706 / 1.468490 (0.464216) | 0.727319 / 4.584777 (-3.857458) | 3.833295 / 3.745712 (0.087583) | 3.527005 / 5.269862 (-1.742857) | 1.937348 / 4.565676 (-2.628329) | 0.088713 / 0.424275 (-0.335562) | 0.012711 / 0.007607 (0.005104) | 0.531385 / 0.226044 (0.305341) | 5.308051 / 2.268929 (3.039123) | 2.493494 / 55.444624 (-52.951131) | 2.168359 / 6.876477 (-4.708118) | 2.258160 / 2.142072 (0.116088) | 0.865629 / 4.805227 (-3.939598) | 0.171281 / 6.500664 (-6.329383) | 0.065746 / 0.075469 (-0.009723) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.290378 / 1.841788 (-0.551409) | 15.900804 / 8.074308 (7.826496) | 14.809614 / 10.191392 (4.618222) | 0.177287 / 0.680424 (-0.503137) | 0.017875 / 0.534201 (-0.516326) | 0.429646 / 0.579283 (-0.149637) | 0.451646 / 0.434364 (0.017282) | 0.545669 / 0.540337 (0.005332) | 0.633215 / 1.386936 (-0.753721) 
|\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#2c67b5f4bc9cea088e977a135644d38da8c144ff \"CML watermark\")\n" ]
2023-02-21T15:26:26Z
2023-02-21T17:21:37Z
2023-02-21T17:14:29Z
MEMBER
null
null
null
#5455 introduced a small bug that led `map` to ignore the `suffix_template` argument and not add suffixes to cached files in multiprocessing. I fixed this and also improved a few things: - regarding logging: "Loading cached processed dataset" is now logged only once even in multiprocessing (it used to be logged `num_proc` times) - regarding `new_fingerprint`: I made sure that the returned dataset satisfies `ds._fingerprint == new_fingerprint` if `new_fingerprint` is passed to `map`
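To make the fixed behavior concrete, a small usage sketch (file names are illustrative); with `num_proc=2`, each worker should now write its own suffixed cache file, e.g. `mapped_00000_of_00002.arrow`, instead of ignoring the template:

```python
from datasets import Dataset

ds = Dataset.from_dict({"x": list(range(1000))})

# The suffix_template below matches the library default; it is spelled
# out only to show where the per-process suffix comes from.
ds2 = ds.map(
    lambda example: {"y": example["x"] * 2},
    num_proc=2,
    cache_file_name="mapped.arrow",
    suffix_template="_{rank:05d}_of_{num_proc:05d}",
)
```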
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 2, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/5559/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5559/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5559.diff", "html_url": "https://github.com/huggingface/datasets/pull/5559", "merged_at": "2023-02-21T17:14:29Z", "patch_url": "https://github.com/huggingface/datasets/pull/5559.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5559" }
https://api.github.com/repos/huggingface/datasets/issues/6971
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6971/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6971/comments
https://api.github.com/repos/huggingface/datasets/issues/6971/events
https://github.com/huggingface/datasets/pull/6971
2,351,830,856
PR_kwDODunzps5yZoc3
6,971
packaging: Remove useless dependencies
{ "avatar_url": "https://avatars.githubusercontent.com/u/9336514?v=4", "events_url": "https://api.github.com/users/daskol/events{/privacy}", "followers_url": "https://api.github.com/users/daskol/followers", "following_url": "https://api.github.com/users/daskol/following{/other_user}", "gists_url": "https://api.github.com/users/daskol/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/daskol", "id": 9336514, "login": "daskol", "node_id": "MDQ6VXNlcjkzMzY1MTQ=", "organizations_url": "https://api.github.com/users/daskol/orgs", "received_events_url": "https://api.github.com/users/daskol/received_events", "repos_url": "https://api.github.com/users/daskol/repos", "site_admin": false, "starred_url": "https://api.github.com/users/daskol/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/daskol/subscriptions", "type": "User", "url": "https://api.github.com/users/daskol", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6971). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "@HuggingFaceDocBuilderDev There is no doc for this change. Call a human.", "Haha it was me who triggered the CI for your PR", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005051 / 0.011353 (-0.006302) | 0.004831 / 0.011008 (-0.006178) | 0.063006 / 0.038508 (0.024498) | 0.031589 / 0.023109 (0.008480) | 0.296202 / 0.275898 (0.020304) | 0.274274 / 0.323480 (-0.049205) | 0.003199 / 0.007986 (-0.004786) | 0.002768 / 0.004328 (-0.001561) | 0.049422 / 0.004250 (0.045172) | 0.045174 / 0.037052 (0.008121) | 0.263814 / 0.258489 (0.005325) | 0.288125 / 0.293841 (-0.005716) | 0.027641 / 0.128546 (-0.100905) | 0.010439 / 0.075646 (-0.065207) | 0.203075 / 0.419271 (-0.216196) | 0.036259 / 0.043533 (-0.007274) | 0.245159 / 0.255139 (-0.009980) | 0.268897 / 0.283200 (-0.014303) | 0.019493 / 0.141683 (-0.122190) | 1.108330 / 1.452155 (-0.343824) | 1.155835 / 1.492716 (-0.336881) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.096860 / 0.018006 (0.078854) | 0.309428 / 0.000490 (0.308938) | 0.000197 / 0.000200 (-0.000003) | 0.000044 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.019275 / 0.037411 (-0.018136) | 0.062623 / 0.014526 (0.048098) | 0.073871 / 0.176557 (-0.102686) | 0.120410 / 0.737135 (-0.616726) | 0.075766 / 0.296338 (-0.220572) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 
5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.279876 / 0.215209 (0.064667) | 2.742429 / 2.077655 (0.664774) | 1.414368 / 1.504120 (-0.089752) | 1.293194 / 1.541195 (-0.248001) | 1.318043 / 1.468490 (-0.150447) | 0.570904 / 4.584777 (-4.013873) | 2.384386 / 3.745712 (-1.361326) | 2.757953 / 5.269862 (-2.511908) | 1.728766 / 4.565676 (-2.836910) | 0.062699 / 0.424275 (-0.361576) | 0.004951 / 0.007607 (-0.002656) | 0.332222 / 0.226044 (0.106177) | 3.407429 / 2.268929 (1.138500) | 1.777136 / 55.444624 (-53.667488) | 1.521269 / 6.876477 (-5.355207) | 1.544814 / 2.142072 (-0.597258) | 0.646249 / 4.805227 (-4.158978) | 0.117032 / 6.500664 (-6.383632) | 0.042274 / 0.075469 (-0.033195) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.016249 / 1.841788 (-0.825539) | 11.794003 / 8.074308 (3.719695) | 9.871925 / 10.191392 (-0.319467) | 0.133694 / 0.680424 (-0.546730) | 0.014904 / 0.534201 (-0.519297) | 0.287453 / 0.579283 (-0.291831) | 0.271802 / 0.434364 (-0.162561) | 0.324711 / 0.540337 (-0.215626) | 0.411812 / 1.386936 (-0.975124) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005376 / 0.011353 (-0.005977) | 0.003631 / 0.011008 (-0.007377) | 0.050154 / 0.038508 (0.011646) | 0.033665 / 0.023109 (0.010556) | 0.279062 / 0.275898 (0.003164) | 0.298899 / 0.323480 (-0.024581) | 0.004388 / 0.007986 (-0.003598) | 0.002810 / 0.004328 (-0.001518) | 0.049032 / 0.004250 (0.044781) | 0.040531 / 0.037052 (0.003478) | 0.287220 / 0.258489 (0.028731) | 0.319060 / 0.293841 (0.025219) | 0.029473 / 0.128546 (-0.099073) | 0.010317 / 0.075646 (-0.065329) | 0.058483 / 0.419271 (-0.360789) | 0.033359 / 0.043533 (-0.010174) | 0.276404 / 0.255139 (0.021265) | 0.295013 / 0.283200 (0.011813) | 0.019372 / 0.141683 (-0.122311) | 1.172624 / 1.452155 (-0.279531) | 1.176815 / 1.492716 (-0.315902) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | 
get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.097347 / 0.018006 (0.079341) | 0.306959 / 0.000490 (0.306469) | 0.000200 / 0.000200 (-0.000000) | 0.000044 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022776 / 0.037411 (-0.014635) | 0.077865 / 0.014526 (0.063340) | 0.088806 / 0.176557 (-0.087751) | 0.130448 / 0.737135 (-0.606687) | 0.090973 / 0.296338 (-0.205365) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.301168 / 0.215209 (0.085959) | 2.957634 / 2.077655 (0.879979) | 1.556999 / 1.504120 (0.052879) | 1.413940 / 1.541195 (-0.127255) | 1.427970 / 1.468490 (-0.040520) | 0.587653 / 4.584777 (-3.997124) | 0.951295 / 3.745712 (-2.794417) | 2.691004 / 5.269862 (-2.578858) | 1.755826 / 4.565676 (-2.809851) | 0.064883 / 0.424275 (-0.359392) | 0.005379 / 0.007607 (-0.002228) | 0.353790 / 0.226044 (0.127745) | 3.457747 / 2.268929 (1.188818) | 1.891884 / 55.444624 (-53.552740) | 1.616619 / 6.876477 (-5.259858) | 1.736167 / 2.142072 (-0.405906) | 0.669257 / 4.805227 (-4.135970) | 0.119620 / 6.500664 (-6.381044) | 0.041390 / 0.075469 (-0.034080) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.008851 / 1.841788 (-0.832937) | 13.151216 / 8.074308 (5.076908) | 10.398371 / 10.191392 (0.206979) | 0.143420 / 0.680424 (-0.537004) | 0.015759 / 0.534201 (-0.518442) | 0.293068 / 0.579283 (-0.286215) | 0.131449 / 0.434364 (-0.302914) | 0.334715 / 0.540337 (-0.205623) | 0.445824 / 1.386936 (-0.941112) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#087671dcaf817c906a8649404c07b0440e2732ea \"CML watermark\")\n" ]
2024-06-13T18:43:43Z
2024-06-14T14:03:34Z
2024-06-14T13:57:24Z
CONTRIBUTOR
null
null
null
Revert changes in #6396 and #6404. CVE-2023-47248 has been fixed since PyArrow v14.0.1, and the Python requirements already require `pyarrow>=15.0.0`, so the extra dependencies are no longer needed.
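A quick way to confirm the premise in a given environment (a sketch, not part of the PR):

```python
# Sanity check: the installed pyarrow already meets the floor that makes
# the hotfix unnecessary (CVE-2023-47248 was fixed in pyarrow 14.0.1).
import pyarrow
from packaging import version

assert version.parse(pyarrow.__version__) >= version.parse("15.0.0"), (
    f"pyarrow {pyarrow.__version__} is older than the declared floor 15.0.0"
)
```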
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6971/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6971/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6971.diff", "html_url": "https://github.com/huggingface/datasets/pull/6971", "merged_at": "2024-06-14T13:57:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/6971.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6971" }
https://api.github.com/repos/huggingface/datasets/issues/7190
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7190/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7190/comments
https://api.github.com/repos/huggingface/datasets/issues/7190/events
https://github.com/huggingface/datasets/issues/7190
2,562,162,725
I_kwDODunzps6Yt4Al
7,190
Datasets conflicts with fsspec 2024.9
{ "avatar_url": "https://avatars.githubusercontent.com/u/162599174?v=4", "events_url": "https://api.github.com/users/cw-igormorgado/events{/privacy}", "followers_url": "https://api.github.com/users/cw-igormorgado/followers", "following_url": "https://api.github.com/users/cw-igormorgado/following{/other_user}", "gists_url": "https://api.github.com/users/cw-igormorgado/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cw-igormorgado", "id": 162599174, "login": "cw-igormorgado", "node_id": "U_kgDOCbERBg", "organizations_url": "https://api.github.com/users/cw-igormorgado/orgs", "received_events_url": "https://api.github.com/users/cw-igormorgado/received_events", "repos_url": "https://api.github.com/users/cw-igormorgado/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cw-igormorgado/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cw-igormorgado/subscriptions", "type": "User", "url": "https://api.github.com/users/cw-igormorgado", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "Yes, I need to use the latest version of fsspec and datasets for my usecase. \r\nhttps://github.com/fsspec/s3fs/pull/888#issuecomment-2404204606\r\nhttps://github.com/apache/arrow/issues/34363#issuecomment-2403553473\r\n\r\nlast version where things install without conflict is: 2.14.4\r\n\r\nSo this issue starts from:\r\nhttps://github.com/huggingface/datasets/releases/tag/2.14.5" ]
2024-10-02T16:43:46Z
2024-10-10T07:33:18Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Installing both at their latest versions is not possible: `pip install "datasets==3.0.1" "fsspec==2024.9.0"` But using an older version of datasets works: `pip install "datasets==1.24.4" "fsspec==2024.9.0"` ### Steps to reproduce the bug `pip install "datasets==3.0.1" "fsspec==2024.9.0"` ### Expected behavior Both versions should install together. ### Environment info Debian 11, Python 3.10.15
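For what it's worth, the conflict can be read directly off the installed metadata; a sketch assuming Python 3.8+ (`importlib.metadata`), where the printed pin is only an example of what `datasets==3.0.1` might declare:

```python
from importlib.metadata import requires

# List the fsspec requirement(s) declared by the installed datasets wheel.
for req in requires("datasets") or []:
    if req.startswith("fsspec"):
        print(req)  # e.g. "fsspec[http]<=2024.6.1,>=2023.1.0"
```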
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7190/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7190/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7012
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7012/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7012/comments
https://api.github.com/repos/huggingface/datasets/issues/7012/events
https://github.com/huggingface/datasets/pull/7012
2,380,934,047
PR_kwDODunzps5z61A3
7,012
Raise an error when a nested object is expected to be a mapping that displays the object
{ "avatar_url": "https://avatars.githubusercontent.com/u/22511797?v=4", "events_url": "https://api.github.com/users/sebbyjp/events{/privacy}", "followers_url": "https://api.github.com/users/sebbyjp/followers", "following_url": "https://api.github.com/users/sebbyjp/following{/other_user}", "gists_url": "https://api.github.com/users/sebbyjp/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sebbyjp", "id": 22511797, "login": "sebbyjp", "node_id": "MDQ6VXNlcjIyNTExNzk3", "organizations_url": "https://api.github.com/users/sebbyjp/orgs", "received_events_url": "https://api.github.com/users/sebbyjp/received_events", "repos_url": "https://api.github.com/users/sebbyjp/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sebbyjp/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sebbyjp/subscriptions", "type": "User", "url": "https://api.github.com/users/sebbyjp", "user_view_type": "public" }
[]
closed
false
null
[]
null
[]
2024-06-28T18:10:59Z
2024-07-11T02:06:16Z
2024-07-11T02:06:16Z
NONE
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/159743133?v=4", "events_url": "https://api.github.com/users/mbodiai/events{/privacy}", "followers_url": "https://api.github.com/users/mbodiai/followers", "following_url": "https://api.github.com/users/mbodiai/following{/other_user}", "gists_url": "https://api.github.com/users/mbodiai/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mbodiai", "id": 159743133, "login": "mbodiai", "node_id": "O_kgDOCYV8nQ", "organizations_url": "https://api.github.com/users/mbodiai/orgs", "received_events_url": "https://api.github.com/users/mbodiai/received_events", "repos_url": "https://api.github.com/users/mbodiai/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mbodiai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mbodiai/subscriptions", "type": "Organization", "url": "https://api.github.com/users/mbodiai", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7012/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7012/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7012.diff", "html_url": "https://github.com/huggingface/datasets/pull/7012", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7012.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7012" }
https://api.github.com/repos/huggingface/datasets/issues/4657
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4657/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4657/comments
https://api.github.com/repos/huggingface/datasets/issues/4657/events
https://github.com/huggingface/datasets/issues/4657
1,296,743,133
I_kwDODunzps5NSrrd
4,657
Add SQuAD2.0 Dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/omarespejel", "id": 4755430, "login": "omarespejel", "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "repos_url": "https://api.github.com/users/omarespejel/repos", "site_admin": false, "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "type": "User", "url": "https://api.github.com/users/omarespejel", "user_view_type": "public" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
closed
false
null
[]
null
[ "Hey, It's already present [here](https://huggingface.co/datasets/squad_v2) ", "Hi! This dataset is indeed already available on the Hub. Closing." ]
2022-07-07T03:19:36Z
2022-07-12T16:14:52Z
2022-07-12T16:14:52Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
## Adding a Dataset - **Name:** *SQuAD2.0* - **Description:** *Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.* - **Paper:** *https://aclanthology.org/P18-2124.pdf* - **Data:** *https://rajpurkar.github.io/SQuAD-explorer/* - **Motivation:** *Dataset for training and evaluating models of conversational response*
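Since the comments note this dataset is already available on the Hub, loading it is a one-liner. A minimal sketch; `squad_v2` is the dataset id linked in the first comment:

```python
from datasets import load_dataset

# Load the SQuAD2.0 data already hosted on the Hub (id from the comment above).
squad_v2 = load_dataset("squad_v2")
print(squad_v2)  # DatasetDict with 'train' and 'validation' splits
```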
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4657/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4657/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/6927
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6927/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6927/comments
https://api.github.com/repos/huggingface/datasets/issues/6927/events
https://github.com/huggingface/datasets/pull/6927
2,322,260,725
PR_kwDODunzps5w1CmF
6,927
Update process.mdx: Minor Code Listings Updates and Fixes
{ "avatar_url": "https://avatars.githubusercontent.com/u/16918280?v=4", "events_url": "https://api.github.com/users/FadyMorris/events{/privacy}", "followers_url": "https://api.github.com/users/FadyMorris/followers", "following_url": "https://api.github.com/users/FadyMorris/following{/other_user}", "gists_url": "https://api.github.com/users/FadyMorris/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/FadyMorris", "id": 16918280, "login": "FadyMorris", "node_id": "MDQ6VXNlcjE2OTE4Mjgw", "organizations_url": "https://api.github.com/users/FadyMorris/orgs", "received_events_url": "https://api.github.com/users/FadyMorris/received_events", "repos_url": "https://api.github.com/users/FadyMorris/repos", "site_admin": false, "starred_url": "https://api.github.com/users/FadyMorris/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/FadyMorris/subscriptions", "type": "User", "url": "https://api.github.com/users/FadyMorris", "user_view_type": "public" }
[]
closed
false
null
[]
null
[]
2024-05-29T03:09:01Z
2024-05-29T03:12:46Z
2024-05-29T03:12:46Z
CONTRIBUTOR
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/16918280?v=4", "events_url": "https://api.github.com/users/FadyMorris/events{/privacy}", "followers_url": "https://api.github.com/users/FadyMorris/followers", "following_url": "https://api.github.com/users/FadyMorris/following{/other_user}", "gists_url": "https://api.github.com/users/FadyMorris/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/FadyMorris", "id": 16918280, "login": "FadyMorris", "node_id": "MDQ6VXNlcjE2OTE4Mjgw", "organizations_url": "https://api.github.com/users/FadyMorris/orgs", "received_events_url": "https://api.github.com/users/FadyMorris/received_events", "repos_url": "https://api.github.com/users/FadyMorris/repos", "site_admin": false, "starred_url": "https://api.github.com/users/FadyMorris/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/FadyMorris/subscriptions", "type": "User", "url": "https://api.github.com/users/FadyMorris", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6927/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6927/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6927.diff", "html_url": "https://github.com/huggingface/datasets/pull/6927", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6927.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6927" }
https://api.github.com/repos/huggingface/datasets/issues/5016
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5016/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5016/comments
https://api.github.com/repos/huggingface/datasets/issues/5016/events
https://github.com/huggingface/datasets/pull/5016
1,383,883,058
PR_kwDODunzps4_gKny
5,016
Fix tar extraction vuln
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-23T14:22:21Z
2022-09-29T12:42:26Z
2022-09-29T12:40:28Z
MEMBER
null
null
null
Fix for CVE-2007-4559 Description: Directory traversal vulnerability in the (1) extract and (2) extractall functions in the tarfile module in Python allows user-assisted remote attackers to overwrite arbitrary files via a ".." (dot dot) sequence in filenames in a TAR archive, a related issue to CVE-2001-1267. I fixed it by using the solution proposed in https://stackoverflow.com/questions/10060069/safely-extract-zip-or-tar-using-python: it blocks extraction of files whose paths are absolute or contain double dots, as well as symlinks.
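For reference, a minimal sketch of the kind of check the linked answer describes — reject archive members with absolute paths, `..` components, or link types before extracting. This illustrates the technique; it is not the exact code merged in this PR:

```python
import os
import tarfile

def safe_extract(tar: tarfile.TarFile, path: str = ".") -> None:
    base = os.path.realpath(path)
    for member in tar.getmembers():
        parts = member.name.replace("\\", "/").split("/")
        # Reject absolute paths and any ".." component outright.
        if member.name.startswith(("/", "\\")) or ".." in parts:
            raise ValueError(f"Blocked unsafe path in archive: {member.name}")
        # Reject symlinks and hardlinks, which can point outside the target dir.
        if member.issym() or member.islnk():
            raise ValueError(f"Blocked link member in archive: {member.name}")
        # Belt and braces: the resolved destination must stay under `base`.
        dest = os.path.realpath(os.path.join(base, member.name))
        if os.path.commonpath([base, dest]) != base:
            raise ValueError(f"Blocked traversal outside {base}: {member.name}")
    tar.extractall(path)
```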
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5016/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5016/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5016.diff", "html_url": "https://github.com/huggingface/datasets/pull/5016", "merged_at": "2022-09-29T12:40:28Z", "patch_url": "https://github.com/huggingface/datasets/pull/5016.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5016" }
https://api.github.com/repos/huggingface/datasets/issues/5086
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5086/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5086/comments
https://api.github.com/repos/huggingface/datasets/issues/5086/events
https://github.com/huggingface/datasets/issues/5086
1,400,216,975
I_kwDODunzps5TdZ2P
5,086
HTTPError: 404 Client Error: Not Found for url
{ "avatar_url": "https://avatars.githubusercontent.com/u/54015474?v=4", "events_url": "https://api.github.com/users/keyuchen21/events{/privacy}", "followers_url": "https://api.github.com/users/keyuchen21/followers", "following_url": "https://api.github.com/users/keyuchen21/following{/other_user}", "gists_url": "https://api.github.com/users/keyuchen21/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/keyuchen21", "id": 54015474, "login": "keyuchen21", "node_id": "MDQ6VXNlcjU0MDE1NDc0", "organizations_url": "https://api.github.com/users/keyuchen21/orgs", "received_events_url": "https://api.github.com/users/keyuchen21/received_events", "repos_url": "https://api.github.com/users/keyuchen21/repos", "site_admin": false, "starred_url": "https://api.github.com/users/keyuchen21/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/keyuchen21/subscriptions", "type": "User", "url": "https://api.github.com/users/keyuchen21", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[ "FYI @lewtun ", "Hi @km5ar, thanks for reporting.\r\n\r\nThis should be fixed in the notebook:\r\n- the filename `datasets-issues-with-hf-doc-builder.jsonl` no longer exists on the repo; instead, current filename is `datasets-issues-with-comments.jsonl`\r\n- see: https://huggingface.co/datasets/lewtun/github-issues/tree/main\r\n\r\nAnyway, depending on your version of `datasets`, you can now use:\r\n```python\r\nfrom datasets import load_dataset\r\n\r\nissues_dataset = load_dataset(\"lewtun/github-issues\")\r\nissues_dataset\r\n```\r\ninstead of:\r\n```python\r\nfrom huggingface_hub import hf_hub_url\r\n\r\ndata_files = hf_hub_url(\r\n repo_id=\"lewtun/github-issues\",\r\n filename=\"datasets-issues-with-hf-doc-builder.jsonl\",\r\n repo_type=\"dataset\",\r\n)\r\nfrom datasets import load_dataset\r\n\r\nissues_dataset = load_dataset(\"json\", data_files=data_files, split=\"train\")\r\nissues_dataset\r\n```\r\n\r\nOutput:\r\n```python\r\nIn [25]: ds = load_dataset(\"lewtun/github-issues\")\r\nDownloading: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 10.5k/10.5k [00:00<00:00, 5.75MB/s]\r\nUsing custom data configuration lewtun--github-issues-cff5093ecc410ea2\r\nDownloading and preparing dataset json/lewtun--github-issues to .../.cache/huggingface/datasets/lewtun___json/lewtun--github-issues-cff5093ecc410ea2/0.0.0/e6070c77f18f01a5ad4551a8b7edfba20b8438b7cad4d94e6ad9378022ce4aab...\r\nDownloading data: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12.2M/12.2M [00:00<00:00, 26.5MB/s]\r\nDownloading data files: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:02<00:00, 2.70s/it]\r\nExtracting data files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 1589.96it/s]\r\nDataset json downloaded and prepared to .../.cache/huggingface/datasets/lewtun___json/lewtun--github-issues-cff5093ecc410ea2/0.0.0/e6070c77f18f01a5ad4551a8b7edfba20b8438b7cad4d94e6ad9378022ce4aab. Subsequent calls will reuse this data.\r\n100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 133.95it/s]\r\n\r\nIn [26]: ds\r\nOut[26]: \r\nDatasetDict({\r\n train: Dataset({\r\n features: ['url', 'repository_url', 'labels_url', 'comments_url', 'events_url', 'html_url', 'id', 'node_id', 'number', 'title', 'user', 'labels', 'state', 'locked', 'assignee', 'assignees', 'milestone', 'comments', 'created_at', 'updated_at', 'closed_at', 'author_association', 'active_lock_reason', 'pull_request', 'body', 'timeline_url', 'performed_via_github_app', 'is_pull_request'],\r\n num_rows: 3019\r\n })\r\n})\r\n```", "Thanks for reporting @km5ar and thank you @albertvillanova for the quick solution! I'll post a fix on the source too" ]
2022-10-06T19:48:58Z
2022-10-07T15:12:01Z
2022-10-07T15:12:01Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
## Describe the bug I was following chapter 5 of the Hugging Face course: https://huggingface.co/course/chapter5/6?fw=tf However, I'm not able to download the dataset; I get a 404 error. <img width="1160" alt="iShot2022-10-06_15 54 50" src="https://user-images.githubusercontent.com/54015474/194406327-ae62c2f3-1da5-4686-8631-13d879a0edee.png"> ## Steps to reproduce the bug ```python from huggingface_hub import hf_hub_url data_files = hf_hub_url( repo_id="lewtun/github-issues", filename="datasets-issues-with-hf-doc-builder.jsonl", repo_type="dataset", ) from datasets import load_dataset issues_dataset = load_dataset("json", data_files=data_files, split="train") issues_dataset ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.5.2 - Platform: macOS-10.16-x86_64-i386-64bit - Python version: 3.9.12 - PyArrow version: 9.0.0 - Pandas version: 1.4.4
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5086/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5086/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7204
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7204/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7204/comments
https://api.github.com/repos/huggingface/datasets/issues/7204/events
https://github.com/huggingface/datasets/pull/7204
2,573,289,063
PR_kwDODunzps599Hem
7,204
fix unbatched arrow map for iterable datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/5719745?v=4", "events_url": "https://api.github.com/users/alex-hh/events{/privacy}", "followers_url": "https://api.github.com/users/alex-hh/followers", "following_url": "https://api.github.com/users/alex-hh/following{/other_user}", "gists_url": "https://api.github.com/users/alex-hh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/alex-hh", "id": 5719745, "login": "alex-hh", "node_id": "MDQ6VXNlcjU3MTk3NDU=", "organizations_url": "https://api.github.com/users/alex-hh/orgs", "received_events_url": "https://api.github.com/users/alex-hh/received_events", "repos_url": "https://api.github.com/users/alex-hh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/alex-hh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alex-hh/subscriptions", "type": "User", "url": "https://api.github.com/users/alex-hh", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_7204). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update." ]
2024-10-08T13:54:09Z
2024-10-08T14:19:47Z
2024-10-08T14:19:47Z
CONTRIBUTOR
null
null
null
Fixes the bug when applying map to an arrow-formatted iterable dataset described here: https://github.com/huggingface/datasets/issues/6833#issuecomment-2399903885 ```python from datasets import load_dataset ds = load_dataset("rotten_tomatoes", split="train", streaming=True) ds = ds.with_format("arrow").map(lambda x: x) for ex in ds: pass ``` @lhoestq
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7204/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7204/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7204.diff", "html_url": "https://github.com/huggingface/datasets/pull/7204", "merged_at": "2024-10-08T14:19:46Z", "patch_url": "https://github.com/huggingface/datasets/pull/7204.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7204" }
https://api.github.com/repos/huggingface/datasets/issues/7325
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7325/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7325/comments
https://api.github.com/repos/huggingface/datasets/issues/7325/events
https://github.com/huggingface/datasets/pull/7325
2,736,618,054
PR_kwDODunzps6FDpMp
7,325
Introduce pdf support (#7318)
{ "avatar_url": "https://avatars.githubusercontent.com/u/4812761?v=4", "events_url": "https://api.github.com/users/yabramuvdi/events{/privacy}", "followers_url": "https://api.github.com/users/yabramuvdi/followers", "following_url": "https://api.github.com/users/yabramuvdi/following{/other_user}", "gists_url": "https://api.github.com/users/yabramuvdi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yabramuvdi", "id": 4812761, "login": "yabramuvdi", "node_id": "MDQ6VXNlcjQ4MTI3NjE=", "organizations_url": "https://api.github.com/users/yabramuvdi/orgs", "received_events_url": "https://api.github.com/users/yabramuvdi/received_events", "repos_url": "https://api.github.com/users/yabramuvdi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yabramuvdi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yabramuvdi/subscriptions", "type": "User", "url": "https://api.github.com/users/yabramuvdi", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_7325). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "Hi @AndreaFrancis and @lhoestq ! Thanks for looking at the code and for all the changes and suggestions. I have worked on all your suggestions. I need to work a bit more on the tests, but I created a first version and uploaded a simple pdf to use for testing. The pdf has a couple of pages some of which have images, figures, and tables (this is exactly the pdfs for which I think keeping the pdf format is very interesting because they have multiple types of content). Will try to finish the tests as soon as possible.", "(I updated the CI a bit, I took the liberty to update your branch and re-run it on your PR @yabramuvdi)" ]
2024-12-12T18:31:18Z
2025-03-18T14:00:36Z
2025-03-18T14:00:36Z
CONTRIBUTOR
null
null
null
First implementation of the Pdf feature to support pdfs (#7318). Using [pdfplumber](https://github.com/jsvine/pdfplumber?tab=readme-ov-file#python-library) as the default library to work with pdfs. @lhoestq and @AndreaFrancis
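As a rough illustration of why pdfplumber suits mixed-content pdfs, a sketch of reading text, tables, and images page by page. The pdfplumber calls are from its documented API; `sample.pdf` is a placeholder path, and this is not the Pdf feature's actual decoding code:

```python
import pdfplumber

# Walk the pages and report each content type a page can carry.
with pdfplumber.open("sample.pdf") as pdf:  # placeholder path
    for i, page in enumerate(pdf.pages):
        text = page.extract_text() or ""
        tables = page.extract_tables()
        print(f"page {i}: {len(text)} chars of text, "
              f"{len(tables)} table(s), {len(page.images)} image(s)")
```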
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7325/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7325/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7325.diff", "html_url": "https://github.com/huggingface/datasets/pull/7325", "merged_at": "2025-03-18T14:00:36Z", "patch_url": "https://github.com/huggingface/datasets/pull/7325.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7325" }
https://api.github.com/repos/huggingface/datasets/issues/6563
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6563/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6563/comments
https://api.github.com/repos/huggingface/datasets/issues/6563/events
https://github.com/huggingface/datasets/issues/6563
2,068,302,402
I_kwDODunzps57R8pC
6,563
`ImportError`: cannot import name 'insecure_hashlib' from 'huggingface_hub.utils' (.../huggingface_hub/utils/__init__.py)
{ "avatar_url": "https://avatars.githubusercontent.com/u/79070834?v=4", "events_url": "https://api.github.com/users/wasertech/events{/privacy}", "followers_url": "https://api.github.com/users/wasertech/followers", "following_url": "https://api.github.com/users/wasertech/following{/other_user}", "gists_url": "https://api.github.com/users/wasertech/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/wasertech", "id": 79070834, "login": "wasertech", "node_id": "MDQ6VXNlcjc5MDcwODM0", "organizations_url": "https://api.github.com/users/wasertech/orgs", "received_events_url": "https://api.github.com/users/wasertech/received_events", "repos_url": "https://api.github.com/users/wasertech/repos", "site_admin": false, "starred_url": "https://api.github.com/users/wasertech/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/wasertech/subscriptions", "type": "User", "url": "https://api.github.com/users/wasertech", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "@Wauplin Do you happen to know what's up?", "<del>Installing `datasets` from `main` did the trick so I guess it will be fixed in the next release.\r\n\r\nNVM https://github.com/huggingface/datasets/blob/d26abadce0b884db32382b92422d8a6aa997d40a/src/datasets/utils/info_utils.py#L5", "@wasertech upgrading `huggingface_hub` to a newer version should fix your issue. Latest version is 0.20.2. ", "Ha yes I had pinned `tokenizers` to an old version so it downgraded `huggingface_hub`. Note to myself keep HuggingFace modules relatively close together chronologically release wise.", "Glad to know your problem's solved! ", "@Wauplin Thanks for your insight 👍", "pip install --upgrade huggingface-hub" ]
2024-01-06T02:28:54Z
2024-03-14T02:59:42Z
2024-01-06T16:13:27Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Yep its not [there](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/utils/__init__.py) anymore. ```text + python /home/trainer/sft_train.py --model_name cognitivecomputations/dolphin-2.2.1-mistral-7b --dataset_name wasertech/OneOS --load_in_4bit --use_peft --batch_size 4 --num_train_epochs 1 --learning_rate 1.41e-5 --gradient_accumulation_steps 8 --seq_length 4096 --output_dir output --log_with wandb Traceback (most recent call last): File "/home/trainer/sft_train.py", line 22, in <module> from datasets import load_dataset File "/home/trainer/llm-train/lib/python3.8/site-packages/datasets/__init__.py", line 22, in <module> from .arrow_dataset import Dataset File "/home/trainer/llm-train/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 66, in <module> from .arrow_reader import ArrowReader File "/home/trainer/llm-train/lib/python3.8/site-packages/datasets/arrow_reader.py", line 30, in <module> from .download.download_config import DownloadConfig File "/home/trainer/llm-train/lib/python3.8/site-packages/datasets/download/__init__.py", line 9, in <module> from .download_manager import DownloadManager, DownloadMode File "/home/trainer/llm-train/lib/python3.8/site-packages/datasets/download/download_manager.py", line 31, in <module> from ..utils import tqdm as hf_tqdm File "/home/trainer/llm-train/lib/python3.8/site-packages/datasets/utils/__init__.py", line 19, in <module> from .info_utils import VerificationMode File "/home/trainer/llm-train/lib/python3.8/site-packages/datasets/utils/info_utils.py", line 5, in <module> from huggingface_hub.utils import insecure_hashlib ImportError: cannot import name 'insecure_hashlib' from 'huggingface_hub.utils' (/home/trainer/llm-train/lib/python3.8/site-packages/huggingface_hub/utils/__init__.py) ``` ### Steps to reproduce the bug Using `datasets==2.16.1` and `huggingface_hub== 0.17.3`, load a dataset with `load_dataset`. ### Expected behavior The dataset should be (downloaded - if needed - and) returned. ### Environment info ```text trainer@a311ae86939e:/mnt$ pip show datasets Name: datasets Version: 2.16.1 Summary: HuggingFace community-driven open-source library of datasets Home-page: https://github.com/huggingface/datasets Author: HuggingFace Inc. Author-email: thomas@huggingface.co License: Apache 2.0 Location: /home/trainer/llm-train/lib/python3.8/site-packages Requires: packaging, pyyaml, multiprocess, pyarrow-hotfix, pandas, pyarrow, xxhash, dill, numpy, aiohttp, tqdm, fsspec, requests, filelock, huggingface-hub Required-by: trl, lm-eval, evaluate trainer@a311ae86939e:/mnt$ pip show huggingface_hub Name: huggingface-hub Version: 0.17.3 Summary: Client library to download and publish models, datasets and other repos on the huggingface.co hub Home-page: https://github.com/huggingface/huggingface_hub Author: Hugging Face, Inc. Author-email: julien@huggingface.co License: Apache Location: /home/trainer/llm-train/lib/python3.8/site-packages Requires: requests, pyyaml, packaging, typing-extensions, tqdm, filelock, fsspec Required-by: transformers, tokenizers, peft, evaluate, datasets, accelerate trainer@a311ae86939e:/mnt$ huggingface-cli env Copy-and-paste the text below in your GitHub issue. 
- huggingface_hub version: 0.17.3 - Platform: Linux-6.5.13-7-MANJARO-x86_64-with-glibc2.29 - Python version: 3.8.10 - Running in iPython ?: No - Running in notebook ?: No - Running in Google Colab ?: No - Token path ?: /home/trainer/.cache/huggingface/token - Has saved token ?: True - Who am I ?: wasertech - Configured git credential helpers: - FastAI: N/A - Tensorflow: N/A - Torch: 2.1.2 - Jinja2: 3.1.2 - Graphviz: N/A - Pydot: N/A - Pillow: 10.2.0 - hf_transfer: N/A - gradio: N/A - tensorboard: N/A - numpy: 1.24.4 - pydantic: N/A - aiohttp: 3.9.1 - ENDPOINT: https://huggingface.co - HUGGINGFACE_HUB_CACHE: /home/trainer/.cache/huggingface/hub - HUGGINGFACE_ASSETS_CACHE: /home/trainer/.cache/huggingface/assets - HF_TOKEN_PATH: /home/trainer/.cache/huggingface/token - HF_HUB_OFFLINE: False - HF_HUB_DISABLE_TELEMETRY: False - HF_HUB_DISABLE_PROGRESS_BARS: None - HF_HUB_DISABLE_SYMLINKS_WARNING: False - HF_HUB_DISABLE_EXPERIMENTAL_WARNING: False - HF_HUB_DISABLE_IMPLICIT_TOKEN: False - HF_HUB_ENABLE_HF_TRANSFER: False ```
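The resolution in the comments is simply to upgrade `huggingface_hub`. A small defensive sketch that surfaces the mismatch with a clearer message — the import path is the one from the traceback; the snippet makes no claim about which exact `huggingface_hub` version introduced `insecure_hashlib`:

```python
from importlib.metadata import version

try:
    # This is the import that datasets 2.16.1 performs internally.
    from huggingface_hub.utils import insecure_hashlib  # noqa: F401
except ImportError:
    raise SystemExit(
        f"huggingface_hub {version('huggingface_hub')} is too old for this "
        "datasets release; try: pip install --upgrade huggingface_hub"
    )
```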
{ "avatar_url": "https://avatars.githubusercontent.com/u/79070834?v=4", "events_url": "https://api.github.com/users/wasertech/events{/privacy}", "followers_url": "https://api.github.com/users/wasertech/followers", "following_url": "https://api.github.com/users/wasertech/following{/other_user}", "gists_url": "https://api.github.com/users/wasertech/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/wasertech", "id": 79070834, "login": "wasertech", "node_id": "MDQ6VXNlcjc5MDcwODM0", "organizations_url": "https://api.github.com/users/wasertech/orgs", "received_events_url": "https://api.github.com/users/wasertech/received_events", "repos_url": "https://api.github.com/users/wasertech/repos", "site_admin": false, "starred_url": "https://api.github.com/users/wasertech/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/wasertech/subscriptions", "type": "User", "url": "https://api.github.com/users/wasertech", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6563/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6563/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/6771
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6771/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6771/comments
https://api.github.com/repos/huggingface/datasets/issues/6771/events
https://github.com/huggingface/datasets/issues/6771
2,220,131,457
I_kwDODunzps6EVISB
6,771
Datasets FileNotFoundError when trying to generate examples.
{ "avatar_url": "https://avatars.githubusercontent.com/u/26197115?v=4", "events_url": "https://api.github.com/users/RitchieP/events{/privacy}", "followers_url": "https://api.github.com/users/RitchieP/followers", "following_url": "https://api.github.com/users/RitchieP/following{/other_user}", "gists_url": "https://api.github.com/users/RitchieP/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/RitchieP", "id": 26197115, "login": "RitchieP", "node_id": "MDQ6VXNlcjI2MTk3MTE1", "organizations_url": "https://api.github.com/users/RitchieP/orgs", "received_events_url": "https://api.github.com/users/RitchieP/received_events", "repos_url": "https://api.github.com/users/RitchieP/repos", "site_admin": false, "starred_url": "https://api.github.com/users/RitchieP/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/RitchieP/subscriptions", "type": "User", "url": "https://api.github.com/users/RitchieP", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "Hi! I've opened a PR in the repo to fix this issue: https://huggingface.co/datasets/RitchieP/VerbaLex_voice/discussions/6", "@mariosasko Thanks for the PR and help! Guess I could close the issue for now. Appreciate the help!" ]
2024-04-02T10:24:57Z
2024-04-04T14:22:03Z
2024-04-04T14:22:03Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Discussed in https://github.com/huggingface/datasets/discussions/6768 <div type='discussions-op-text'> <sup>Originally posted by **RitchieP** April 1, 2024</sup> Currently, I have a dataset hosted on Huggingface with a custom script [here](https://huggingface.co/datasets/RitchieP/VerbaLex_voice). I'm loading my dataset as below. ```py from datasets import load_dataset, IterableDatasetDict dataset = IterableDatasetDict() dataset["train"] = load_dataset("RitchieP/VerbaLex_voice", "ar", split="train", use_auth_token=True, streaming=True) dataset["test"] = load_dataset("RitchieP/VerbaLex_voice", "ar", split="test", use_auth_token=True, streaming=True) ``` And when I try to see the data I have loaded with ```py list(dataset["train"].take(1)) ``` And it gives me this stack trace ``` --------------------------------------------------------------------------- FileNotFoundError Traceback (most recent call last) Cell In[2], line 1 ----> 1 list(dataset["train"].take(1)) File /opt/conda/lib/python3.10/site-packages/datasets/iterable_dataset.py:1388, in IterableDataset.__iter__(self) 1385 yield formatter.format_row(pa_table) 1386 return -> 1388 for key, example in ex_iterable: 1389 if self.features: 1390 # `IterableDataset` automatically fills missing columns with None. 1391 # This is done with `_apply_feature_types_on_example`. 1392 example = _apply_feature_types_on_example( 1393 example, self.features, token_per_repo_id=self._token_per_repo_id 1394 ) File /opt/conda/lib/python3.10/site-packages/datasets/iterable_dataset.py:1044, in TakeExamplesIterable.__iter__(self) 1043 def __iter__(self): -> 1044 yield from islice(self.ex_iterable, self.n) File /opt/conda/lib/python3.10/site-packages/datasets/iterable_dataset.py:234, in ExamplesIterable.__iter__(self) 233 def __iter__(self): --> 234 yield from self.generate_examples_fn(**self.kwargs) File ~/.cache/huggingface/modules/datasets_modules/datasets/RitchieP--VerbaLex_voice/9465eaee58383cf9d7c3e14111d7abaea56398185a641b646897d6df4e4732f7/VerbaLex_voice.py:127, in VerbaLexVoiceDataset._generate_examples(self, local_extracted_archive_paths, archives, meta_path) 125 for i, audio_archive in enumerate(archives): 126 print(audio_archive) --> 127 for path, file in audio_archive: 128 _, filename = os.path.split(path) 129 if filename in metadata: File /opt/conda/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py:869, in _IterableFromGenerator.__iter__(self) 868 def __iter__(self): --> 869 yield from self.generator(*self.args, **self.kwargs) File /opt/conda/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py:919, in ArchiveIterable._iter_from_urlpath(cls, urlpath, download_config) 915 @classmethod 916 def _iter_from_urlpath( 917 cls, urlpath: str, download_config: Optional[DownloadConfig] = None 918 ) -> Generator[Tuple, None, None]: --> 919 compression = _get_extraction_protocol(urlpath, download_config=download_config) 920 # Set block_size=0 to get faster streaming 921 # (e.g. 
for hf:// and https:// it uses streaming Requests file-like instances) 922 with xopen(urlpath, "rb", download_config=download_config, block_size=0) as f: File /opt/conda/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py:400, in _get_extraction_protocol(urlpath, download_config) 398 urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) 399 try: --> 400 with fsspec.open(urlpath, **(storage_options or {})) as f: 401 return _get_extraction_protocol_with_magic_number(f) 402 except FileNotFoundError: File /opt/conda/lib/python3.10/site-packages/fsspec/core.py:100, in OpenFile.__enter__(self) 97 def __enter__(self): 98 mode = self.mode.replace("t", "").replace("b", "") + "b" --> 100 f = self.fs.open(self.path, mode=mode) 102 self.fobjects = [f] 104 if self.compression is not None: File /opt/conda/lib/python3.10/site-packages/fsspec/spec.py:1307, in AbstractFileSystem.open(self, path, mode, block_size, cache_options, compression, **kwargs) 1305 else: 1306 ac = kwargs.pop("autocommit", not self._intrans) -> 1307 f = self._open( 1308 path, 1309 mode=mode, 1310 block_size=block_size, 1311 autocommit=ac, 1312 cache_options=cache_options, 1313 **kwargs, 1314 ) 1315 if compression is not None: 1316 from fsspec.compression import compr File /opt/conda/lib/python3.10/site-packages/fsspec/implementations/local.py:180, in LocalFileSystem._open(self, path, mode, block_size, **kwargs) 178 if self.auto_mkdir and "w" in mode: 179 self.makedirs(self._parent(path), exist_ok=True) --> 180 return LocalFileOpener(path, mode, fs=self, **kwargs) File /opt/conda/lib/python3.10/site-packages/fsspec/implementations/local.py:302, in LocalFileOpener.__init__(self, path, mode, autocommit, fs, compression, **kwargs) 300 self.compression = get_compression(path, compression) 301 self.blocksize = io.DEFAULT_BUFFER_SIZE --> 302 self._open() File /opt/conda/lib/python3.10/site-packages/fsspec/implementations/local.py:307, in LocalFileOpener._open(self) 305 if self.f is None or self.f.closed: 306 if self.autocommit or "w" not in self.mode: --> 307 self.f = open(self.path, mode=self.mode) 308 if self.compression: 309 compress = compr[self.compression] FileNotFoundError: [Errno 2] No such file or directory: '/kaggle/working/h' ``` After looking into the stack trace, and referring to the source codes, it looks like its trying to access a directory in the notebook's environment and I don't understand why. Not sure if its a bug in Datasets library, so I'm opening a discussions first. Feel free to ask for more information if needed. Appreciate any help in advance!</div> Hi, referring to the discussion title above, after further digging, I think it's an issue within the datasets library. But not quite sure where it is. If you require any more info or actions from me, please let me know. Appreciate any help in advance!
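One detail worth noting: the failing path `'/kaggle/working/h'` ends in a single character, the classic symptom of iterating over a string where a list of archive URLs was expected — iterating a `str` yields characters, and the bare `h` (the first letter of an `hf://` or `https://` URL) then resolves relative to the working directory. A hedged sketch of that failure mode, consistent with the traceback but not confirmed against the actual loading script:

```python
archives = "https://example.com/audio/ar.tar"  # one URL, not a list of URLs

# Buggy: iterating a string yields characters; the first "archive" is "h",
# which a local filesystem resolves to "<cwd>/h" -> FileNotFoundError.
for audio_archive in archives:
    print(audio_archive)  # "h"
    break

# Safer: normalize to a list before iterating.
archive_list = [archives] if isinstance(archives, str) else archives
for audio_archive in archive_list:
    print(audio_archive)  # "https://example.com/audio/ar.tar"
```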
{ "avatar_url": "https://avatars.githubusercontent.com/u/26197115?v=4", "events_url": "https://api.github.com/users/RitchieP/events{/privacy}", "followers_url": "https://api.github.com/users/RitchieP/followers", "following_url": "https://api.github.com/users/RitchieP/following{/other_user}", "gists_url": "https://api.github.com/users/RitchieP/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/RitchieP", "id": 26197115, "login": "RitchieP", "node_id": "MDQ6VXNlcjI2MTk3MTE1", "organizations_url": "https://api.github.com/users/RitchieP/orgs", "received_events_url": "https://api.github.com/users/RitchieP/received_events", "repos_url": "https://api.github.com/users/RitchieP/repos", "site_admin": false, "starred_url": "https://api.github.com/users/RitchieP/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/RitchieP/subscriptions", "type": "User", "url": "https://api.github.com/users/RitchieP", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6771/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6771/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/5739
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5739/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5739/comments
https://api.github.com/repos/huggingface/datasets/issues/5739/events
https://github.com/huggingface/datasets/issues/5739
1,663,762,901
I_kwDODunzps5jKwHV
5,739
weird result during dataset split when data path starts with `/data`
{ "avatar_url": "https://avatars.githubusercontent.com/u/1772912?v=4", "events_url": "https://api.github.com/users/airlsyn/events{/privacy}", "followers_url": "https://api.github.com/users/airlsyn/followers", "following_url": "https://api.github.com/users/airlsyn/following{/other_user}", "gists_url": "https://api.github.com/users/airlsyn/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/airlsyn", "id": 1772912, "login": "airlsyn", "node_id": "MDQ6VXNlcjE3NzI5MTI=", "organizations_url": "https://api.github.com/users/airlsyn/orgs", "received_events_url": "https://api.github.com/users/airlsyn/received_events", "repos_url": "https://api.github.com/users/airlsyn/repos", "site_admin": false, "starred_url": "https://api.github.com/users/airlsyn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/airlsyn/subscriptions", "type": "User", "url": "https://api.github.com/users/airlsyn", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "Same problem.", "hi! \r\nI think you can run python from `/data/train/raw/` directory and load dataset as `load_dataset(\"code_contests\")` to mitigate this issue as a workaround. \r\n@ericxsun Do you want to open a PR to fix the regex? As you already found the solution :) ", "> hi! I think you can run python from `/data/train/raw/` directory and load dataset as `load_dataset(\"code_contests\")` to mitigate this issue as a workaround. @ericxsun Do you want to open a PR to fix the regex? As you already found the solution :)\r\n\r\nSure, please see https://github.com/huggingface/datasets/pull/5748 @polinaeterna ", "I think `string_to_dict` is ok, and that the issue is that it gets `'/data2/train/raw/code_contests/data/test-00000-of-00001-9c49eeff30aacaa8.parquet'` as input instead of `'data/test-00000-of-00001-9c49eeff30aacaa8.parquet'`. The path should be relative to the directory being loaded by `load_dataset`" ]
2023-04-12T04:51:35Z
2023-04-21T14:20:59Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug The regex defined here https://github.com/huggingface/datasets/blob/f2607935c4e45c70c44fcb698db0363ca7ba83d4/src/datasets/utils/py_utils.py#L158 causes a weird result during dataset split when the data path starts with `/data`. ### Steps to reproduce the bug 1. Clone the dataset into a local path ``` cd /data/train/raw/ git lfs clone https://huggingface.co/datasets/deepmind/code_contests.git ls /data/train/raw/code_contests # README.md data dataset_infos.json ls /data/train/raw/code_contests/data # test-00000-of-00001-9c49eeff30aacaa8.parquet # train-[0-9]+-of-[0-9]+-xx.parquet # valid-00000-of-00001-5e672c5751f060d3.parquet ``` 2. Load the data from the local path ``` from datasets import load_dataset dataset = load_dataset('/data/train/raw/code_contests') FileNotFoundError: Unable to resolve any data file that matches '['data/train/raw/code_contests/data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*']' at /data/train/raw/code_contests with any supported extension ``` Note the weird path `data/train/raw/code_contests/data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*`. While diving into `LocalDatasetModuleFactoryWithoutScript` defined in [load.py](https://github.com/huggingface/datasets/blob/f2607935c4e45c70c44fcb698db0363ca7ba83d4/src/datasets/load.py#L627) and `_get_data_files_patterns` https://github.com/huggingface/datasets/blob/f2607935c4e45c70c44fcb698db0363ca7ba83d4/src/datasets/data_files.py#L228, I found the weird behavior is caused by `string_to_dict`. 3. Check `string_to_dict` ``` p = '/data/train/raw/code_contests/data/test-00000-of-00001-9c49eeff30aacaa8.parquet' split_pattern = 'data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*' string_to_dict(p, split_pattern) # {'split': 'train/raw/code_contests/data/test'} p = '/data2/train/raw/code_contests/data/test-00000-of-00001-9c49eeff30aacaa8.parquet' string_to_dict(p, split_pattern) # {'split': 'test'} ``` Go deeper into `string_to_dict`: https://github.com/huggingface/datasets/blob/f2607935c4e45c70c44fcb698db0363ca7ba83d4/src/datasets/utils/py_utils.py#L158. 4. Test the regex: <img width="680" alt="image" src="https://user-images.githubusercontent.com/1772912/231351129-75179f01-fb9f-4f12-8fa9-0dfcc3d5f3bd.png"> <img width="679" alt="image" src="https://user-images.githubusercontent.com/1772912/231351025-009f3d83-2cf3-4e15-9ed4-6b9663dcb2ee.png"> ### Expected behavior Both paths should yield `{'split': 'test'}`, instead of the current behavior shown in step 3 of `steps to reproduce the bug`: ``` p = '/data/train/raw/code_contests/data/test-00000-of-00001-9c49eeff30aacaa8.parquet' split_pattern = 'data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*' string_to_dict(p, split_pattern) # {'split': 'train/raw/code_contests/data/test'} p = '/data2/train/raw/code_contests/data/test-00000-of-00001-9c49eeff30aacaa8.parquet' string_to_dict(p, split_pattern) # {'split': 'test'} ``` ### Environment info - Linux (Debian) - Python 3.7 - datasets 2.8.0
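One possible tightening, sketched below: restrict each `{field}` placeholder to a single path segment so an absolute prefix like `/data/` can no longer bleed into the captured split name. This mirrors the idea discussed in the comments; it is not necessarily the fix merged in #5748:

```python
import re

def string_to_dict_strict(string: str, pattern: str) -> dict:
    # Substitute each {field} with a named group that cannot match "/",
    # so placeholders stop swallowing whole directory prefixes.
    regex = re.sub(r"{(\w+)}", r"(?P<\1>[^/]+)", pattern)
    match = re.search(regex, string)
    if match is None:
        raise ValueError(f"{string!r} does not match pattern {pattern!r}")
    return match.groupdict()

p = "/data/train/raw/code_contests/data/test-00000-of-00001-9c49eeff30aacaa8.parquet"
split_pattern = "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"
print(string_to_dict_strict(p, split_pattern))  # {'split': 'test'}
```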
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5739/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5739/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7491
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7491/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7491/comments
https://api.github.com/repos/huggingface/datasets/issues/7491/events
https://github.com/huggingface/datasets/pull/7491
2,959,085,647
PR_kwDODunzps6QtBsD
7,491
docs: update cache.mdx to include HF_DATASETS_CACHE documentation
{ "avatar_url": "https://avatars.githubusercontent.com/u/129883215?v=4", "events_url": "https://api.github.com/users/Harry-Yang0518/events{/privacy}", "followers_url": "https://api.github.com/users/Harry-Yang0518/followers", "following_url": "https://api.github.com/users/Harry-Yang0518/following{/other_user}", "gists_url": "https://api.github.com/users/Harry-Yang0518/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Harry-Yang0518", "id": 129883215, "login": "Harry-Yang0518", "node_id": "U_kgDOB73cTw", "organizations_url": "https://api.github.com/users/Harry-Yang0518/orgs", "received_events_url": "https://api.github.com/users/Harry-Yang0518/received_events", "repos_url": "https://api.github.com/users/Harry-Yang0518/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Harry-Yang0518/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Harry-Yang0518/subscriptions", "type": "User", "url": "https://api.github.com/users/Harry-Yang0518", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "Already included HF_DATASETS_CACHE" ]
2025-03-30T20:35:03Z
2025-03-30T20:36:40Z
2025-03-30T20:36:40Z
NONE
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/129883215?v=4", "events_url": "https://api.github.com/users/Harry-Yang0518/events{/privacy}", "followers_url": "https://api.github.com/users/Harry-Yang0518/followers", "following_url": "https://api.github.com/users/Harry-Yang0518/following{/other_user}", "gists_url": "https://api.github.com/users/Harry-Yang0518/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Harry-Yang0518", "id": 129883215, "login": "Harry-Yang0518", "node_id": "U_kgDOB73cTw", "organizations_url": "https://api.github.com/users/Harry-Yang0518/orgs", "received_events_url": "https://api.github.com/users/Harry-Yang0518/received_events", "repos_url": "https://api.github.com/users/Harry-Yang0518/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Harry-Yang0518/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Harry-Yang0518/subscriptions", "type": "User", "url": "https://api.github.com/users/Harry-Yang0518", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7491/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7491/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7491.diff", "html_url": "https://github.com/huggingface/datasets/pull/7491", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7491.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7491" }
https://api.github.com/repos/huggingface/datasets/issues/6448
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6448/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6448/comments
https://api.github.com/repos/huggingface/datasets/issues/6448/events
https://github.com/huggingface/datasets/pull/6448
2,008,614,985
PR_kwDODunzps5gQBsE
6,448
Use parquet export if possible
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005177 / 0.011353 (-0.006176) | 0.003002 / 0.011008 (-0.008006) | 0.061915 / 0.038508 (0.023407) | 0.052065 / 0.023109 (0.028956) | 0.246114 / 0.275898 (-0.029784) | 0.273974 / 0.323480 (-0.049506) | 0.002983 / 0.007986 (-0.005003) | 0.002444 / 0.004328 (-0.001885) | 0.048424 / 0.004250 (0.044174) | 0.039609 / 0.037052 (0.002557) | 0.257771 / 0.258489 (-0.000718) | 0.286228 / 0.293841 (-0.007613) | 0.023925 / 0.128546 (-0.104621) | 0.007248 / 0.075646 (-0.068398) | 0.202205 / 0.419271 (-0.217067) | 0.037124 / 0.043533 (-0.006409) | 0.254872 / 0.255139 (-0.000267) | 0.275252 / 0.283200 (-0.007947) | 0.019251 / 0.141683 (-0.122432) | 1.074921 / 1.452155 (-0.377234) | 1.146515 / 1.492716 (-0.346202) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091998 / 0.018006 (0.073992) | 0.299146 / 0.000490 (0.298656) | 0.000240 / 0.000200 (0.000040) | 0.000054 / 0.000054 (0.000000) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.019266 / 0.037411 (-0.018145) | 0.062560 / 0.014526 (0.048034) | 0.075012 / 0.176557 (-0.101544) | 0.120077 / 0.737135 (-0.617058) | 0.077851 / 0.296338 (-0.218488) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.290629 / 0.215209 (0.075420) | 2.823847 / 2.077655 (0.746192) | 1.516966 / 1.504120 (0.012846) | 1.393383 / 1.541195 (-0.147812) | 1.427688 / 
1.468490 (-0.040802) | 0.407456 / 4.584777 (-4.177321) | 2.378280 / 3.745712 (-1.367433) | 2.689800 / 5.269862 (-2.580061) | 1.588037 / 4.565676 (-2.977640) | 0.045837 / 0.424275 (-0.378438) | 0.004884 / 0.007607 (-0.002724) | 0.340464 / 0.226044 (0.114420) | 3.377158 / 2.268929 (1.108230) | 1.897854 / 55.444624 (-53.546771) | 1.588285 / 6.876477 (-5.288191) | 1.651708 / 2.142072 (-0.490364) | 0.482018 / 4.805227 (-4.323209) | 0.101583 / 6.500664 (-6.399081) | 0.042306 / 0.075469 (-0.033163) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.948659 / 1.841788 (-0.893128) | 11.809778 / 8.074308 (3.735470) | 10.481896 / 10.191392 (0.290504) | 0.143538 / 0.680424 (-0.536885) | 0.014105 / 0.534201 (-0.520096) | 0.272278 / 0.579283 (-0.307005) | 0.264241 / 0.434364 (-0.170123) | 0.307187 / 0.540337 (-0.233150) | 0.401270 / 1.386936 (-0.985666) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004831 / 0.011353 (-0.006521) | 0.002896 / 0.011008 (-0.008112) | 0.047479 / 0.038508 (0.008971) | 0.050665 / 0.023109 (0.027555) | 0.275243 / 0.275898 (-0.000655) | 0.296547 / 0.323480 (-0.026933) | 0.004022 / 0.007986 (-0.003963) | 0.002425 / 0.004328 (-0.001904) | 0.047086 / 0.004250 (0.042836) | 0.039611 / 0.037052 (0.002558) | 0.275272 / 0.258489 (0.016783) | 0.302429 / 0.293841 (0.008588) | 0.024308 / 0.128546 (-0.104238) | 0.007167 / 0.075646 (-0.068479) | 0.052825 / 0.419271 (-0.366446) | 0.032319 / 0.043533 (-0.011213) | 0.273334 / 0.255139 (0.018195) | 0.291161 / 0.283200 (0.007961) | 0.017918 / 0.141683 (-0.123764) | 1.110005 / 1.452155 (-0.342150) | 1.176616 / 1.492716 (-0.316100) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092478 / 0.018006 (0.074471) | 0.311431 / 0.000490 (0.310942) | 0.000237 / 0.000200 (0.000037) | 0.000059 / 0.000054 (0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | 
train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021979 / 0.037411 (-0.015432) | 0.080617 / 0.014526 (0.066091) | 0.081534 / 0.176557 (-0.095023) | 0.121073 / 0.737135 (-0.616062) | 0.083235 / 0.296338 (-0.213104) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289527 / 0.215209 (0.074318) | 2.839668 / 2.077655 (0.762013) | 1.601737 / 1.504120 (0.097617) | 1.496028 / 1.541195 (-0.045167) | 1.511933 / 1.468490 (0.043443) | 0.399819 / 4.584777 (-4.184958) | 2.394147 / 3.745712 (-1.351565) | 2.520767 / 5.269862 (-2.749095) | 1.589496 / 4.565676 (-2.976180) | 0.046673 / 0.424275 (-0.377602) | 0.004858 / 0.007607 (-0.002749) | 0.357986 / 0.226044 (0.131941) | 3.376217 / 2.268929 (1.107289) | 1.981853 / 55.444624 (-53.462771) | 1.682240 / 6.876477 (-5.194236) | 1.830643 / 2.142072 (-0.311429) | 0.478286 / 4.805227 (-4.326941) | 0.099589 / 6.500664 (-6.401075) | 0.041173 / 0.075469 (-0.034296) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.985160 / 1.841788 (-0.856628) | 12.312963 / 8.074308 (4.238655) | 10.577225 / 10.191392 (0.385833) | 0.130167 / 0.680424 (-0.550257) | 0.016657 / 0.534201 (-0.517544) | 0.271330 / 0.579283 (-0.307953) | 0.276979 / 0.434364 (-0.157385) | 0.304904 / 0.540337 (-0.235434) | 0.412090 / 1.386936 (-0.974846) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#1adc80151e892122ecb60f4e0b4572b136b2dd47 \"CML watermark\")\n", "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6448). All of your documentation changes will be reflected on that endpoint.", "hooray! 
very excited about this", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005039 / 0.011353 (-0.006314) | 0.003577 / 0.011008 (-0.007431) | 0.062892 / 0.038508 (0.024384) | 0.056334 / 0.023109 (0.033225) | 0.252281 / 0.275898 (-0.023617) | 0.274945 / 0.323480 (-0.048535) | 0.003906 / 0.007986 (-0.004080) | 0.002483 / 0.004328 (-0.001845) | 0.049006 / 0.004250 (0.044756) | 0.038375 / 0.037052 (0.001323) | 0.257376 / 0.258489 (-0.001113) | 0.292512 / 0.293841 (-0.001328) | 0.027134 / 0.128546 (-0.101412) | 0.010579 / 0.075646 (-0.065068) | 0.212021 / 0.419271 (-0.207250) | 0.035851 / 0.043533 (-0.007682) | 0.258076 / 0.255139 (0.002937) | 0.271758 / 0.283200 (-0.011442) | 0.018222 / 0.141683 (-0.123461) | 1.120481 / 1.452155 (-0.331674) | 1.187007 / 1.492716 (-0.305710) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094986 / 0.018006 (0.076980) | 0.302121 / 0.000490 (0.301631) | 0.000211 / 0.000200 (0.000011) | 0.000052 / 0.000054 (-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.019260 / 0.037411 (-0.018152) | 0.062909 / 0.014526 (0.048383) | 0.075644 / 0.176557 (-0.100912) | 0.120966 / 0.737135 (-0.616170) | 0.076678 / 0.296338 (-0.219661) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.286754 / 0.215209 (0.071545) | 2.797467 / 2.077655 (0.719812) | 1.436798 / 1.504120 (-0.067322) | 1.315032 / 1.541195 
(-0.226163) | 1.367841 / 1.468490 (-0.100649) | 0.578917 / 4.584777 (-4.005860) | 2.439773 / 3.745712 (-1.305939) | 2.932779 / 5.269862 (-2.337082) | 1.843895 / 4.565676 (-2.721782) | 0.063351 / 0.424275 (-0.360925) | 0.004998 / 0.007607 (-0.002610) | 0.347385 / 0.226044 (0.121340) | 3.449969 / 2.268929 (1.181040) | 1.857734 / 55.444624 (-53.586890) | 1.541341 / 6.876477 (-5.335136) | 1.574915 / 2.142072 (-0.567158) | 0.660178 / 4.805227 (-4.145049) | 0.117686 / 6.500664 (-6.382978) | 0.042602 / 0.075469 (-0.032867) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.937735 / 1.841788 (-0.904052) | 11.962091 / 8.074308 (3.887783) | 10.401715 / 10.191392 (0.210323) | 0.142200 / 0.680424 (-0.538224) | 0.014137 / 0.534201 (-0.520064) | 0.289853 / 0.579283 (-0.289430) | 0.267100 / 0.434364 (-0.167264) | 0.323401 / 0.540337 (-0.216936) | 0.418665 / 1.386936 (-0.968271) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005480 / 0.011353 (-0.005873) | 0.003401 / 0.011008 (-0.007607) | 0.049304 / 0.038508 (0.010796) | 0.062043 / 0.023109 (0.038934) | 0.270571 / 0.275898 (-0.005327) | 0.295226 / 0.323480 (-0.028254) | 0.004152 / 0.007986 (-0.003834) | 0.002511 / 0.004328 (-0.001817) | 0.048480 / 0.004250 (0.044229) | 0.043964 / 0.037052 (0.006912) | 0.273545 / 0.258489 (0.015056) | 0.295152 / 0.293841 (0.001311) | 0.029224 / 0.128546 (-0.099322) | 0.010629 / 0.075646 (-0.065018) | 0.057433 / 0.419271 (-0.361839) | 0.033115 / 0.043533 (-0.010418) | 0.269893 / 0.255139 (0.014754) | 0.288658 / 0.283200 (0.005459) | 0.018216 / 0.141683 (-0.123467) | 1.123039 / 1.452155 (-0.329116) | 1.182892 / 1.492716 (-0.309825) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095948 / 0.018006 (0.077942) | 0.305811 / 0.000490 (0.305321) | 0.000221 / 0.000200 (0.000021) | 0.000053 / 0.000054 (-0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | 
shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022996 / 0.037411 (-0.014415) | 0.073836 / 0.014526 (0.059310) | 0.082658 / 0.176557 (-0.093899) | 0.121970 / 0.737135 (-0.615166) | 0.086096 / 0.296338 (-0.210242) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291032 / 0.215209 (0.075823) | 2.864613 / 2.077655 (0.786958) | 1.567530 / 1.504120 (0.063410) | 1.460291 / 1.541195 (-0.080903) | 1.527066 / 1.468490 (0.058576) | 0.571160 / 4.584777 (-4.013617) | 2.465261 / 3.745712 (-1.280451) | 2.915547 / 5.269862 (-2.354314) | 1.835822 / 4.565676 (-2.729855) | 0.064328 / 0.424275 (-0.359947) | 0.005061 / 0.007607 (-0.002546) | 0.357105 / 0.226044 (0.131061) | 3.491363 / 2.268929 (1.222435) | 1.943213 / 55.444624 (-53.501412) | 1.675778 / 6.876477 (-5.200699) | 1.719016 / 2.142072 (-0.423057) | 0.658993 / 4.805227 (-4.146235) | 0.122320 / 6.500664 (-6.378344) | 0.049030 / 0.075469 (-0.026439) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.964762 / 1.841788 (-0.877025) | 12.367251 / 8.074308 (4.292943) | 10.886213 / 10.191392 (0.694821) | 0.141533 / 0.680424 (-0.538891) | 0.015646 / 0.534201 (-0.518555) | 0.288583 / 0.579283 (-0.290700) | 0.280353 / 0.434364 (-0.154010) | 0.329095 / 0.540337 (-0.211242) | 0.565118 / 1.386936 (-0.821818) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#493bf695dc3ee6cc81bfd0aae6a38f70547bb752 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | 
write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006475 / 0.011353 (-0.004878) | 0.004080 / 0.011008 (-0.006928) | 0.066479 / 0.038508 (0.027971) | 0.073270 / 0.023109 (0.050161) | 0.244412 / 0.275898 (-0.031486) | 0.273778 / 0.323480 (-0.049702) | 0.003186 / 0.007986 (-0.004800) | 0.003419 / 0.004328 (-0.000910) | 0.049743 / 0.004250 (0.045492) | 0.043581 / 0.037052 (0.006529) | 0.248215 / 0.258489 (-0.010274) | 0.280873 / 0.293841 (-0.012967) | 0.029282 / 0.128546 (-0.099264) | 0.011241 / 0.075646 (-0.064405) | 0.215031 / 0.419271 (-0.204241) | 0.038764 / 0.043533 (-0.004769) | 0.259363 / 0.255139 (0.004224) | 0.279253 / 0.283200 (-0.003946) | 0.019524 / 0.141683 (-0.122159) | 1.104735 / 1.452155 (-0.347420) | 1.159823 / 1.492716 (-0.332894) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.108383 / 0.018006 (0.090377) | 0.332904 / 0.000490 (0.332415) | 0.000222 / 0.000200 (0.000022) | 0.000065 / 0.000054 (0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.020693 / 0.037411 (-0.016719) | 0.071764 / 0.014526 (0.057238) | 0.077073 / 0.176557 (-0.099484) | 0.124604 / 0.737135 (-0.612532) | 0.078057 / 0.296338 (-0.218282) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291014 / 0.215209 (0.075805) | 2.865885 / 2.077655 (0.788231) | 1.506141 / 1.504120 (0.002021) | 1.435924 / 1.541195 (-0.105271) | 1.461994 / 1.468490 (-0.006497) | 0.571779 / 4.584777 (-4.012998) | 2.461950 / 3.745712 (-1.283762) | 3.079771 / 5.269862 (-2.190091) | 1.933337 / 4.565676 (-2.632339) | 0.063405 / 0.424275 (-0.360870) | 0.005203 / 0.007607 (-0.002404) | 0.345077 / 0.226044 (0.119032) | 3.487189 / 2.268929 (1.218261) | 1.903733 / 55.444624 (-53.540891) | 1.705596 / 6.876477 (-5.170880) | 1.718849 / 2.142072 (-0.423223) | 0.658745 / 4.805227 (-4.146482) | 0.120847 / 6.500664 (-6.379817) | 0.045670 / 0.075469 (-0.029799) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.965969 / 1.841788 (-0.875819) | 13.520489 / 8.074308 (5.446181) | 12.322363 / 10.191392 (2.130971) | 0.146605 / 0.680424 (-0.533819) | 0.015061 / 0.534201 (-0.519140) | 0.298125 / 0.579283 (-0.281159) | 0.276864 / 0.434364 (-0.157500) | 0.326787 / 0.540337 (-0.213550) | 0.436897 / 1.386936 (-0.950039) 
|\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005862 / 0.011353 (-0.005491) | 0.003716 / 0.011008 (-0.007292) | 0.052849 / 0.038508 (0.014341) | 0.072114 / 0.023109 (0.049005) | 0.277800 / 0.275898 (0.001902) | 0.325321 / 0.323480 (0.001841) | 0.004428 / 0.007986 (-0.003557) | 0.002527 / 0.004328 (-0.001801) | 0.048847 / 0.004250 (0.044596) | 0.047355 / 0.037052 (0.010303) | 0.279331 / 0.258489 (0.020842) | 0.310477 / 0.293841 (0.016636) | 0.029661 / 0.128546 (-0.098886) | 0.010812 / 0.075646 (-0.064834) | 0.059803 / 0.419271 (-0.359469) | 0.033554 / 0.043533 (-0.009978) | 0.276890 / 0.255139 (0.021751) | 0.308911 / 0.283200 (0.025712) | 0.020752 / 0.141683 (-0.120931) | 1.120896 / 1.452155 (-0.331259) | 1.186428 / 1.492716 (-0.306288) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.106551 / 0.018006 (0.088545) | 0.354455 / 0.000490 (0.353966) | 0.000353 / 0.000200 (0.000153) | 0.000069 / 0.000054 (0.000015) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.023488 / 0.037411 (-0.013923) | 0.080548 / 0.014526 (0.066022) | 0.084431 / 0.176557 (-0.092126) | 0.140698 / 0.737135 (-0.596438) | 0.085692 / 0.296338 (-0.210647) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.314253 / 0.215209 (0.099044) | 2.993236 / 2.077655 (0.915582) | 1.639013 / 1.504120 (0.134893) | 1.543966 / 1.541195 (0.002771) | 1.567732 / 1.468490 (0.099242) | 0.565857 / 4.584777 
(-4.018920) | 2.545339 / 3.745712 (-1.200373) | 3.134546 / 5.269862 (-2.135316) | 1.940350 / 4.565676 (-2.625326) | 0.063847 / 0.424275 (-0.360429) | 0.005079 / 0.007607 (-0.002528) | 0.365762 / 0.226044 (0.139718) | 3.610921 / 2.268929 (1.341993) | 2.035151 / 55.444624 (-53.409473) | 1.773409 / 6.876477 (-5.103068) | 1.790332 / 2.142072 (-0.351741) | 0.683019 / 4.805227 (-4.122209) | 0.119566 / 6.500664 (-6.381099) | 0.043578 / 0.075469 (-0.031891) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.996568 / 1.841788 (-0.845219) | 14.094366 / 8.074308 (6.020058) | 12.433600 / 10.191392 (2.242208) | 0.139835 / 0.680424 (-0.540589) | 0.016454 / 0.534201 (-0.517747) | 0.294073 / 0.579283 (-0.285210) | 0.309032 / 0.434364 (-0.125332) | 0.330699 / 0.540337 (-0.209638) | 0.619392 / 1.386936 (-0.767544) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#026fbce1c93a30188b6d0646bb975da8f56e2a2f \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005389 / 0.011353 (-0.005964) | 0.003209 / 0.011008 (-0.007799) | 0.061610 / 0.038508 (0.023102) | 0.049781 / 0.023109 (0.026672) | 0.240208 / 0.275898 (-0.035690) | 0.263307 / 0.323480 (-0.060173) | 0.002908 / 0.007986 (-0.005078) | 0.002375 / 0.004328 (-0.001953) | 0.047462 / 0.004250 (0.043212) | 0.038643 / 0.037052 (0.001591) | 0.246287 / 0.258489 (-0.012202) | 0.278715 / 0.293841 (-0.015126) | 0.027507 / 0.128546 (-0.101039) | 0.010168 / 0.075646 (-0.065479) | 0.204131 / 0.419271 (-0.215140) | 0.035452 / 0.043533 (-0.008081) | 0.251721 / 0.255139 (-0.003418) | 0.266642 / 0.283200 (-0.016558) | 0.017741 / 0.141683 (-0.123942) | 1.094672 / 1.452155 (-0.357482) | 1.162715 / 1.492716 (-0.330002) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092154 / 0.018006 (0.074148) | 0.301376 / 0.000490 (0.300886) | 0.000217 / 0.000200 (0.000017) | 0.000051 / 
0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018534 / 0.037411 (-0.018877) | 0.061995 / 0.014526 (0.047469) | 0.072654 / 0.176557 (-0.103903) | 0.119501 / 0.737135 (-0.617635) | 0.073756 / 0.296338 (-0.222583) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.280066 / 0.215209 (0.064857) | 2.744207 / 2.077655 (0.666553) | 1.483367 / 1.504120 (-0.020753) | 1.386173 / 1.541195 (-0.155022) | 1.381833 / 1.468490 (-0.086657) | 0.552780 / 4.584777 (-4.031997) | 2.395541 / 3.745712 (-1.350171) | 2.747507 / 5.269862 (-2.522355) | 1.735074 / 4.565676 (-2.830602) | 0.062096 / 0.424275 (-0.362179) | 0.004905 / 0.007607 (-0.002702) | 0.338327 / 0.226044 (0.112283) | 3.365391 / 2.268929 (1.096462) | 1.839663 / 55.444624 (-53.604961) | 1.577535 / 6.876477 (-5.298942) | 1.558054 / 2.142072 (-0.584018) | 0.636520 / 4.805227 (-4.168708) | 0.116182 / 6.500664 (-6.384482) | 0.042078 / 0.075469 (-0.033391) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.938512 / 1.841788 (-0.903276) | 11.455749 / 8.074308 (3.381441) | 10.510985 / 10.191392 (0.319593) | 0.140865 / 0.680424 (-0.539559) | 0.014073 / 0.534201 (-0.520128) | 0.294747 / 0.579283 (-0.284536) | 0.266147 / 0.434364 (-0.168217) | 0.325354 / 0.540337 (-0.214984) | 0.422182 / 1.386936 (-0.964754) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005231 / 0.011353 (-0.006122) | 0.003032 / 0.011008 (-0.007977) | 0.049608 / 0.038508 (0.011099) | 0.051441 / 0.023109 (0.028332) | 0.273812 / 0.275898 (-0.002086) | 0.294318 / 0.323480 (-0.029162) | 0.003958 / 0.007986 (-0.004028) | 0.002384 / 0.004328 (-0.001944) | 0.047942 / 0.004250 (0.043691) | 0.039179 / 0.037052 (0.002127) | 0.277504 / 0.258489 (0.019014) | 0.299713 / 0.293841 (0.005872) | 0.028989 / 0.128546 (-0.099557) | 0.010267 / 0.075646 (-0.065379) | 0.058318 / 0.419271 (-0.360954) | 0.032214 / 0.043533 (-0.011318) | 0.277964 / 0.255139 (0.022825) | 0.293055 / 0.283200 (0.009856) | 0.018532 / 0.141683 (-0.123151) | 1.128620 / 1.452155 (-0.323535) | 1.187365 / 1.492716 (-0.305351) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092137 / 0.018006 (0.074130) | 0.299726 / 0.000490 (0.299236) | 0.000222 / 0.000200 (0.000022) | 0.000050 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021342 / 0.037411 (-0.016070) | 0.069943 / 0.014526 (0.055417) | 0.079862 / 0.176557 (-0.096694) | 0.118917 / 0.737135 (-0.618218) | 0.081861 / 0.296338 (-0.214477) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.295883 / 0.215209 (0.080674) | 2.881640 / 2.077655 (0.803986) | 1.597705 / 1.504120 (0.093585) | 1.473220 / 1.541195 (-0.067975) | 1.501006 / 1.468490 (0.032516) | 0.559409 / 4.584777 (-4.025368) | 2.442709 / 3.745712 (-1.303003) | 2.742139 / 5.269862 (-2.527723) | 1.726002 / 4.565676 (-2.839674) | 0.062436 / 0.424275 (-0.361840) | 0.004896 / 0.007607 (-0.002711) | 0.349203 / 0.226044 (0.123159) | 3.435175 / 2.268929 (1.166247) | 1.954888 / 55.444624 (-53.489737) | 1.666233 / 6.876477 (-5.210243) | 1.680852 / 2.142072 (-0.461221) | 0.644271 / 4.805227 (-4.160956) | 0.115160 / 6.500664 (-6.385504) | 0.040681 / 0.075469 (-0.034788) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.963810 / 1.841788 (-0.877977) | 11.860860 / 8.074308 (3.786552) | 10.541703 / 10.191392 (0.350311) | 0.131532 / 0.680424 (-0.548892) | 0.016790 / 0.534201 (-0.517411) | 0.286695 / 0.579283 (-0.292588) | 0.279628 / 0.434364 (-0.154735) | 0.324622 / 0.540337 (-0.215715) | 0.535507 / 1.386936 (-0.851429) 
|\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#11217347e4bcfe1aaf794d164a5dd9f085b2f682 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005672 / 0.011353 (-0.005681) | 0.003411 / 0.011008 (-0.007597) | 0.062528 / 0.038508 (0.024020) | 0.055209 / 0.023109 (0.032100) | 0.248366 / 0.275898 (-0.027532) | 0.279522 / 0.323480 (-0.043957) | 0.002907 / 0.007986 (-0.005079) | 0.002369 / 0.004328 (-0.001959) | 0.047982 / 0.004250 (0.043731) | 0.039009 / 0.037052 (0.001956) | 0.256422 / 0.258489 (-0.002067) | 0.288530 / 0.293841 (-0.005311) | 0.028164 / 0.128546 (-0.100382) | 0.010448 / 0.075646 (-0.065198) | 0.208863 / 0.419271 (-0.210408) | 0.036291 / 0.043533 (-0.007242) | 0.251642 / 0.255139 (-0.003497) | 0.275589 / 0.283200 (-0.007610) | 0.019839 / 0.141683 (-0.121844) | 1.092800 / 1.452155 (-0.359355) | 1.147950 / 1.492716 (-0.344766) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094920 / 0.018006 (0.076914) | 0.303049 / 0.000490 (0.302559) | 0.000199 / 0.000200 (-0.000001) | 0.000043 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018820 / 0.037411 (-0.018591) | 0.063319 / 0.014526 (0.048793) | 0.073644 / 0.176557 (-0.102912) | 0.120045 / 0.737135 (-0.617091) | 0.076219 / 0.296338 (-0.220119) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.283897 / 
0.215209 (0.068688) | 2.822836 / 2.077655 (0.745182) | 1.490505 / 1.504120 (-0.013615) | 1.359777 / 1.541195 (-0.181418) | 1.420536 / 1.468490 (-0.047954) | 0.562308 / 4.584777 (-4.022469) | 2.419249 / 3.745712 (-1.326463) | 2.827620 / 5.269862 (-2.442241) | 1.783171 / 4.565676 (-2.782505) | 0.063206 / 0.424275 (-0.361069) | 0.004966 / 0.007607 (-0.002641) | 0.339647 / 0.226044 (0.113602) | 3.378157 / 2.268929 (1.109229) | 1.873221 / 55.444624 (-53.571403) | 1.606367 / 6.876477 (-5.270109) | 1.624976 / 2.142072 (-0.517096) | 0.652653 / 4.805227 (-4.152574) | 0.117997 / 6.500664 (-6.382667) | 0.041955 / 0.075469 (-0.033514) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.961420 / 1.841788 (-0.880368) | 11.807624 / 8.074308 (3.733316) | 10.668249 / 10.191392 (0.476857) | 0.141855 / 0.680424 (-0.538569) | 0.014451 / 0.534201 (-0.519750) | 0.289706 / 0.579283 (-0.289577) | 0.268392 / 0.434364 (-0.165972) | 0.323435 / 0.540337 (-0.216903) | 0.420667 / 1.386936 (-0.966269) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005382 / 0.011353 (-0.005971) | 0.003361 / 0.011008 (-0.007647) | 0.048420 / 0.038508 (0.009912) | 0.053702 / 0.023109 (0.030593) | 0.286976 / 0.275898 (0.011078) | 0.296708 / 0.323480 (-0.026772) | 0.004013 / 0.007986 (-0.003972) | 0.002444 / 0.004328 (-0.001884) | 0.047797 / 0.004250 (0.043547) | 0.042361 / 0.037052 (0.005309) | 0.277543 / 0.258489 (0.019054) | 0.300736 / 0.293841 (0.006896) | 0.029894 / 0.128546 (-0.098653) | 0.014119 / 0.075646 (-0.061527) | 0.057636 / 0.419271 (-0.361636) | 0.032533 / 0.043533 (-0.010999) | 0.280963 / 0.255139 (0.025824) | 0.291305 / 0.283200 (0.008106) | 0.018391 / 0.141683 (-0.123292) | 1.140042 / 1.452155 (-0.312113) | 1.179485 / 1.492716 (-0.313231) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094668 / 0.018006 (0.076661) | 0.301677 / 0.000490 (0.301187) | 0.000245 / 0.000200 (0.000045) | 
0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021376 / 0.037411 (-0.016036) | 0.070628 / 0.014526 (0.056102) | 0.082249 / 0.176557 (-0.094308) | 0.120423 / 0.737135 (-0.616712) | 0.083792 / 0.296338 (-0.212546) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.298884 / 0.215209 (0.083675) | 2.931849 / 2.077655 (0.854194) | 1.591888 / 1.504120 (0.087768) | 1.455781 / 1.541195 (-0.085414) | 1.500312 / 1.468490 (0.031822) | 0.558466 / 4.584777 (-4.026311) | 2.450449 / 3.745712 (-1.295263) | 2.842768 / 5.269862 (-2.427094) | 1.755614 / 4.565676 (-2.810062) | 0.063200 / 0.424275 (-0.361075) | 0.005022 / 0.007607 (-0.002585) | 0.358282 / 0.226044 (0.132238) | 3.575392 / 2.268929 (1.306464) | 1.960258 / 55.444624 (-53.484366) | 1.675518 / 6.876477 (-5.200959) | 1.696630 / 2.142072 (-0.445442) | 0.647185 / 4.805227 (-4.158042) | 0.117038 / 6.500664 (-6.383626) | 0.041622 / 0.075469 (-0.033848) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.962503 / 1.841788 (-0.879285) | 12.194950 / 8.074308 (4.120642) | 10.662233 / 10.191392 (0.470841) | 0.131618 / 0.680424 (-0.548806) | 0.016000 / 0.534201 (-0.518201) | 0.291546 / 0.579283 (-0.287737) | 0.279537 / 0.434364 (-0.154827) | 0.328716 / 0.540337 (-0.211622) | 0.547565 / 1.386936 (-0.839371) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#4de8f5f09f60613d47b5d7eb901752321c7b6a49 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after 
write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005209 / 0.011353 (-0.006144) | 0.003017 / 0.011008 (-0.007991) | 0.062017 / 0.038508 (0.023509) | 0.048268 / 0.023109 (0.025158) | 0.246384 / 0.275898 (-0.029514) | 0.270441 / 0.323480 (-0.053039) | 0.002763 / 0.007986 (-0.005222) | 0.003140 / 0.004328 (-0.001188) | 0.048720 / 0.004250 (0.044470) | 0.038175 / 0.037052 (0.001123) | 0.254184 / 0.258489 (-0.004306) | 0.275515 / 0.293841 (-0.018326) | 0.027309 / 0.128546 (-0.101238) | 0.010507 / 0.075646 (-0.065140) | 0.210315 / 0.419271 (-0.208956) | 0.035203 / 0.043533 (-0.008329) | 0.253015 / 0.255139 (-0.002124) | 0.271465 / 0.283200 (-0.011734) | 0.019543 / 0.141683 (-0.122140) | 1.119242 / 1.452155 (-0.332913) | 1.149359 / 1.492716 (-0.343357) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.088935 / 0.018006 (0.070928) | 0.293922 / 0.000490 (0.293432) | 0.000202 / 0.000200 (0.000002) | 0.000051 / 0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018174 / 0.037411 (-0.019237) | 0.060215 / 0.014526 (0.045689) | 0.072868 / 0.176557 (-0.103689) | 0.117998 / 0.737135 (-0.619137) | 0.074159 / 0.296338 (-0.222179) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289229 / 0.215209 (0.074020) | 2.840414 / 2.077655 (0.762759) | 1.468357 / 1.504120 (-0.035763) | 1.347714 / 1.541195 (-0.193481) | 1.363704 / 1.468490 (-0.104786) | 0.572059 / 4.584777 (-4.012718) | 2.400631 / 3.745712 (-1.345081) | 2.755779 / 5.269862 (-2.514083) | 1.740937 / 4.565676 (-2.824739) | 0.063473 / 0.424275 (-0.360802) | 0.005012 / 0.007607 (-0.002595) | 0.336057 / 0.226044 (0.110012) | 3.382126 / 2.268929 (1.113197) | 1.807838 / 55.444624 (-53.636786) | 1.534594 / 6.876477 (-5.341883) | 1.529951 / 2.142072 (-0.612121) | 0.636661 / 4.805227 (-4.168566) | 0.117090 / 6.500664 (-6.383574) | 0.042310 / 0.075469 (-0.033160) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.924440 / 1.841788 (-0.917347) | 11.120517 / 8.074308 (3.046209) | 10.177210 / 10.191392 (-0.014182) | 0.139060 / 0.680424 (-0.541364) | 0.013818 / 0.534201 (-0.520383) | 0.285634 / 0.579283 
(-0.293649) | 0.268657 / 0.434364 (-0.165706) | 0.325842 / 0.540337 (-0.214496) | 0.439902 / 1.386936 (-0.947034) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005202 / 0.011353 (-0.006150) | 0.003002 / 0.011008 (-0.008006) | 0.048729 / 0.038508 (0.010221) | 0.048178 / 0.023109 (0.025069) | 0.288573 / 0.275898 (0.012675) | 0.311122 / 0.323480 (-0.012358) | 0.003953 / 0.007986 (-0.004033) | 0.002544 / 0.004328 (-0.001785) | 0.047762 / 0.004250 (0.043511) | 0.039711 / 0.037052 (0.002658) | 0.308389 / 0.258489 (0.049900) | 0.321913 / 0.293841 (0.028072) | 0.029166 / 0.128546 (-0.099380) | 0.010697 / 0.075646 (-0.064950) | 0.057758 / 0.419271 (-0.361514) | 0.032743 / 0.043533 (-0.010789) | 0.290933 / 0.255139 (0.035794) | 0.309404 / 0.283200 (0.026205) | 0.017691 / 0.141683 (-0.123992) | 1.157713 / 1.452155 (-0.294442) | 1.210485 / 1.492716 (-0.282231) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.088959 / 0.018006 (0.070953) | 0.298531 / 0.000490 (0.298041) | 0.000221 / 0.000200 (0.000021) | 0.000053 / 0.000054 (-0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021129 / 0.037411 (-0.016283) | 0.068419 / 0.014526 (0.053893) | 0.079328 / 0.176557 (-0.097228) | 0.118603 / 0.737135 (-0.618532) | 0.080489 / 0.296338 (-0.215850) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.292464 / 0.215209 (0.077254) | 2.898221 / 2.077655 (0.820566) | 
…remaining auto-generated benchmark rows omitted…\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#f22579be6c73867ac1a3c03e925abaf4872f8437 \"CML watermark\")\n", "[five further near-identical auto-generated CML benchmark comments omitted (PyArrow==8.0.0 / PyArrow==latest reports for commits 0c16e56371e50adae771288945e3389cb81a31fd, 9c427c4b1dcf84c898ae62dc521bf446bb35e0e7, 241500208da5fef64ad6ddc1cc5ab2be18f2f76d, 17ec1a7a610adba3db44f316a930b979872d4ef7, 04ffd22a30ecc7545234559edd9d23c85c6d84d9)]", "Thanks for the review, I took your comments into account !", "[two further auto-generated CML benchmark comments omitted (commits b96ff08d4aa6dbafc8a10a9d03dfabe236378bcd, ed47b9d5e9c6aa03a0aa07d8abfd3fa8241da353)]", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n…auto-generated benchmark tables continue…
| shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.280385 / 0.215209 (0.065176) | 2.764259 / 2.077655 (0.686604) | 1.501027 / 1.504120 (-0.003093) | 1.376900 / 1.541195 (-0.164295) | 1.390587 / 1.468490 (-0.077903) | 0.555180 / 4.584777 (-4.029597) | 2.354307 / 3.745712 (-1.391405) | 2.755862 / 5.269862 (-2.514000) | 1.714771 / 4.565676 (-2.850906) | 0.062507 / 0.424275 (-0.361768) | 0.004974 / 0.007607 (-0.002633) | 0.333900 / 0.226044 (0.107856) | 3.266922 / 2.268929 (0.997994) | 1.805401 / 55.444624 (-53.639223) | 1.526970 / 6.876477 (-5.349507) | 1.539425 / 2.142072 (-0.602647) | 0.629364 / 4.805227 (-4.175863) | 0.114929 / 6.500664 (-6.385735) | 0.041258 / 0.075469 (-0.034211) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.968601 / 1.841788 (-0.873187) | 11.260937 / 8.074308 (3.186629) | 10.393839 / 10.191392 (0.202447) | 0.127988 / 0.680424 (-0.552436) | 0.014564 / 0.534201 (-0.519637) | 0.286560 / 0.579283 (-0.292723) | 0.260493 / 0.434364 (-0.173871) | 0.330949 / 0.540337 (-0.209388) | 0.435798 / 1.386936 (-0.951138) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005232 / 0.011353 (-0.006121) | 0.003030 / 0.011008 (-0.007978) | 0.048513 / 0.038508 (0.010005) | 0.049501 / 0.023109 (0.026392) | 0.270545 / 0.275898 (-0.005353) | 0.289128 / 0.323480 (-0.034352) | 0.003925 / 0.007986 (-0.004061) | 0.002568 / 0.004328 (-0.001761) | 0.047692 / 0.004250 (0.043442) | 0.039854 / 0.037052 (0.002802) | 0.272654 / 0.258489 (0.014165) | 0.296275 / 0.293841 (0.002434) | 0.029027 / 0.128546 (-0.099519) | 0.010335 / 0.075646 (-0.065311) | 0.056726 / 0.419271 (-0.362546) | 0.033257 / 0.043533 (-0.010275) | 0.272672 / 0.255139 (0.017533) | 0.286298 / 0.283200 (0.003098) | 0.017877 / 0.141683 (-0.123806) | 1.150322 
/ 1.452155 (-0.301833) | 1.221031 / 1.492716 (-0.271685) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.102838 / 0.018006 (0.084832) | 0.298810 / 0.000490 (0.298320) | 0.000207 / 0.000200 (0.000007) | 0.000043 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021232 / 0.037411 (-0.016180) | 0.067949 / 0.014526 (0.053423) | 0.116487 / 0.176557 (-0.060070) | 0.124035 / 0.737135 (-0.613100) | 0.081075 / 0.296338 (-0.215263) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289098 / 0.215209 (0.073889) | 2.844476 / 2.077655 (0.766821) | 1.609576 / 1.504120 (0.105456) | 1.480453 / 1.541195 (-0.060742) | 1.489672 / 1.468490 (0.021182) | 0.589661 / 4.584777 (-3.995116) | 2.453804 / 3.745712 (-1.291908) | 2.722381 / 5.269862 (-2.547480) | 1.720251 / 4.565676 (-2.845425) | 0.066085 / 0.424275 (-0.358190) | 0.004943 / 0.007607 (-0.002664) | 0.355149 / 0.226044 (0.129104) | 3.444323 / 2.268929 (1.175395) | 1.971157 / 55.444624 (-53.473467) | 1.683029 / 6.876477 (-5.193448) | 1.672798 / 2.142072 (-0.469274) | 0.644812 / 4.805227 (-4.160416) | 0.115098 / 6.500664 (-6.385566) | 0.039883 / 0.075469 (-0.035586) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.960454 / 1.841788 (-0.881334) | 11.604732 / 8.074308 (3.530424) | 10.405481 / 10.191392 (0.214089) | 0.129146 / 0.680424 (-0.551278) | 0.014945 / 0.534201 (-0.519256) | 0.286239 / 0.579283 (-0.293044) | 0.281041 / 0.434364 (-0.153323) | 0.320448 / 0.540337 (-0.219890) | 0.554304 / 1.386936 (-0.832632) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#b2cfb7859b029654829c4dfee230812ddab1f104 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | 
read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005510 / 0.011353 (-0.005843) | 0.003575 / 0.011008 (-0.007433) | 0.062232 / 0.038508 (0.023724) | 0.051115 / 0.023109 (0.028006) | 0.250709 / 0.275898 (-0.025189) | 0.274837 / 0.323480 (-0.048642) | 0.002972 / 0.007986 (-0.005014) | 0.002708 / 0.004328 (-0.001621) | 0.048088 / 0.004250 (0.043838) | 0.038588 / 0.037052 (0.001535) | 0.252550 / 0.258489 (-0.005939) | 0.285238 / 0.293841 (-0.008603) | 0.027867 / 0.128546 (-0.100679) | 0.011000 / 0.075646 (-0.064646) | 0.206918 / 0.419271 (-0.212354) | 0.035711 / 0.043533 (-0.007822) | 0.255306 / 0.255139 (0.000167) | 0.298636 / 0.283200 (0.015436) | 0.018222 / 0.141683 (-0.123461) | 1.122276 / 1.452155 (-0.329879) | 1.196471 / 1.492716 (-0.296245) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092072 / 0.018006 (0.074066) | 0.301469 / 0.000490 (0.300979) | 0.000225 / 0.000200 (0.000025) | 0.000050 / 0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018672 / 0.037411 (-0.018739) | 0.060235 / 0.014526 (0.045709) | 0.074036 / 0.176557 (-0.102521) | 0.119578 / 0.737135 (-0.617557) | 0.073605 / 0.296338 (-0.222734) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.286474 / 0.215209 (0.071264) | 2.779427 / 2.077655 (0.701772) | 1.478746 / 1.504120 (-0.025373) | 1.362692 / 1.541195 (-0.178503) | 1.388194 / 1.468490 (-0.080296) | 0.560707 / 4.584777 (-4.024070) | 2.352846 / 3.745712 (-1.392866) | 2.784400 / 5.269862 (-2.485461) | 1.775642 / 4.565676 (-2.790035) | 0.062324 / 0.424275 (-0.361951) | 0.004938 / 0.007607 (-0.002669) | 0.334149 / 0.226044 (0.108105) | 3.319446 / 2.268929 (1.050517) | 1.810369 / 55.444624 (-53.634255) | 1.559462 / 6.876477 (-5.317014) | 1.611199 / 2.142072 (-0.530873) | 0.655984 / 4.805227 (-4.149244) | 0.118508 / 6.500664 (-6.382156) | 0.043661 / 0.075469 (-0.031808) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy 
| map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.935046 / 1.841788 (-0.906742) | 11.413501 / 8.074308 (3.339192) | 10.392314 / 10.191392 (0.200922) | 0.131507 / 0.680424 (-0.548917) | 0.014827 / 0.534201 (-0.519374) | 0.289069 / 0.579283 (-0.290214) | 0.268288 / 0.434364 (-0.166076) | 0.326843 / 0.540337 (-0.213495) | 0.441283 / 1.386936 (-0.945653) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005375 / 0.011353 (-0.005978) | 0.003549 / 0.011008 (-0.007459) | 0.048996 / 0.038508 (0.010488) | 0.051408 / 0.023109 (0.028298) | 0.272265 / 0.275898 (-0.003633) | 0.293228 / 0.323480 (-0.030252) | 0.004147 / 0.007986 (-0.003839) | 0.002673 / 0.004328 (-0.001655) | 0.048116 / 0.004250 (0.043865) | 0.039926 / 0.037052 (0.002874) | 0.276987 / 0.258489 (0.018498) | 0.302955 / 0.293841 (0.009115) | 0.029488 / 0.128546 (-0.099058) | 0.010797 / 0.075646 (-0.064849) | 0.057552 / 0.419271 (-0.361720) | 0.032827 / 0.043533 (-0.010706) | 0.270888 / 0.255139 (0.015749) | 0.289136 / 0.283200 (0.005937) | 0.018815 / 0.141683 (-0.122868) | 1.148624 / 1.452155 (-0.303530) | 1.191184 / 1.492716 (-0.301532) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091712 / 0.018006 (0.073706) | 0.311198 / 0.000490 (0.310708) | 0.000226 / 0.000200 (0.000026) | 0.000049 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022097 / 0.037411 (-0.015314) | 0.070641 / 0.014526 (0.056116) | 0.080084 / 0.176557 (-0.096472) | 0.118998 / 0.737135 (-0.618137) | 0.081827 / 0.296338 (-0.214512) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 
50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.298599 / 0.215209 (0.083390) | 2.884759 / 2.077655 (0.807105) | 1.630794 / 1.504120 (0.126674) | 1.454309 / 1.541195 (-0.086886) | 1.466795 / 1.468490 (-0.001695) | 0.565405 / 4.584777 (-4.019372) | 2.460883 / 3.745712 (-1.284829) | 2.764193 / 5.269862 (-2.505668) | 1.734270 / 4.565676 (-2.831407) | 0.063408 / 0.424275 (-0.360867) | 0.004887 / 0.007607 (-0.002720) | 0.347762 / 0.226044 (0.121717) | 3.458385 / 2.268929 (1.189457) | 1.965434 / 55.444624 (-53.479190) | 1.671047 / 6.876477 (-5.205430) | 1.665642 / 2.142072 (-0.476430) | 0.640665 / 4.805227 (-4.164562) | 0.116025 / 6.500664 (-6.384639) | 0.040147 / 0.075469 (-0.035322) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.982194 / 1.841788 (-0.859593) | 11.983487 / 8.074308 (3.909179) | 10.660605 / 10.191392 (0.469213) | 0.140647 / 0.680424 (-0.539777) | 0.015870 / 0.534201 (-0.518331) | 0.287032 / 0.579283 (-0.292251) | 0.276629 / 0.434364 (-0.157735) | 0.331171 / 0.540337 (-0.209166) | 0.575346 / 1.386936 (-0.811590) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#56433c2f6a42d5fcc5acb46c6275911c29afc371 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005014 / 0.011353 (-0.006339) | 0.003434 / 0.011008 (-0.007574) | 0.063283 / 0.038508 (0.024775) | 0.048068 / 0.023109 (0.024959) | 0.239521 / 0.275898 (-0.036377) | 0.265294 / 0.323480 (-0.058186) | 0.003790 / 0.007986 (-0.004196) | 0.002577 / 0.004328 (-0.001751) | 0.048618 / 0.004250 (0.044368) | 0.037427 / 0.037052 (0.000375) | 0.245263 / 0.258489 (-0.013226) | 0.276618 / 0.293841 (-0.017223) | 0.026615 / 0.128546 (-0.101931) | 0.010378 / 0.075646 (-0.065268) | 0.205670 / 0.419271 (-0.213601) | 0.035076 / 0.043533 (-0.008457) | 0.245062 / 0.255139 
(-0.010077) | 0.264584 / 0.283200 (-0.018616) | 0.017760 / 0.141683 (-0.123922) | 1.148061 / 1.452155 (-0.304094) | 1.192762 / 1.492716 (-0.299955) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090870 / 0.018006 (0.072864) | 0.305458 / 0.000490 (0.304968) | 0.000207 / 0.000200 (0.000007) | 0.000052 / 0.000054 (-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018597 / 0.037411 (-0.018814) | 0.060349 / 0.014526 (0.045823) | 0.074854 / 0.176557 (-0.101702) | 0.123243 / 0.737135 (-0.613892) | 0.075843 / 0.296338 (-0.220496) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.275855 / 0.215209 (0.060645) | 2.723965 / 2.077655 (0.646311) | 1.436010 / 1.504120 (-0.068110) | 1.323495 / 1.541195 (-0.217700) | 1.356234 / 1.468490 (-0.112256) | 0.564388 / 4.584777 (-4.020389) | 2.390180 / 3.745712 (-1.355532) | 2.782863 / 5.269862 (-2.486998) | 1.765048 / 4.565676 (-2.800628) | 0.062680 / 0.424275 (-0.361595) | 0.004929 / 0.007607 (-0.002678) | 0.337578 / 0.226044 (0.111533) | 3.316780 / 2.268929 (1.047851) | 1.803829 / 55.444624 (-53.640795) | 1.524585 / 6.876477 (-5.351891) | 1.549695 / 2.142072 (-0.592377) | 0.638053 / 4.805227 (-4.167174) | 0.116983 / 6.500664 (-6.383681) | 0.042251 / 0.075469 (-0.033218) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.946978 / 1.841788 (-0.894810) | 11.809483 / 8.074308 (3.735175) | 10.459974 / 10.191392 (0.268582) | 0.130015 / 0.680424 (-0.550409) | 0.013843 / 0.534201 (-0.520358) | 0.286972 / 0.579283 (-0.292311) | 0.268904 / 0.434364 (-0.165460) | 0.325591 / 0.540337 (-0.214746) | 0.439233 / 1.386936 (-0.947703) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after 
write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005804 / 0.011353 (-0.005549) | 0.003431 / 0.011008 (-0.007577) | 0.049041 / 0.038508 (0.010533) | 0.054758 / 0.023109 (0.031649) | 0.262330 / 0.275898 (-0.013568) | 0.288872 / 0.323480 (-0.034608) | 0.004016 / 0.007986 (-0.003970) | 0.002606 / 0.004328 (-0.001722) | 0.047878 / 0.004250 (0.043628) | 0.045066 / 0.037052 (0.008013) | 0.266310 / 0.258489 (0.007820) | 0.290072 / 0.293841 (-0.003768) | 0.028738 / 0.128546 (-0.099809) | 0.010667 / 0.075646 (-0.064979) | 0.057300 / 0.419271 (-0.361972) | 0.032715 / 0.043533 (-0.010818) | 0.264043 / 0.255139 (0.008904) | 0.278652 / 0.283200 (-0.004547) | 0.017873 / 0.141683 (-0.123810) | 1.125981 / 1.452155 (-0.326174) | 1.168548 / 1.492716 (-0.324168) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090997 / 0.018006 (0.072991) | 0.300807 / 0.000490 (0.300317) | 0.000223 / 0.000200 (0.000023) | 0.000043 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021510 / 0.037411 (-0.015901) | 0.068251 / 0.014526 (0.053725) | 0.082073 / 0.176557 (-0.094484) | 0.120071 / 0.737135 (-0.617064) | 0.082245 / 0.296338 (-0.214093) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.290601 / 0.215209 (0.075392) | 2.871855 / 2.077655 (0.794200) | 1.558239 / 1.504120 (0.054119) | 1.447767 / 1.541195 (-0.093427) | 1.446851 / 1.468490 (-0.021639) | 0.573990 / 4.584777 (-4.010787) | 2.439859 / 3.745712 (-1.305853) | 2.795899 / 5.269862 (-2.473963) | 1.746751 / 4.565676 (-2.818926) | 0.062100 / 0.424275 (-0.362175) | 0.004948 / 0.007607 (-0.002659) | 0.344281 / 0.226044 (0.118236) | 3.427499 / 2.268929 (1.158570) | 1.940348 / 55.444624 (-53.504276) | 1.660926 / 6.876477 (-5.215551) | 1.669485 / 2.142072 (-0.472588) | 0.634034 / 4.805227 (-4.171193) | 0.114748 / 6.500664 (-6.385916) | 0.041617 / 0.075469 (-0.033852) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op 
batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.966411 / 1.841788 (-0.875376) | 12.040753 / 8.074308 (3.966445) | 10.506542 / 10.191392 (0.315150) | 0.129659 / 0.680424 (-0.550764) | 0.015691 / 0.534201 (-0.518510) | 0.286911 / 0.579283 (-0.292372) | 0.273588 / 0.434364 (-0.160776) | 0.333642 / 0.540337 (-0.206695) | 0.568550 / 1.386936 (-0.818386) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#b38ed4705263df92ae06d89baab0932ae10065e0 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005023 / 0.011353 (-0.006330) | 0.003492 / 0.011008 (-0.007516) | 0.062808 / 0.038508 (0.024300) | 0.051649 / 0.023109 (0.028540) | 0.246871 / 0.275898 (-0.029027) | 0.273430 / 0.323480 (-0.050050) | 0.003851 / 0.007986 (-0.004135) | 0.002643 / 0.004328 (-0.001686) | 0.048499 / 0.004250 (0.044248) | 0.037713 / 0.037052 (0.000661) | 0.256431 / 0.258489 (-0.002058) | 0.306956 / 0.293841 (0.013116) | 0.027116 / 0.128546 (-0.101430) | 0.010769 / 0.075646 (-0.064877) | 0.206218 / 0.419271 (-0.213053) | 0.035592 / 0.043533 (-0.007941) | 0.249629 / 0.255139 (-0.005510) | 0.268438 / 0.283200 (-0.014761) | 0.018557 / 0.141683 (-0.123125) | 1.123988 / 1.452155 (-0.328167) | 1.158196 / 1.492716 (-0.334520) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090221 / 0.018006 (0.072215) | 0.300892 / 0.000490 (0.300402) | 0.000209 / 0.000200 (0.000009) | 0.000046 / 0.000054 (-0.000008) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018305 / 0.037411 (-0.019106) | 0.060294 / 0.014526 (0.045769) | 0.073330 / 0.176557 (-0.103227) | 0.119620 / 0.737135 (-0.617515) | 0.074611 / 0.296338 (-0.221727) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | 
shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.285347 / 0.215209 (0.070138) | 2.795144 / 2.077655 (0.717490) | 1.468321 / 1.504120 (-0.035799) | 1.343848 / 1.541195 (-0.197347) | 1.388998 / 1.468490 (-0.079492) | 0.559609 / 4.584777 (-4.025168) | 2.355056 / 3.745712 (-1.390656) | 2.798763 / 5.269862 (-2.471099) | 1.764371 / 4.565676 (-2.801305) | 0.062563 / 0.424275 (-0.361712) | 0.005101 / 0.007607 (-0.002506) | 0.339205 / 0.226044 (0.113161) | 3.336729 / 2.268929 (1.067800) | 1.801987 / 55.444624 (-53.642637) | 1.526720 / 6.876477 (-5.349757) | 1.539324 / 2.142072 (-0.602749) | 0.635805 / 4.805227 (-4.169422) | 0.138762 / 6.500664 (-6.361902) | 0.042092 / 0.075469 (-0.033377) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.928755 / 1.841788 (-0.913032) | 11.468224 / 8.074308 (3.393916) | 10.784568 / 10.191392 (0.593176) | 0.130332 / 0.680424 (-0.550092) | 0.014203 / 0.534201 (-0.519998) | 0.287125 / 0.579283 (-0.292158) | 0.263921 / 0.434364 (-0.170443) | 0.327824 / 0.540337 (-0.212513) | 0.434679 / 1.386936 (-0.952257) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005194 / 0.011353 (-0.006159) | 0.003411 / 0.011008 (-0.007598) | 0.050122 / 0.038508 (0.011614) | 0.049378 / 0.023109 (0.026269) | 0.272980 / 0.275898 (-0.002918) | 0.298047 / 0.323480 (-0.025433) | 0.003945 / 0.007986 (-0.004041) | 0.002633 / 0.004328 (-0.001696) | 0.048935 / 0.004250 (0.044685) | 0.040157 / 0.037052 (0.003104) | 0.277056 / 0.258489 (0.018567) | 0.299824 / 0.293841 (0.005983) | 0.028997 / 0.128546 (-0.099550) | 0.010868 / 0.075646 (-0.064779) | 0.057895 / 0.419271 (-0.361377) | 0.033522 / 0.043533 (-0.010010) | 0.274912 / 0.255139 (0.019773) | 0.288902 / 0.283200 (0.005702) | 0.018016 / 0.141683 
(-0.123667) | 1.116669 / 1.452155 (-0.335485) | 1.175007 / 1.492716 (-0.317710) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090169 / 0.018006 (0.072163) | 0.310577 / 0.000490 (0.310087) | 0.000215 / 0.000200 (0.000015) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.020448 / 0.037411 (-0.016963) | 0.068216 / 0.014526 (0.053690) | 0.081798 / 0.176557 (-0.094759) | 0.119151 / 0.737135 (-0.617985) | 0.085197 / 0.296338 (-0.211142) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.294957 / 0.215209 (0.079748) | 2.874065 / 2.077655 (0.796410) | 1.590963 / 1.504120 (0.086843) | 1.459596 / 1.541195 (-0.081599) | 1.467931 / 1.468490 (-0.000559) | 0.562832 / 4.584777 (-4.021944) | 2.426384 / 3.745712 (-1.319328) | 2.767749 / 5.269862 (-2.502112) | 1.746702 / 4.565676 (-2.818975) | 0.063353 / 0.424275 (-0.360922) | 0.005073 / 0.007607 (-0.002534) | 0.348258 / 0.226044 (0.122213) | 3.390351 / 2.268929 (1.121423) | 1.950092 / 55.444624 (-53.494532) | 1.671227 / 6.876477 (-5.205250) | 1.683349 / 2.142072 (-0.458723) | 0.637613 / 4.805227 (-4.167614) | 0.115172 / 6.500664 (-6.385492) | 0.040202 / 0.075469 (-0.035267) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.963085 / 1.841788 (-0.878702) | 11.895384 / 8.074308 (3.821076) | 10.609906 / 10.191392 (0.418513) | 0.130865 / 0.680424 (-0.549559) | 0.016020 / 0.534201 (-0.518181) | 0.287540 / 0.579283 (-0.291743) | 0.278204 / 0.434364 (-0.156160) | 0.326007 / 0.540337 (-0.214330) | 0.590881 / 1.386936 (-0.796055) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#c291e330a7d460ff09d867377de1d4c53fd5394c \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after 
write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005266 / 0.011353 (-0.006087) | 0.003751 / 0.011008 (-0.007257) | 0.063835 / 0.038508 (0.025327) | 0.052688 / 0.023109 (0.029579) | 0.261957 / 0.275898 (-0.013941) | 0.284264 / 0.323480 (-0.039216) | 0.003958 / 0.007986 (-0.004027) | 0.002696 / 0.004328 (-0.001633) | 0.052791 / 0.004250 (0.048540) | 0.038294 / 0.037052 (0.001242) | 0.259488 / 0.258489 (0.000999) | 0.298368 / 0.293841 (0.004528) | 0.028309 / 0.128546 (-0.100237) | 0.010819 / 0.075646 (-0.064827) | 0.208221 / 0.419271 (-0.211050) | 0.036373 / 0.043533 (-0.007160) | 0.257000 / 0.255139 (0.001861) | 0.273108 / 0.283200 (-0.010092) | 0.019674 / 0.141683 (-0.122009) | 1.119196 / 1.452155 (-0.332958) | 1.161613 / 1.492716 (-0.331104) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093408 / 0.018006 (0.075401) | 0.302278 / 0.000490 (0.301788) | 0.000212 / 0.000200 (0.000012) | 0.000074 / 0.000054 (0.000020) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.019417 / 0.037411 (-0.017995) | 0.060847 / 0.014526 (0.046321) | 0.075399 / 0.176557 (-0.101158) | 0.121233 / 0.737135 (-0.615902) | 0.076916 / 0.296338 (-0.219422) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.281265 / 0.215209 (0.066056) | 2.651726 / 2.077655 (0.574072) | 1.457726 / 1.504120 (-0.046394) | 1.339250 / 1.541195 (-0.201945) | 1.398529 / 1.468490 (-0.069961) | 0.566574 / 4.584777 (-4.018203) | 2.431576 / 3.745712 (-1.314136) | 2.845884 / 5.269862 (-2.423977) | 1.798051 / 4.565676 (-2.767626) | 0.063619 / 0.424275 (-0.360656) | 0.005286 / 0.007607 (-0.002321) | 0.332834 / 0.226044 (0.106789) | 3.293222 / 2.268929 (1.024293) | 1.837810 / 55.444624 (-53.606815) | 1.568511 / 6.876477 (-5.307966) | 1.627518 / 2.142072 (-0.514555) | 0.643520 / 4.805227 (-4.161708) | 0.118482 / 6.500664 (-6.382182) | 0.049563 / 0.075469 (-0.025906) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | 
map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.947767 / 1.841788 (-0.894021) | 11.994999 / 8.074308 (3.920691) | 10.662651 / 10.191392 (0.471259) | 0.142070 / 0.680424 (-0.538354) | 0.014276 / 0.534201 (-0.519925) | 0.288455 / 0.579283 (-0.290828) | 0.266335 / 0.434364 (-0.168029) | 0.328455 / 0.540337 (-0.211883) | 0.440740 / 1.386936 (-0.946196) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005636 / 0.011353 (-0.005717) | 0.003664 / 0.011008 (-0.007344) | 0.050340 / 0.038508 (0.011832) | 0.062795 / 0.023109 (0.039685) | 0.280874 / 0.275898 (0.004976) | 0.314056 / 0.323480 (-0.009424) | 0.004089 / 0.007986 (-0.003897) | 0.002780 / 0.004328 (-0.001548) | 0.048468 / 0.004250 (0.044218) | 0.042924 / 0.037052 (0.005871) | 0.281381 / 0.258489 (0.022892) | 0.308232 / 0.293841 (0.014391) | 0.030294 / 0.128546 (-0.098252) | 0.011098 / 0.075646 (-0.064548) | 0.057535 / 0.419271 (-0.361736) | 0.034217 / 0.043533 (-0.009316) | 0.283022 / 0.255139 (0.027883) | 0.298425 / 0.283200 (0.015225) | 0.019285 / 0.141683 (-0.122398) | 1.117722 / 1.452155 (-0.334433) | 1.185878 / 1.492716 (-0.306839) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094915 / 0.018006 (0.076909) | 0.311782 / 0.000490 (0.311293) | 0.000217 / 0.000200 (0.000017) | 0.000054 / 0.000054 (-0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022652 / 0.037411 (-0.014759) | 0.069766 / 0.014526 (0.055240) | 0.084495 / 0.176557 (-0.092061) | 0.121295 / 0.737135 (-0.615841) | 0.082447 / 0.296338 (-0.213891) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | 
shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.294286 / 0.215209 (0.079077) | 2.863694 / 2.077655 (0.786039) | 1.578338 / 1.504120 (0.074219) | 1.478737 / 1.541195 (-0.062458) | 1.528569 / 1.468490 (0.060079) | 0.576944 / 4.584777 (-4.007833) | 2.438730 / 3.745712 (-1.306982) | 2.956138 / 5.269862 (-2.313723) | 1.844484 / 4.565676 (-2.721192) | 0.065980 / 0.424275 (-0.358295) | 0.004998 / 0.007607 (-0.002609) | 0.352063 / 0.226044 (0.126019) | 3.456355 / 2.268929 (1.187426) | 1.971582 / 55.444624 (-53.473042) | 1.684536 / 6.876477 (-5.191940) | 1.726823 / 2.142072 (-0.415250) | 0.660235 / 4.805227 (-4.144992) | 0.119029 / 6.500664 (-6.381635) | 0.042497 / 0.075469 (-0.032972) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.971817 / 1.841788 (-0.869970) | 12.900324 / 8.074308 (4.826015) | 10.957495 / 10.191392 (0.766103) | 0.133705 / 0.680424 (-0.546718) | 0.015669 / 0.534201 (-0.518532) | 0.287340 / 0.579283 (-0.291943) | 0.280380 / 0.434364 (-0.153984) | 0.330369 / 0.540337 (-0.209969) | 0.581793 / 1.386936 (-0.805143) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#c2af5efae1985499d6a0a1b6ab4120337eebf776 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005038 / 0.011353 (-0.006315) | 0.003737 / 0.011008 (-0.007272) | 0.063118 / 0.038508 (0.024610) | 0.050120 / 0.023109 (0.027011) | 0.240722 / 0.275898 (-0.035176) | 0.263128 / 0.323480 (-0.060352) | 0.003839 / 0.007986 (-0.004147) | 0.002718 / 0.004328 (-0.001610) | 0.047869 / 0.004250 (0.043618) | 0.038092 / 0.037052 (0.001040) | 0.245759 / 0.258489 (-0.012730) | 0.277728 / 0.293841 (-0.016113) | 0.027466 / 0.128546 (-0.101081) | 0.011767 / 0.075646 (-0.063879) | 0.205505 / 0.419271 (-0.213766) | 0.035429 / 0.043533 (-0.008104) | 
0.241665 / 0.255139 (-0.013474) | 0.260908 / 0.283200 (-0.022292) | 0.017133 / 0.141683 (-0.124550) | 1.107725 / 1.452155 (-0.344429) | 1.169707 / 1.492716 (-0.323009) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094112 / 0.018006 (0.076106) | 0.302596 / 0.000490 (0.302106) | 0.000237 / 0.000200 (0.000037) | 0.000041 / 0.000054 (-0.000013) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.017923 / 0.037411 (-0.019488) | 0.060356 / 0.014526 (0.045830) | 0.073708 / 0.176557 (-0.102849) | 0.119952 / 0.737135 (-0.617183) | 0.075350 / 0.296338 (-0.220989) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289253 / 0.215209 (0.074044) | 2.800772 / 2.077655 (0.723117) | 1.538368 / 1.504120 (0.034248) | 1.401037 / 1.541195 (-0.140158) | 1.427170 / 1.468490 (-0.041320) | 0.560497 / 4.584777 (-4.024280) | 2.417844 / 3.745712 (-1.327868) | 2.798377 / 5.269862 (-2.471484) | 1.756517 / 4.565676 (-2.809160) | 0.063897 / 0.424275 (-0.360378) | 0.005323 / 0.007607 (-0.002284) | 0.339881 / 0.226044 (0.113836) | 3.354858 / 2.268929 (1.085929) | 1.877233 / 55.444624 (-53.567391) | 1.578713 / 6.876477 (-5.297764) | 1.631898 / 2.142072 (-0.510175) | 0.640303 / 4.805227 (-4.164924) | 0.116731 / 6.500664 (-6.383933) | 0.041978 / 0.075469 (-0.033491) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.963259 / 1.841788 (-0.878529) | 11.983646 / 8.074308 (3.909338) | 10.561596 / 10.191392 (0.370204) | 0.135863 / 0.680424 (-0.544561) | 0.015607 / 0.534201 (-0.518594) | 0.295164 / 0.579283 (-0.284119) | 0.283366 / 0.434364 (-0.150998) | 0.341848 / 0.540337 (-0.198489) | 0.448359 / 1.386936 (-0.938577) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | 
read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005366 / 0.011353 (-0.005987) | 0.003621 / 0.011008 (-0.007387) | 0.048615 / 0.038508 (0.010107) | 0.053950 / 0.023109 (0.030841) | 0.273112 / 0.275898 (-0.002786) | 0.295655 / 0.323480 (-0.027825) | 0.004066 / 0.007986 (-0.003920) | 0.002700 / 0.004328 (-0.001628) | 0.047899 / 0.004250 (0.043648) | 0.041633 / 0.037052 (0.004581) | 0.277760 / 0.258489 (0.019271) | 0.302068 / 0.293841 (0.008227) | 0.028879 / 0.128546 (-0.099668) | 0.010756 / 0.075646 (-0.064891) | 0.057190 / 0.419271 (-0.362082) | 0.032555 / 0.043533 (-0.010978) | 0.272045 / 0.255139 (0.016906) | 0.289330 / 0.283200 (0.006130) | 0.018466 / 0.141683 (-0.123216) | 1.180435 / 1.452155 (-0.271720) | 1.192228 / 1.492716 (-0.300488) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094871 / 0.018006 (0.076864) | 0.302552 / 0.000490 (0.302062) | 0.000224 / 0.000200 (0.000024) | 0.000044 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022008 / 0.037411 (-0.015403) | 0.068528 / 0.014526 (0.054002) | 0.081735 / 0.176557 (-0.094821) | 0.120990 / 0.737135 (-0.616145) | 0.083155 / 0.296338 (-0.213184) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.305030 / 0.215209 (0.089821) | 3.009812 / 2.077655 (0.932158) | 1.677773 / 1.504120 (0.173654) | 1.552280 / 1.541195 (0.011085) | 1.606248 / 1.468490 (0.137758) | 0.557093 / 4.584777 (-4.027684) | 2.418292 / 3.745712 (-1.327420) | 2.813049 / 5.269862 (-2.456813) | 1.764507 / 4.565676 (-2.801169) | 0.065089 / 0.424275 (-0.359186) | 0.004944 / 0.007607 (-0.002663) | 0.360672 / 0.226044 (0.134628) | 3.525850 / 2.268929 (1.256921) | 2.030091 / 55.444624 (-53.414533) | 1.754669 / 6.876477 (-5.121807) | 1.772673 / 2.142072 (-0.369399) | 0.642904 / 4.805227 (-4.162324) | 0.116018 / 6.500664 (-6.384646) | 0.041308 / 0.075469 (-0.034161) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched 
pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.986386 / 1.841788 (-0.855401) | 12.291623 / 8.074308 (4.217315) | 10.655932 / 10.191392 (0.464540) | 0.141736 / 0.680424 (-0.538688) | 0.016669 / 0.534201 (-0.517532) | 0.286875 / 0.579283 (-0.292408) | 0.281898 / 0.434364 (-0.152466) | 0.325206 / 0.540337 (-0.215132) | 0.577607 / 1.386936 (-0.809329) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#1cf33502493fb9760ea8cc8e51622bf94d0c9e31 \"CML watermark\")\n", "Alright, tests are passing (except one Windows test about temp dir cleanup, but I don't think it's related to this PR?)\r\n\r\n```\r\nFAILED tests/test_load.py::test_loading_from_the_datasets_hub - NotADirectoryError: [WinError 267] The directory name is invalid: 'C:\\\\Users\\\\RUNNER~1\\\\AppData\\\\Local\\\\Temp\\\\tmpqy3f2ft_\\\\hf-internal-testing___dataset_with_script\\\\default\\\\0.0.0\\\\c240e2be3370bdbd\\\\dataset_with_script-train.arrow'\r\n```", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005072 / 0.011353 (-0.006281) | 0.003449 / 0.011008 (-0.007559) | 0.062630 / 0.038508 (0.024122) | 0.054276 / 0.023109 (0.031167) | 0.253345 / 0.275898 (-0.022553) | 0.273460 / 0.323480 (-0.050020) | 0.003859 / 0.007986 (-0.004127) | 0.002646 / 0.004328 (-0.001683) | 0.048289 / 0.004250 (0.044038) | 0.037943 / 0.037052 (0.000891) | 0.256569 / 0.258489 (-0.001920) | 0.287809 / 0.293841 (-0.006032) | 0.027675 / 0.128546 (-0.100872) | 0.010554 / 0.075646 (-0.065092) | 0.205157 / 0.419271 (-0.214115) | 0.035464 / 0.043533 (-0.008069) | 0.254300 / 0.255139 (-0.000839) | 0.272907 / 0.283200 (-0.010292) | 0.018146 / 0.141683 (-0.123537) | 1.110528 / 1.452155 (-0.341626) | 1.170156 / 1.492716 (-0.322560) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093151 / 0.018006 (0.075144) | 0.302087 / 0.000490 (0.301598) | 0.000216 / 0.000200 (0.000016) | 0.000042 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018744 / 0.037411 (-0.018667) | 
0.059843 / 0.014526 (0.045317) | 0.073165 / 0.176557 (-0.103391) | 0.120464 / 0.737135 (-0.616671) | 0.074992 / 0.296338 (-0.221347) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.285103 / 0.215209 (0.069894) | 2.820254 / 2.077655 (0.742600) | 1.505336 / 1.504120 (0.001216) | 1.368631 / 1.541195 (-0.172564) | 1.404140 / 1.468490 (-0.064350) | 0.563906 / 4.584777 (-4.020871) | 2.411871 / 3.745712 (-1.333841) | 2.788390 / 5.269862 (-2.481471) | 1.749788 / 4.565676 (-2.815888) | 0.062171 / 0.424275 (-0.362104) | 0.004918 / 0.007607 (-0.002689) | 0.339615 / 0.226044 (0.113571) | 3.337789 / 2.268929 (1.068861) | 1.808445 / 55.444624 (-53.636180) | 1.541015 / 6.876477 (-5.335462) | 1.572389 / 2.142072 (-0.569683) | 0.641739 / 4.805227 (-4.163488) | 0.115844 / 6.500664 (-6.384820) | 0.042504 / 0.075469 (-0.032965) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.942463 / 1.841788 (-0.899325) | 11.602364 / 8.074308 (3.528056) | 10.628921 / 10.191392 (0.437529) | 0.136154 / 0.680424 (-0.544270) | 0.013842 / 0.534201 (-0.520359) | 0.287085 / 0.579283 (-0.292198) | 0.269860 / 0.434364 (-0.164503) | 0.329525 / 0.540337 (-0.210812) | 0.441287 / 1.386936 (-0.945649) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005215 / 0.011353 (-0.006138) | 0.003549 / 0.011008 (-0.007460) | 0.049199 / 0.038508 (0.010691) | 0.051655 / 0.023109 (0.028545) | 0.272150 / 
0.275898 (-0.003748) | 0.291978 / 0.323480 (-0.031502) | 0.003985 / 0.007986 (-0.004001) | 0.002668 / 0.004328 (-0.001661) | 0.048524 / 0.004250 (0.044274) | 0.039824 / 0.037052 (0.002772) | 0.275566 / 0.258489 (0.017077) | 0.298076 / 0.293841 (0.004235) | 0.029508 / 0.128546 (-0.099038) | 0.010673 / 0.075646 (-0.064973) | 0.057327 / 0.419271 (-0.361944) | 0.032590 / 0.043533 (-0.010943) | 0.273295 / 0.255139 (0.018156) | 0.289127 / 0.283200 (0.005928) | 0.017694 / 0.141683 (-0.123989) | 1.134502 / 1.452155 (-0.317653) | 1.185603 / 1.492716 (-0.307114) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.098403 / 0.018006 (0.080396) | 0.302735 / 0.000490 (0.302245) | 0.000228 / 0.000200 (0.000028) | 0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.025192 / 0.037411 (-0.012219) | 0.068149 / 0.014526 (0.053623) | 0.082220 / 0.176557 (-0.094336) | 0.119491 / 0.737135 (-0.617645) | 0.082484 / 0.296338 (-0.213855) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.295339 / 0.215209 (0.080130) | 2.868411 / 2.077655 (0.790757) | 1.590665 / 1.504120 (0.086545) | 1.465995 / 1.541195 (-0.075200) | 1.489205 / 1.468490 (0.020715) | 0.562503 / 4.584777 (-4.022274) | 2.480100 / 3.745712 (-1.265613) | 2.774216 / 5.269862 (-2.495646) | 1.733129 / 4.565676 (-2.832548) | 0.062698 / 0.424275 (-0.361577) | 0.004910 / 0.007607 (-0.002697) | 0.354766 / 0.226044 (0.128722) | 3.435541 / 2.268929 (1.166613) | 1.953357 / 55.444624 (-53.491267) | 1.673584 / 6.876477 (-5.202893) | 1.677749 / 2.142072 (-0.464323) | 0.632601 / 4.805227 (-4.172626) | 0.114875 / 6.500664 (-6.385789) | 0.040577 / 0.075469 (-0.034892) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.967003 / 1.841788 (-0.874785) | 11.964490 / 8.074308 (3.890181) | 10.493812 / 10.191392 (0.302420) | 0.132177 / 0.680424 (-0.548247) | 0.015149 / 0.534201 (-0.519052) | 0.289011 / 0.579283 (-0.290272) | 0.285479 / 0.434364 (-0.148885) | 0.327090 / 0.540337 (-0.213248) | 0.571747 / 1.386936 (-0.815189) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#4c9b4cb7ee4720415261216d72051e2a3320fe41 \"CML watermark\")\n" ]
2023-11-23T17:31:57Z
2023-12-01T17:57:17Z
2023-12-01T17:50:59Z
MEMBER
null
null
null
The idea is to make this code work for datasets with scripts if they have a Parquet export ```python ds = load_dataset("squad", trust_remote_code=False) ``` And more generally, it means we use the Parquet export whenever possible (it's safer and faster than dataset scripts). I also added a `config.USE_PARQUET_EXPORT` variable to use in the datasets-server parquet conversion job - [x] Needs https://github.com/huggingface/datasets/pull/6429 to be merged first cc @severo I use the /parquet and /info endpoints from datasets-server
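For illustration, here is a minimal sketch of the fallback idea (not the PR's actual implementation): fetch the Parquet export file list from the datasets-server `/parquet` endpoint and stream those files directly. The response shape (`parquet_files` entries with `split` and `url` keys) is assumed from the public API.

```python
# Hedged sketch of the Parquet-export fallback, not the code from this PR.
# Assumption: /parquet returns {"parquet_files": [{"split": ..., "url": ...}, ...]}.
import requests
from datasets import load_dataset

resp = requests.get(
    "https://datasets-server.huggingface.co/parquet",
    params={"dataset": "squad"},
)
resp.raise_for_status()
train_files = [f["url"] for f in resp.json()["parquet_files"] if f["split"] == "train"]

# Stream the exported Parquet files instead of running the dataset script.
ds = load_dataset("parquet", data_files={"train": train_files}, split="train")
print(ds)
```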
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 2, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/6448/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6448/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6448.diff", "html_url": "https://github.com/huggingface/datasets/pull/6448", "merged_at": "2023-12-01T17:50:59Z", "patch_url": "https://github.com/huggingface/datasets/pull/6448.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6448" }
https://api.github.com/repos/huggingface/datasets/issues/7459
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7459/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7459/comments
https://api.github.com/repos/huggingface/datasets/issues/7459/events
https://github.com/huggingface/datasets/pull/7459
2,925,491,766
PR_kwDODunzps6O8pWp
7,459
Fix data_files filtering
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_7459). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update." ]
2025-03-17T15:20:21Z
2025-03-17T15:25:56Z
2025-03-17T15:25:54Z
MEMBER
null
null
null
close https://github.com/huggingface/datasets/issues/7458
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/7459/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7459/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7459.diff", "html_url": "https://github.com/huggingface/datasets/pull/7459", "merged_at": "2025-03-17T15:25:53Z", "patch_url": "https://github.com/huggingface/datasets/pull/7459.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7459" }
https://api.github.com/repos/huggingface/datasets/issues/7102
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7102/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7102/comments
https://api.github.com/repos/huggingface/datasets/issues/7102/events
https://github.com/huggingface/datasets/issues/7102
2,466,893,106
I_kwDODunzps6TCc0y
7,102
Slow iteration speeds when using IterableDataset.shuffle with load_dataset(data_files=..., streaming=True)
{ "avatar_url": "https://avatars.githubusercontent.com/u/13192126?v=4", "events_url": "https://api.github.com/users/lajd/events{/privacy}", "followers_url": "https://api.github.com/users/lajd/followers", "following_url": "https://api.github.com/users/lajd/following{/other_user}", "gists_url": "https://api.github.com/users/lajd/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lajd", "id": 13192126, "login": "lajd", "node_id": "MDQ6VXNlcjEzMTkyMTI2", "organizations_url": "https://api.github.com/users/lajd/orgs", "received_events_url": "https://api.github.com/users/lajd/received_events", "repos_url": "https://api.github.com/users/lajd/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lajd/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lajd/subscriptions", "type": "User", "url": "https://api.github.com/users/lajd", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "Hi @lajd , I was skeptical about how we are saving the shards each as their own dataset (arrow file) in the script above, and so I updated the script to try out saving the shards in a few different file formats. From the experiments I ran, I saw binary format show significantly the best performance, with arrow and parquet about the same. However, I was unable to reproduce a drastically slower iteration speed after shuffling in any case when using the revised script -- pasting below:\r\n\r\n```python\r\nimport time\r\nfrom datasets import load_dataset, Dataset, IterableDataset\r\nfrom pathlib import Path\r\nimport torch\r\nimport pandas as pd\r\nimport pickle\r\nimport pyarrow as pa\r\nimport pyarrow.parquet as pq\r\n\r\n\r\ndef generate_random_example():\r\n return {\r\n 'inputs': torch.randn(128).tolist(),\r\n 'indices': torch.randint(0, 10000, (2, 20000)).tolist(),\r\n 'values': torch.randn(20000).tolist(),\r\n }\r\n\r\n\r\ndef generate_shard_data(examples_per_shard: int = 512):\r\n return [generate_random_example() for _ in range(examples_per_shard)]\r\n\r\n\r\ndef save_shard_as_arrow(shard_idx, save_dir, examples_per_shard):\r\n # Generate shard data\r\n shard_data = generate_shard_data(examples_per_shard)\r\n\r\n # Convert data to a Hugging Face Dataset\r\n dataset = Dataset.from_dict({\r\n 'inputs': [example['inputs'] for example in shard_data],\r\n 'indices': [example['indices'] for example in shard_data],\r\n 'values': [example['values'] for example in shard_data],\r\n })\r\n\r\n # Define the shard save path\r\n shard_write_path = Path(save_dir) / f\"shard_{shard_idx}\"\r\n\r\n # Save the dataset to disk using the Arrow format\r\n dataset.save_to_disk(str(shard_write_path))\r\n\r\n return str(shard_write_path)\r\n\r\n\r\ndef save_shard_as_parquet(shard_idx, save_dir, examples_per_shard):\r\n # Generate shard data\r\n shard_data = generate_shard_data(examples_per_shard)\r\n\r\n # Convert data to a pandas DataFrame for easy conversion to Parquet\r\n df = pd.DataFrame(shard_data)\r\n\r\n # Define the shard save path\r\n shard_write_path = Path(save_dir) / f\"shard_{shard_idx}.parquet\"\r\n\r\n # Convert DataFrame to PyArrow Table for Parquet saving\r\n table = pa.Table.from_pandas(df)\r\n\r\n # Save the table as a Parquet file\r\n pq.write_table(table, shard_write_path)\r\n\r\n return str(shard_write_path)\r\n\r\n\r\ndef save_shard_as_binary(shard_idx, save_dir, examples_per_shard):\r\n # Generate shard data\r\n shard_data = generate_shard_data(examples_per_shard)\r\n\r\n # Define the shard save path\r\n shard_write_path = Path(save_dir) / f\"shard_{shard_idx}.bin\"\r\n\r\n # Save each example as a serialized binary object using pickle\r\n with open(shard_write_path, 'wb') as f:\r\n for example in shard_data:\r\n f.write(pickle.dumps(example))\r\n\r\n return str(shard_write_path)\r\n\r\n\r\ndef generate_split_shards(save_dir, filetype=\"parquet\", num_shards: int = 16, examples_per_shard: int = 512):\r\n shard_filepaths = []\r\n for shard_idx in range(num_shards):\r\n if filetype == \"parquet\":\r\n shard_filepaths.append(save_shard_as_parquet(shard_idx, save_dir, examples_per_shard))\r\n elif filetype == \"binary\":\r\n shard_filepaths.append(save_shard_as_binary(shard_idx, save_dir, examples_per_shard))\r\n elif filetype == \"arrow\":\r\n shard_filepaths.append(save_shard_as_arrow(shard_idx, save_dir, examples_per_shard))\r\n else:\r\n raise ValueError(f\"Unsupported filetype: {filetype}. 
Choose 'parquet', 'binary', or 'arrow'.\")\r\n return shard_filepaths\r\n\r\n\r\ndef _binary_dataset_generator(files):\r\n for filepath in files:\r\n with open(filepath, 'rb') as f:\r\n while True:\r\n try:\r\n example = pickle.load(f)\r\n yield example\r\n except EOFError:\r\n break\r\n\r\n\r\ndef load_binary_dataset(shard_filepaths):\r\n return IterableDataset.from_generator(\r\n _binary_dataset_generator, gen_kwargs={\"files\": shard_filepaths},\r\n )\r\n\r\n\r\ndef load_parquet_dataset(shard_filepaths):\r\n # Load the dataset as an IterableDataset\r\n return load_dataset(\r\n \"parquet\",\r\n data_files={split: shard_filepaths},\r\n streaming=True,\r\n split=split,\r\n )\r\n\r\n\r\ndef load_arrow_dataset(shard_filepaths):\r\n # Load the dataset as an IterableDataset\r\n shard_filepaths = [f + \"/data-00000-of-00001.arrow\" for f in shard_filepaths]\r\n return load_dataset(\r\n \"arrow\",\r\n data_files={split: shard_filepaths},\r\n streaming=True,\r\n split=split,\r\n )\r\n\r\n\r\ndef load_dataset_wrapper(filetype: str, shard_filepaths: list[str]):\r\n if filetype == \"parquet\":\r\n return load_parquet_dataset(shard_filepaths)\r\n if filetype == \"binary\":\r\n return load_binary_dataset(shard_filepaths)\r\n if filetype == \"arrow\":\r\n return load_arrow_dataset(shard_filepaths)\r\n else:\r\n raise ValueError(\"Unsupported filetype\")\r\n\r\n\r\n# Example usage:\r\nsplit = \"train\"\r\nsplit_save_dir = \"/tmp/random_split\"\r\n\r\nfiletype = \"binary\" # or \"parquet\", or \"arrow\"\r\nnum_shards = 16\r\n\r\nshard_filepaths = generate_split_shards(split_save_dir, filetype=filetype, num_shards=num_shards)\r\ndataset = load_dataset_wrapper(filetype=filetype, shard_filepaths=shard_filepaths)\r\n\r\ndataset = dataset.shuffle(buffer_size=100, seed=42)\r\n\r\nstart_time = time.time()\r\nfor count, item in enumerate(dataset):\r\n if count > 0 and count % 100 == 0:\r\n elapsed_time = time.time() - start_time\r\n iterations_per_second = count / elapsed_time\r\n print(f\"Processed {count} items at an average of {iterations_per_second:.2f} iterations/second\")\r\n```", "update: I was able to reproduce the issue you described -- but ONLY if I do \r\n\r\n```\r\nrandom_dataset = random_dataset.with_format(\"numpy\")\r\n```\r\n\r\nIf I do this, I see numbers similar to what you reported. If I do not use numpy format, parquet and arrow are about 17 iterations per second regardless of whether or not we shuffle. Using binary (again, no numpy format tried with this yet) still shows the fastest speeds on average (shuffle and no shuffle) of about 850 it/sec.\r\n\r\nI suspect some issues with arrow and numpy being optimized for sequential reads, and shuffling causes issues... hmm" ]
2024-08-14T21:44:44Z
2024-08-15T16:17:31Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug When I load a dataset from a number of arrow files, as in: ``` random_dataset = load_dataset( "arrow", data_files={split: shard_filepaths}, streaming=True, split=split, ) ``` I'm able to get fast iteration speeds when iterating over the dataset without shuffling. When I shuffle the dataset, the iteration speed is reduced by ~1000x. It's very possible the way I'm loading dataset shards is not appropriate; if so please advise! Thanks for the help ### Steps to reproduce the bug Here's full code to reproduce the issue: - Generate a random dataset - Create shards of data independently using Dataset.save_to_disk() - The below will generate 16 shards (arrow files), of 512 examples each ``` import time from pathlib import Path from multiprocessing import Pool, cpu_count import torch from datasets import Dataset, load_dataset split = "train" split_save_dir = "/tmp/random_split" def generate_random_example(): return { 'inputs': torch.randn(128).tolist(), 'indices': torch.randint(0, 10000, (2, 20000)).tolist(), 'values': torch.randn(20000).tolist(), } def generate_shard_dataset(examples_per_shard: int = 512): dataset_dict = { 'inputs': [], 'indices': [], 'values': [] } for _ in range(examples_per_shard): example = generate_random_example() dataset_dict['inputs'].append(example['inputs']) dataset_dict['indices'].append(example['indices']) dataset_dict['values'].append(example['values']) return Dataset.from_dict(dataset_dict) def save_shard(shard_idx, save_dir, examples_per_shard): shard_dataset = generate_shard_dataset(examples_per_shard) shard_write_path = Path(save_dir) / f"shard_{shard_idx}" shard_dataset.save_to_disk(shard_write_path) return str(Path(shard_write_path) / "data-00000-of-00001.arrow") def generate_split_shards(save_dir, num_shards: int = 16, examples_per_shard: int = 512): with Pool(cpu_count()) as pool: args = [(m, save_dir, examples_per_shard) for m in range(num_shards)] shard_filepaths = pool.starmap(save_shard, args) return shard_filepaths shard_filepaths = generate_split_shards(split_save_dir) ``` Load the dataset as IterableDataset: ``` random_dataset = load_dataset( "arrow", data_files={split: shard_filepaths}, streaming=True, split=split, ) random_dataset = random_dataset.with_format("numpy") ``` Observe the iterations/second when iterating over the dataset directly, and applying shuffling before iterating: Without shuffling, this gives ~1500 iterations/second ``` start_time = time.time() for count, item in enumerate(random_dataset): if count > 0 and count % 100 == 0: elapsed_time = time.time() - start_time iterations_per_second = count / elapsed_time print(f"Processed {count} items at an average of {iterations_per_second:.2f} iterations/second") ``` ``` Processed 100 items at an average of 705.74 iterations/second Processed 200 items at an average of 1169.68 iterations/second Processed 300 items at an average of 1497.97 iterations/second Processed 400 items at an average of 1739.62 iterations/second Processed 500 items at an average of 1931.11 iterations/second` ``` When shuffling, this gives ~3 iterations/second: ``` random_dataset = random_dataset.shuffle(buffer_size=100,seed=42) start_time = time.time() for count, item in enumerate(random_dataset): if count > 0 and count % 100 == 0: elapsed_time = time.time() - start_time iterations_per_second = count / elapsed_time print(f"Processed {count} items at an average of {iterations_per_second:.2f} iterations/second") ``` ``` Processed 100 items at an average of 3.75 iterations/second Processed 200 items at 
an average of 3.93 iterations/second ``` ### Expected behavior Iterations per second should be barely affected by shuffling, especially with a small buffer size ### Environment info Datasets version: 2.21.0 Python 3.10 Ubuntu 22.04
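To isolate the slowdown, here is a small A/B sketch (an addition, not part of the original report) that times shuffled iteration with and without the numpy formatting singled out in the comments above; `shard_filepaths` is assumed to come from the shard-generation script in this issue.

```python
# Hedged sketch: toggle .with_format("numpy") and compare shuffled iteration speed.
# Assumes `shard_filepaths` was produced by the shard-generation script above.
import time
from itertools import islice
from datasets import load_dataset

def iterations_per_second(ds, n=200):
    start = time.time()
    for _ in islice(ds, n):
        pass
    return n / (time.time() - start)

for fmt in (None, "numpy"):
    ds = load_dataset("arrow", data_files={"train": shard_filepaths}, streaming=True, split="train")
    if fmt is not None:
        ds = ds.with_format(fmt)
    ds = ds.shuffle(buffer_size=100, seed=42)
    print(f"format={fmt}: {iterations_per_second(ds):.2f} it/s")
```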
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7102/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7102/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7145
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7145/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7145/comments
https://api.github.com/repos/huggingface/datasets/issues/7145/events
https://github.com/huggingface/datasets/pull/7145
2,519,789,724
PR_kwDODunzps57Kjjc
7,145
Release: 3.0.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_7145). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update." ]
2024-09-11T13:41:47Z
2024-09-11T13:48:42Z
2024-09-11T13:48:41Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7145/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7145/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7145.diff", "html_url": "https://github.com/huggingface/datasets/pull/7145", "merged_at": "2024-09-11T13:48:41Z", "patch_url": "https://github.com/huggingface/datasets/pull/7145.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7145" }
https://api.github.com/repos/huggingface/datasets/issues/7024
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7024/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7024/comments
https://api.github.com/repos/huggingface/datasets/issues/7024/events
https://github.com/huggingface/datasets/issues/7024
2,390,141,626
I_kwDODunzps6Odqq6
7,024
Streaming dataset not returning data
{ "avatar_url": "https://avatars.githubusercontent.com/u/91670254?v=4", "events_url": "https://api.github.com/users/johnwee1/events{/privacy}", "followers_url": "https://api.github.com/users/johnwee1/followers", "following_url": "https://api.github.com/users/johnwee1/following{/other_user}", "gists_url": "https://api.github.com/users/johnwee1/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/johnwee1", "id": 91670254, "login": "johnwee1", "node_id": "U_kgDOBXbG7g", "organizations_url": "https://api.github.com/users/johnwee1/orgs", "received_events_url": "https://api.github.com/users/johnwee1/received_events", "repos_url": "https://api.github.com/users/johnwee1/repos", "site_admin": false, "starred_url": "https://api.github.com/users/johnwee1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/johnwee1/subscriptions", "type": "User", "url": "https://api.github.com/users/johnwee1", "user_view_type": "public" }
[]
open
false
null
[]
null
[]
2024-07-04T07:21:47Z
2024-07-04T07:21:47Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I'm deciding to post here because I'm still not sure what the issue is, or if I am using IterableDatasets wrongly. I'm following the guide here https://huggingface.co/learn/cookbook/en/fine_tuning_code_llm_on_single_gpu pretty much to a tee and have verified that it works when I'm fine-tuning on the provided dataset. However, I'm doing some data preprocessing steps (filtering out entries), and when I try to swap out the dataset for mine, it fails to train. I eventually fixed this by simply setting `streaming=False` in `load_dataset`. Could this be some sort of network / firewall issue I'm facing? ### Steps to reproduce the bug I made a post with a more detailed description of how I reproduced this problem before I found my workaround: https://discuss.huggingface.co/t/problem-with-custom-iterator-of-streaming-dataset-not-returning-anything/94551 Here is the problematic dataset snippet, which works when streaming=False (and with the `buffer_size` keyword removed from `shuffle`) ``` commitpackft = load_dataset( "chargoddard/commitpack-ft-instruct", split="train", streaming=True ).filter(lambda example: example["language"] == "Python") def form_template(example): """Forms a template for each example following the alpaca format for CommitPack""" example["content"] = ( "### Human: " + example["instruction"] + " " + example["input"] + " ### Assistant: " + example["output"] ) return example dataset = commitpackft.map( form_template, remove_columns=["id", "language", "license", "instruction", "input", "output"], ).shuffle( seed=42, buffer_size=10000 ) # remove everything since it's all inside "content" now validation_data = dataset.take(4000) train_data = dataset.skip(4000) ``` The annoying part about this is that it only fails during training and I don't know when it will fail, except that it always fails during evaluation. ### Expected behavior The expected behavior is that I should be able to get something from the iterator when called, instead of getting nothing / getting stuck in a loop somewhere. ### Environment info - `datasets` version: 2.20.0 - Platform: Linux-5.4.0-121-generic-x86_64-with-glibc2.31 - Python version: 3.11.7 - `huggingface_hub` version: 0.23.4 - PyArrow version: 16.1.0 - Pandas version: 2.2.2 - `fsspec` version: 2024.5.0
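As a quick way to narrow this down (a suggestion, not part of the original report), one can check that each stream actually yields an example before handing it to the trainer, reusing `validation_data` and `train_data` from the snippet above:

```python
# Hedged sanity check: confirm each streaming split yields at least one example.
from itertools import islice

for name, stream in [("validation", validation_data), ("train", train_data)]:
    first = list(islice(stream, 1))
    if first:
        print(f"{name}: ok, first example keys = {list(first[0].keys())}")
    else:
        print(f"{name}: stream yielded nothing")
```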
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7024/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7024/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/6907
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6907/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6907/comments
https://api.github.com/repos/huggingface/datasets/issues/6907/events
https://github.com/huggingface/datasets/issues/6907
2,303,855,833
I_kwDODunzps6JUgzZ
6,907
Support the deserialization of json lines files comprised of lists
{ "avatar_url": "https://avatars.githubusercontent.com/u/8473183?v=4", "events_url": "https://api.github.com/users/umarbutler/events{/privacy}", "followers_url": "https://api.github.com/users/umarbutler/followers", "following_url": "https://api.github.com/users/umarbutler/following{/other_user}", "gists_url": "https://api.github.com/users/umarbutler/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/umarbutler", "id": 8473183, "login": "umarbutler", "node_id": "MDQ6VXNlcjg0NzMxODM=", "organizations_url": "https://api.github.com/users/umarbutler/orgs", "received_events_url": "https://api.github.com/users/umarbutler/received_events", "repos_url": "https://api.github.com/users/umarbutler/repos", "site_admin": false, "starred_url": "https://api.github.com/users/umarbutler/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/umarbutler/subscriptions", "type": "User", "url": "https://api.github.com/users/umarbutler", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
null
[ "Update: I ended up deciding to go back to use lines of dictionaries instead of arrays, not because of this issue as my users would be capable of downloading my corpus without `datasets`, but the speed and storage savings are not currently worth breaking my API and harming the backwards compatibility of each new revision.\r\n\r\nWith that said, for a static dataset that is not regularly updated like mine, and particularly for extremely large datasets with millions or billions of rows, using arrays could have a meaningful impact, and so there is probably still value in supporting this structure, provided the effort is not too much." ]
2024-05-18T05:07:23Z
2024-05-18T08:53:28Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Feature request I manage a somewhat large and popular Hugging Face dataset known as the [Open Australian Legal Corpus](https://huggingface.co/datasets/umarbutler/open-australian-legal-corpus). I recently updated my corpus to be stored in a json lines file where each line is an array and each element represents a value at a particular column. Previously, my corpus was stored as a json lines file where each line was a dictionary and the keys were the fields. Essentially, a line in my json lines file used to look like this: ```json {"version_id":"","type":"","jurisdiction":"","source":"","citation":"","url":"","when_scraped":"","text":""} ``` And now it looks like this: ```json ["","","","","","","",""] ``` This saves 65 bytes per document and allows me to serialise and deserialise documents very quickly via `msgspec`. After making this change, I found that `datasets` was incapable of deserialising my Corpus without a custom loading script, even if I ensured that the `dataset_info` field in my dataset card contained the desired names of my features. I would like to request that functionality be added to support this format, which is more memory-efficient and faster than using dictionaries. ### Motivation The [documentation](https://huggingface.co/docs/datasets/en/dataset_script) for creating dataset loading scripts asserts that: > In the next major release, the new safety features of 🤗 Datasets will disable running dataset loading scripts by default, and you will have to pass trust_remote_code=True to load datasets that require running a dataset script. I would rather not require my users to pass `trust_remote_code=True`, which means that I will need built-in support for this format. ### Your contribution I would be happy to submit a PR for this if this is something you would incorporate into `datasets` and if I can be pointed to where the code would need to go.
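In the meantime, here is a minimal stopgap sketch (not the requested built-in support) that loads such a file without a loading script, by zipping each array with the column names from the old dictionary layout; the file name is hypothetical.

```python
# Hedged sketch: load "json lines of arrays" via a generator, no loading script.
import json
from datasets import Dataset

# Column names taken from the old dictionary layout shown above.
COLUMNS = ["version_id", "type", "jurisdiction", "source",
           "citation", "url", "when_scraped", "text"]

def rows(path):
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield dict(zip(COLUMNS, json.loads(line)))

# "corpus.jsonl" is a hypothetical local path.
ds = Dataset.from_generator(rows, gen_kwargs={"path": "corpus.jsonl"})
```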
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6907/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6907/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7440
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7440/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7440/comments
https://api.github.com/repos/huggingface/datasets/issues/7440/events
https://github.com/huggingface/datasets/issues/7440
2,903,740,662
I_kwDODunzps6tE5D2
7,440
IterableDataset raises FileNotFoundError instead of retrying
{ "avatar_url": "https://avatars.githubusercontent.com/u/145220868?v=4", "events_url": "https://api.github.com/users/bauwenst/events{/privacy}", "followers_url": "https://api.github.com/users/bauwenst/followers", "following_url": "https://api.github.com/users/bauwenst/following{/other_user}", "gists_url": "https://api.github.com/users/bauwenst/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/bauwenst", "id": 145220868, "login": "bauwenst", "node_id": "U_kgDOCKflBA", "organizations_url": "https://api.github.com/users/bauwenst/orgs", "received_events_url": "https://api.github.com/users/bauwenst/received_events", "repos_url": "https://api.github.com/users/bauwenst/repos", "site_admin": false, "starred_url": "https://api.github.com/users/bauwenst/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bauwenst/subscriptions", "type": "User", "url": "https://api.github.com/users/bauwenst", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "I have since been training more models with identical architectures over the same dataset, and it is completely unstable. One has now failed at chunk9/1215, whilst others have gotten past that.\n```python\nFileNotFoundError: zstd://example_train_1215.jsonl::hf://datasets/cerebras/SlimPajama-627B@2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/train/chunk9/example_train_1215.jsonl.zst\n```\nBelow is the full training log, where you can clearly see the intermittent dataset issues. Note again that this model only got to epoch 0.11, whereas I have other models training on the exact same dataset right now that have gotten way beyond that. This is quickly turning into a highly expensive bug which I didn't have issues with in the past half year of using the same setup.\n<details>\n<summary>Training log of failed run</summary>\n\n```python\n 1%| | 64/8192 [56:27<87:25:33, 38.72s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 5ef28452-e903-4bd8-946d-f0c77f558a2a)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk5/example_holdout_4799.jsonl.zst\n 1%| | 64/8192 [56:51<87:25:33, 38.72s/it]Retrying in 1s [Retry 1/5].\n 2%|▏ | 192/8192 [2:40:14<85:29:44, 38.47s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: ba6e4c51-f4a4-407e-9934-3772550b7ce9)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk1/example_holdout_2770.jsonl.zst\n 2%|▏ | 192/8192 [2:40:53<85:29:44, 38.47s/it]Retrying in 1s [Retry 1/5].\n 2%|▏ | 192/8192 [2:40:53<85:29:44, 38.47s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: bdf2cfaa-7e0b-46a0-bec1-b1e573fa7998)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_4386.jsonl.zst\n 2%|▏ | 192/8192 [2:42:16<85:29:44, 38.47s/it]Retrying in 1s [Retry 1/5].\n 2%|▏ | 192/8192 [2:42:16<85:29:44, 38.47s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 1dc5e455-8042-4c7b-9b97-5ded33dfea34)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk1/example_holdout_1763.jsonl.zst\n 2%|▏ | 192/8192 [2:42:30<85:29:44, 38.47s/it]Retrying in 1s [Retry 1/5].\n 2%|▏ | 192/8192 [2:42:30<85:29:44, 38.47s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 9cf29917-8111-41fe-80aa-953df65c5803)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_5509.jsonl.zst\n 2%|▏ | 192/8192 [2:44:31<85:29:44, 38.47s/it]Retrying in 1s [Retry 1/5].\n 2%|▏ | 192/8192 [2:44:31<85:29:44, 38.47s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. 
(read timeout=10)\"), '(Request ID: 2515a0b0-3d81-409f-940c-e78ed5e2dbf8)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_3093.jsonl.zst\n 2%|▏ | 192/8192 [2:45:13<85:29:44, 38.47s/it]Retrying in 1s [Retry 1/5].\n 2%|▏ | 192/8192 [2:45:13<85:29:44, 38.47s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: a4c1e0c7-1c7a-4377-bc7e-6f076473072b)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_3422.jsonl.zst\n 2%|▏ | 192/8192 [2:46:26<85:29:44, 38.47s/it]Retrying in 1s [Retry 1/5].\n 2%|▏ | 192/8192 [2:46:26<85:29:44, 38.47s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: c7b0d366-db86-4d0c-a4e0-be251d26519e)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk3/example_holdout_2250.jsonl.zst\n 2%|▏ | 192/8192 [2:47:24<85:29:44, 38.47s/it]Retrying in 1s [Retry 1/5].\n 2%|▏ | 192/8192 [2:47:24<85:29:44, 38.47s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: b0df5a1a-4836-46cf-8e45-58a7c1553309)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk3/example_holdout_6161.jsonl.zst\n 2%|▏ | 192/8192 [2:49:10<85:29:44, 38.47s/it]Retrying in 1s [Retry 1/5].\n 2%|▏ | 192/8192 [2:49:10<85:29:44, 38.47s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. 
(read timeout=10)\"), '(Request ID: c1d97368-c0ae-45bb-ae10-5559b3ebc4e4)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk3/example_holdout_5782.jsonl.zst\n 2%|▏ | 192/8192 [2:49:30<85:29:44, 38.47s/it]Retrying in 1s [Retry 1/5].\n{'eval_loss': 10.482319831848145, 'eval_runtime': 902.7516, 'eval_samples_per_second': 18.149, 'eval_steps_per_second': 0.142, 'epoch': 0, 'num_input_tokens_seen': 0}\n{'loss': 10.4895, 'grad_norm': 2.9147818088531494, 'learning_rate': 3.90625e-06, 'epoch': 0.0, 'num_input_tokens_seen': 1048576}\n{'loss': 10.4832, 'grad_norm': 2.8206892013549805, 'learning_rate': 7.8125e-06, 'epoch': 0.0, 'num_input_tokens_seen': 2097152}\n{'loss': 10.4851, 'grad_norm': 2.910552978515625, 'learning_rate': 1.171875e-05, 'epoch': 0.0, 'num_input_tokens_seen': 3145728}\n{'loss': 10.486, 'grad_norm': 2.8042073249816895, 'learning_rate': 1.5625e-05, 'epoch': 0.0, 'num_input_tokens_seen': 4194304}\n{'loss': 10.4719, 'grad_norm': 2.83260440826416, 'learning_rate': 1.953125e-05, 'epoch': 0.0, 'num_input_tokens_seen': 5242880}\n{'loss': 10.4482, 'grad_norm': 2.916527032852173, 'learning_rate': 2.34375e-05, 'epoch': 0.0, 'num_input_tokens_seen': 6291456}\n{'loss': 10.4113, 'grad_norm': 2.911870241165161, 'learning_rate': 2.734375e-05, 'epoch': 0.0, 'num_input_tokens_seen': 7340032}\n{'loss': 10.3863, 'grad_norm': 2.8873367309570312, 'learning_rate': 3.125e-05, 'epoch': 0.0, 'num_input_tokens_seen': 8388608}\n{'loss': 10.3557, 'grad_norm': 2.7183432579040527, 'learning_rate': 3.5156250000000004e-05, 'epoch': 0.0, 'num_input_tokens_seen': 9437184}\n{'loss': 10.2795, 'grad_norm': 2.6743927001953125, 'learning_rate': 3.90625e-05, 'epoch': 0.0, 'num_input_tokens_seen': 10485760}\n{'loss': 10.2148, 'grad_norm': 2.3173940181732178, 'learning_rate': 4.296875e-05, 'epoch': 0.0, 'num_input_tokens_seen': 11534336}\n{'loss': 10.1482, 'grad_norm': 2.09787917137146, 'learning_rate': 4.6875e-05, 'epoch': 0.0, 'num_input_tokens_seen': 12582912}\n{'loss': 10.1024, 'grad_norm': 1.889390468597412, 'learning_rate': 5.0781250000000004e-05, 'epoch': 0.0, 'num_input_tokens_seen': 13631488}\n{'loss': 10.0418, 'grad_norm': 1.8319090604782104, 'learning_rate': 5.46875e-05, 'epoch': 0.0, 'num_input_tokens_seen': 14680064}\n{'loss': 10.0081, 'grad_norm': 1.7302652597427368, 'learning_rate': 5.859375e-05, 'epoch': 0.0, 'num_input_tokens_seen': 15728640}\n{'loss': 9.9525, 'grad_norm': 1.767600417137146, 'learning_rate': 6.25e-05, 'epoch': 0.0, 'num_input_tokens_seen': 16777216}\n{'loss': 9.9326, 'grad_norm': 2.1608240604400635, 'learning_rate': 6.640625e-05, 'epoch': 0.0, 'num_input_tokens_seen': 17825792}\n{'loss': 9.8478, 'grad_norm': 1.7399269342422485, 'learning_rate': 7.031250000000001e-05, 'epoch': 0.0, 'num_input_tokens_seen': 18874368}\n{'loss': 9.8215, 'grad_norm': 1.6564425230026245, 'learning_rate': 7.421875e-05, 'epoch': 0.0, 'num_input_tokens_seen': 19922944}\n{'loss': 9.7732, 'grad_norm': 1.6452653408050537, 'learning_rate': 7.8125e-05, 'epoch': 0.0, 'num_input_tokens_seen': 20971520}\n{'loss': 9.6896, 'grad_norm': 1.7053238153457642, 'learning_rate': 8.203125e-05, 'epoch': 0.0, 'num_input_tokens_seen': 22020096}\n{'loss': 9.6356, 'grad_norm': 1.7050201892852783, 'learning_rate': 8.59375e-05, 'epoch': 0.0, 'num_input_tokens_seen': 23068672}\n{'loss': 9.5781, 'grad_norm': 1.7155998945236206, 'learning_rate': 8.984375e-05, 'epoch': 0.0, 'num_input_tokens_seen': 24117248}\n{'loss': 9.5355, 'grad_norm': 
1.697864294052124, 'learning_rate': 9.375e-05, 'epoch': 0.0, 'num_input_tokens_seen': 25165824}\n{'loss': 9.4718, 'grad_norm': 1.7598071098327637, 'learning_rate': 9.765625e-05, 'epoch': 0.0, 'num_input_tokens_seen': 26214400}\n{'loss': 9.3972, 'grad_norm': 1.7407673597335815, 'learning_rate': 0.00010156250000000001, 'epoch': 0.0, 'num_input_tokens_seen': 27262976}\n{'loss': 9.3303, 'grad_norm': 1.7710134983062744, 'learning_rate': 0.00010546875, 'epoch': 0.0, 'num_input_tokens_seen': 28311552}\n{'loss': 9.2973, 'grad_norm': 1.716180682182312, 'learning_rate': 0.000109375, 'epoch': 0.0, 'num_input_tokens_seen': 29360128}\n{'loss': 9.2049, 'grad_norm': 1.7579947710037231, 'learning_rate': 0.00011328125, 'epoch': 0.0, 'num_input_tokens_seen': 30408704}\n{'loss': 9.1656, 'grad_norm': 1.6988558769226074, 'learning_rate': 0.0001171875, 'epoch': 0.0, 'num_input_tokens_seen': 31457280}\n{'loss': 9.0966, 'grad_norm': 1.7036350965499878, 'learning_rate': 0.00012109375, 'epoch': 0.0, 'num_input_tokens_seen': 32505856}\n{'loss': 9.0107, 'grad_norm': 1.752451777458191, 'learning_rate': 0.000125, 'epoch': 0.0, 'num_input_tokens_seen': 33554432}\n{'loss': 8.9788, 'grad_norm': 1.6769776344299316, 'learning_rate': 0.00012890625, 'epoch': 0.0, 'num_input_tokens_seen': 34603008}\n{'loss': 8.9155, 'grad_norm': 1.6497987508773804, 'learning_rate': 0.0001328125, 'epoch': 0.0, 'num_input_tokens_seen': 35651584}\n{'loss': 8.8008, 'grad_norm': 1.722798466682434, 'learning_rate': 0.00013671875, 'epoch': 0.0, 'num_input_tokens_seen': 36700160}\n{'loss': 8.7727, 'grad_norm': 1.6046854257583618, 'learning_rate': 0.00014062500000000002, 'epoch': 0.0, 'num_input_tokens_seen': 37748736}\n{'loss': 8.682, 'grad_norm': 1.6132164001464844, 'learning_rate': 0.00014453125, 'epoch': 0.0, 'num_input_tokens_seen': 38797312}\n{'loss': 8.6516, 'grad_norm': 1.558968424797058, 'learning_rate': 0.0001484375, 'epoch': 0.0, 'num_input_tokens_seen': 39845888}\n{'loss': 8.5935, 'grad_norm': 1.6083673238754272, 'learning_rate': 0.00015234375, 'epoch': 0.0, 'num_input_tokens_seen': 40894464}\n{'loss': 8.4852, 'grad_norm': 1.5469273328781128, 'learning_rate': 0.00015625, 'epoch': 0.0, 'num_input_tokens_seen': 41943040}\n{'loss': 8.4342, 'grad_norm': 1.46219801902771, 'learning_rate': 0.00016015625, 'epoch': 0.01, 'num_input_tokens_seen': 42991616}\n{'loss': 8.3213, 'grad_norm': 1.473191261291504, 'learning_rate': 0.0001640625, 'epoch': 0.01, 'num_input_tokens_seen': 44040192}\n{'loss': 8.3193, 'grad_norm': 1.4024137258529663, 'learning_rate': 0.00016796875000000001, 'epoch': 0.01, 'num_input_tokens_seen': 45088768}\n{'loss': 8.1853, 'grad_norm': 1.3591463565826416, 'learning_rate': 0.000171875, 'epoch': 0.01, 'num_input_tokens_seen': 46137344}\n{'loss': 8.1109, 'grad_norm': 1.3547109365463257, 'learning_rate': 0.00017578125, 'epoch': 0.01, 'num_input_tokens_seen': 47185920}\n{'loss': 8.0741, 'grad_norm': 1.268977403640747, 'learning_rate': 0.0001796875, 'epoch': 0.01, 'num_input_tokens_seen': 48234496}\n{'loss': 8.0032, 'grad_norm': 1.222671389579773, 'learning_rate': 0.00018359375, 'epoch': 0.01, 'num_input_tokens_seen': 49283072}\n{'loss': 7.9346, 'grad_norm': 1.154278039932251, 'learning_rate': 0.0001875, 'epoch': 0.01, 'num_input_tokens_seen': 50331648}\n{'loss': 7.8823, 'grad_norm': 1.1396397352218628, 'learning_rate': 0.00019140625, 'epoch': 0.01, 'num_input_tokens_seen': 51380224}\n{'loss': 7.8444, 'grad_norm': 1.0608373880386353, 'learning_rate': 0.0001953125, 'epoch': 0.01, 'num_input_tokens_seen': 52428800}\n{'loss': 7.7794, 
'grad_norm': 1.0165436267852783, 'learning_rate': 0.00019921875000000001, 'epoch': 0.01, 'num_input_tokens_seen': 53477376}\n{'loss': 7.7567, 'grad_norm': 0.8742461204528809, 'learning_rate': 0.00020312500000000002, 'epoch': 0.01, 'num_input_tokens_seen': 54525952}\n{'loss': 7.6489, 'grad_norm': 0.8699902296066284, 'learning_rate': 0.00020703125, 'epoch': 0.01, 'num_input_tokens_seen': 55574528}\n{'loss': 7.6062, 'grad_norm': 0.809831440448761, 'learning_rate': 0.0002109375, 'epoch': 0.01, 'num_input_tokens_seen': 56623104}\n{'loss': 7.5511, 'grad_norm': 0.7423847317695618, 'learning_rate': 0.00021484375, 'epoch': 0.01, 'num_input_tokens_seen': 57671680}\n{'loss': 7.4435, 'grad_norm': 0.7614696025848389, 'learning_rate': 0.00021875, 'epoch': 0.01, 'num_input_tokens_seen': 58720256}\n{'loss': 7.564, 'grad_norm': 0.5147746801376343, 'learning_rate': 0.00022265625, 'epoch': 0.01, 'num_input_tokens_seen': 59768832}\n{'loss': 7.5278, 'grad_norm': 0.4705545902252197, 'learning_rate': 0.0002265625, 'epoch': 0.01, 'num_input_tokens_seen': 60817408}\n{'loss': 7.5479, 'grad_norm': 0.3745419979095459, 'learning_rate': 0.00023046875000000001, 'epoch': 0.01, 'num_input_tokens_seen': 61865984}\n{'loss': 7.4759, 'grad_norm': 0.3893500566482544, 'learning_rate': 0.000234375, 'epoch': 0.01, 'num_input_tokens_seen': 62914560}\n{'loss': 7.5032, 'grad_norm': 0.31959569454193115, 'learning_rate': 0.00023828125, 'epoch': 0.01, 'num_input_tokens_seen': 63963136}\n{'loss': 7.421, 'grad_norm': 0.3203206956386566, 'learning_rate': 0.0002421875, 'epoch': 0.01, 'num_input_tokens_seen': 65011712}\n{'loss': 7.4998, 'grad_norm': 0.2730390429496765, 'learning_rate': 0.00024609375, 'epoch': 0.01, 'num_input_tokens_seen': 66060288}\n{'loss': 7.4157, 'grad_norm': 0.34872403740882874, 'learning_rate': 0.00025, 'epoch': 0.01, 'num_input_tokens_seen': 67108864}\n[2025-03-10 16:17:04 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. 
(read timeout=10)\"), '(Request ID: 5ef28452-e903-4bd8-946d-f0c77f558a2a)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk5/example_holdout_4799.jsonl.zst\n[2025-03-10 16:17:04 WARNING] Retrying in 1s [Retry 1/5].\n{'eval_loss': 7.471163749694824, 'eval_runtime': 651.4801, 'eval_samples_per_second': 25.149, 'eval_steps_per_second': 0.196, 'epoch': 0.01, 'num_input_tokens_seen': 67108864}\n{'loss': 7.5083, 'grad_norm': 0.339502215385437, 'learning_rate': 0.00025390625, 'epoch': 0.01, 'num_input_tokens_seen': 68157440}\n{'loss': 7.7083, 'grad_norm': 0.6426190137863159, 'learning_rate': 0.0002578125, 'epoch': 0.01, 'num_input_tokens_seen': 69206016}\n{'loss': 7.446, 'grad_norm': 0.9138129353523254, 'learning_rate': 0.00026171875, 'epoch': 0.01, 'num_input_tokens_seen': 70254592}\n{'loss': 7.3747, 'grad_norm': 1.2179911136627197, 'learning_rate': 0.000265625, 'epoch': 0.01, 'num_input_tokens_seen': 71303168}\n{'loss': 7.367, 'grad_norm': 0.7108445167541504, 'learning_rate': 0.00026953125, 'epoch': 0.01, 'num_input_tokens_seen': 72351744}\n{'loss': 7.4751, 'grad_norm': 0.7580183744430542, 'learning_rate': 0.0002734375, 'epoch': 0.01, 'num_input_tokens_seen': 73400320}\n{'loss': 7.3405, 'grad_norm': 0.7545790076255798, 'learning_rate': 0.00027734375000000003, 'epoch': 0.01, 'num_input_tokens_seen': 74448896}\n{'loss': 7.4194, 'grad_norm': 0.4764443039894104, 'learning_rate': 0.00028125000000000003, 'epoch': 0.01, 'num_input_tokens_seen': 75497472}\n{'loss': 7.2826, 'grad_norm': 0.5942808985710144, 'learning_rate': 0.00028515625, 'epoch': 0.01, 'num_input_tokens_seen': 76546048}\n{'loss': 7.3945, 'grad_norm': 0.5272891521453857, 'learning_rate': 0.0002890625, 'epoch': 0.01, 'num_input_tokens_seen': 77594624}\n{'loss': 7.3492, 'grad_norm': 0.465085506439209, 'learning_rate': 0.00029296875, 'epoch': 0.01, 'num_input_tokens_seen': 78643200}\n{'loss': 7.3658, 'grad_norm': 0.6932719349861145, 'learning_rate': 0.000296875, 'epoch': 0.01, 'num_input_tokens_seen': 79691776}\n{'loss': 7.3554, 'grad_norm': 0.49396172165870667, 'learning_rate': 0.00030078125, 'epoch': 0.01, 'num_input_tokens_seen': 80740352}\n{'loss': 7.2916, 'grad_norm': 0.3178255558013916, 'learning_rate': 0.0003046875, 'epoch': 0.01, 'num_input_tokens_seen': 81788928}\n{'loss': 7.2871, 'grad_norm': 0.5465154647827148, 'learning_rate': 0.00030859375, 'epoch': 0.01, 'num_input_tokens_seen': 82837504}\n{'loss': 7.262, 'grad_norm': 0.4718130826950073, 'learning_rate': 0.0003125, 'epoch': 0.01, 'num_input_tokens_seen': 83886080}\n{'loss': 7.2845, 'grad_norm': 0.5033366680145264, 'learning_rate': 0.00031640625, 'epoch': 0.01, 'num_input_tokens_seen': 84934656}\n{'loss': 7.2525, 'grad_norm': 0.5601146817207336, 'learning_rate': 0.0003203125, 'epoch': 0.01, 'num_input_tokens_seen': 85983232}\n{'loss': 7.1971, 'grad_norm': 0.5764456987380981, 'learning_rate': 0.00032421875, 'epoch': 0.01, 'num_input_tokens_seen': 87031808}\n{'loss': 7.1988, 'grad_norm': 0.6154745817184448, 'learning_rate': 0.000328125, 'epoch': 0.01, 'num_input_tokens_seen': 88080384}\n{'loss': 7.1987, 'grad_norm': 0.6701765656471252, 'learning_rate': 0.00033203125, 'epoch': 0.01, 'num_input_tokens_seen': 89128960}\n{'loss': 7.3324, 'grad_norm': 0.5648972988128662, 'learning_rate': 0.00033593750000000003, 'epoch': 0.01, 'num_input_tokens_seen': 90177536}\n{'loss': 7.2233, 'grad_norm': 0.5782461166381836, 'learning_rate': 0.00033984375000000003, 'epoch': 0.01, 
'num_input_tokens_seen': 91226112}\n{'loss': 7.1995, 'grad_norm': 0.540762722492218, 'learning_rate': 0.00034375, 'epoch': 0.01, 'num_input_tokens_seen': 92274688}\n{'loss': 7.1214, 'grad_norm': 0.9524508118629456, 'learning_rate': 0.00034765625, 'epoch': 0.01, 'num_input_tokens_seen': 93323264}\n{'loss': 7.1603, 'grad_norm': 1.4820659160614014, 'learning_rate': 0.0003515625, 'epoch': 0.01, 'num_input_tokens_seen': 94371840}\n{'loss': 7.2364, 'grad_norm': 0.6124428510665894, 'learning_rate': 0.00035546875, 'epoch': 0.01, 'num_input_tokens_seen': 95420416}\n{'loss': 7.0258, 'grad_norm': 0.8897235989570618, 'learning_rate': 0.000359375, 'epoch': 0.01, 'num_input_tokens_seen': 96468992}\n{'loss': 7.1182, 'grad_norm': 0.9263321757316589, 'learning_rate': 0.00036328125, 'epoch': 0.01, 'num_input_tokens_seen': 97517568}\n{'loss': 7.109, 'grad_norm': 0.5800505876541138, 'learning_rate': 0.0003671875, 'epoch': 0.01, 'num_input_tokens_seen': 98566144}\n{'loss': 7.0449, 'grad_norm': 0.6776424050331116, 'learning_rate': 0.00037109375, 'epoch': 0.01, 'num_input_tokens_seen': 99614720}\n{'loss': 7.1272, 'grad_norm': 0.7616431713104248, 'learning_rate': 0.000375, 'epoch': 0.01, 'num_input_tokens_seen': 100663296}\n{'loss': 7.046, 'grad_norm': 0.5346249938011169, 'learning_rate': 0.00037890625, 'epoch': 0.01, 'num_input_tokens_seen': 101711872}\n{'loss': 7.0713, 'grad_norm': 0.6108944416046143, 'learning_rate': 0.0003828125, 'epoch': 0.01, 'num_input_tokens_seen': 102760448}\n{'loss': 7.1459, 'grad_norm': 0.4430749714374542, 'learning_rate': 0.00038671875, 'epoch': 0.01, 'num_input_tokens_seen': 103809024}\n{'loss': 7.0709, 'grad_norm': 0.6020255088806152, 'learning_rate': 0.000390625, 'epoch': 0.01, 'num_input_tokens_seen': 104857600}\n{'loss': 7.0144, 'grad_norm': 0.5525627732276917, 'learning_rate': 0.00039453125, 'epoch': 0.01, 'num_input_tokens_seen': 105906176}\n{'loss': 7.0926, 'grad_norm': 0.6909684538841248, 'learning_rate': 0.00039843750000000003, 'epoch': 0.01, 'num_input_tokens_seen': 106954752}\n{'loss': 7.0289, 'grad_norm': 0.5576740503311157, 'learning_rate': 0.00040234375000000003, 'epoch': 0.01, 'num_input_tokens_seen': 108003328}\n{'loss': 6.9173, 'grad_norm': 0.48874178528785706, 'learning_rate': 0.00040625000000000004, 'epoch': 0.01, 'num_input_tokens_seen': 109051904}\n{'loss': 6.9777, 'grad_norm': 0.3904782831668854, 'learning_rate': 0.00041015625, 'epoch': 0.01, 'num_input_tokens_seen': 110100480}\n{'loss': 6.9473, 'grad_norm': 0.3953755795955658, 'learning_rate': 0.0004140625, 'epoch': 0.01, 'num_input_tokens_seen': 111149056}\n{'loss': 6.9071, 'grad_norm': 0.43107134103775024, 'learning_rate': 0.00041796875, 'epoch': 0.01, 'num_input_tokens_seen': 112197632}\n{'loss': 6.9277, 'grad_norm': 0.33989447355270386, 'learning_rate': 0.000421875, 'epoch': 0.01, 'num_input_tokens_seen': 113246208}\n{'loss': 6.914, 'grad_norm': 0.3267095983028412, 'learning_rate': 0.00042578125, 'epoch': 0.01, 'num_input_tokens_seen': 114294784}\n{'loss': 6.6865, 'grad_norm': 0.4201946556568146, 'learning_rate': 0.0004296875, 'epoch': 0.01, 'num_input_tokens_seen': 115343360}\n{'loss': 6.8229, 'grad_norm': 0.345426082611084, 'learning_rate': 0.00043359375, 'epoch': 0.01, 'num_input_tokens_seen': 116391936}\n{'loss': 6.8599, 'grad_norm': 0.4104400873184204, 'learning_rate': 0.0004375, 'epoch': 0.01, 'num_input_tokens_seen': 117440512}\n{'loss': 6.7656, 'grad_norm': 0.6487549543380737, 'learning_rate': 0.00044140625, 'epoch': 0.01, 'num_input_tokens_seen': 118489088}\n{'loss': 6.8654, 'grad_norm': 
1.5497283935546875, 'learning_rate': 0.0004453125, 'epoch': 0.01, 'num_input_tokens_seen': 119537664}\n{'loss': 6.8207, 'grad_norm': 1.9772824048995972, 'learning_rate': 0.00044921875, 'epoch': 0.01, 'num_input_tokens_seen': 120586240}\n{'loss': 6.7802, 'grad_norm': 0.9341455101966858, 'learning_rate': 0.000453125, 'epoch': 0.01, 'num_input_tokens_seen': 121634816}\n{'loss': 6.8017, 'grad_norm': 1.3528856039047241, 'learning_rate': 0.00045703125, 'epoch': 0.01, 'num_input_tokens_seen': 122683392}\n{'loss': 6.8344, 'grad_norm': 0.5852281451225281, 'learning_rate': 0.00046093750000000003, 'epoch': 0.01, 'num_input_tokens_seen': 123731968}\n{'loss': 6.8259, 'grad_norm': 0.9776580929756165, 'learning_rate': 0.00046484375000000003, 'epoch': 0.01, 'num_input_tokens_seen': 124780544}\n{'loss': 6.7581, 'grad_norm': 1.0398296117782593, 'learning_rate': 0.00046875, 'epoch': 0.01, 'num_input_tokens_seen': 125829120}\n{'loss': 6.7795, 'grad_norm': 1.1206268072128296, 'learning_rate': 0.00047265625, 'epoch': 0.01, 'num_input_tokens_seen': 126877696}\n{'loss': 6.5667, 'grad_norm': 0.6790318489074707, 'learning_rate': 0.0004765625, 'epoch': 0.01, 'num_input_tokens_seen': 127926272}\n{'loss': 6.7297, 'grad_norm': 1.2275055646896362, 'learning_rate': 0.00048046875, 'epoch': 0.02, 'num_input_tokens_seen': 128974848}\n{'loss': 6.7104, 'grad_norm': 1.1354466676712036, 'learning_rate': 0.000484375, 'epoch': 0.02, 'num_input_tokens_seen': 130023424}\n{'loss': 6.7025, 'grad_norm': 0.9035728573799133, 'learning_rate': 0.00048828125, 'epoch': 0.02, 'num_input_tokens_seen': 131072000}\n{'loss': 6.6391, 'grad_norm': 1.3942680358886719, 'learning_rate': 0.0004921875, 'epoch': 0.02, 'num_input_tokens_seen': 132120576}\n{'loss': 6.6011, 'grad_norm': 0.7435236573219299, 'learning_rate': 0.00049609375, 'epoch': 0.02, 'num_input_tokens_seen': 133169152}\n{'loss': 6.5135, 'grad_norm': 0.5970368385314941, 'learning_rate': 0.0005, 'epoch': 0.02, 'num_input_tokens_seen': 134217728}\n{'eval_loss': 6.573822021484375, 'eval_runtime': 629.9441, 'eval_samples_per_second': 26.009, 'eval_steps_per_second': 0.203, 'epoch': 0.02, 'num_input_tokens_seen': 134217728}\n{'loss': 6.5509, 'grad_norm': 0.7936264276504517, 'learning_rate': 0.00050390625, 'epoch': 0.02, 'num_input_tokens_seen': 135266304}\n{'loss': 6.6008, 'grad_norm': 0.6225885152816772, 'learning_rate': 0.0005078125, 'epoch': 0.02, 'num_input_tokens_seen': 136314880}\n{'loss': 6.4821, 'grad_norm': 0.5519376993179321, 'learning_rate': 0.00051171875, 'epoch': 0.02, 'num_input_tokens_seen': 137363456}\n{'loss': 6.3411, 'grad_norm': 0.5908603668212891, 'learning_rate': 0.000515625, 'epoch': 0.02, 'num_input_tokens_seen': 138412032}\n{'loss': 6.3464, 'grad_norm': 0.5101401209831238, 'learning_rate': 0.00051953125, 'epoch': 0.02, 'num_input_tokens_seen': 139460608}\n{'loss': 6.3638, 'grad_norm': 0.7352246046066284, 'learning_rate': 0.0005234375, 'epoch': 0.02, 'num_input_tokens_seen': 140509184}\n{'loss': 6.3429, 'grad_norm': 0.49651673436164856, 'learning_rate': 0.00052734375, 'epoch': 0.02, 'num_input_tokens_seen': 141557760}\n{'loss': 6.2987, 'grad_norm': 0.4835755527019501, 'learning_rate': 0.00053125, 'epoch': 0.02, 'num_input_tokens_seen': 142606336}\n{'loss': 6.2982, 'grad_norm': 0.5940163731575012, 'learning_rate': 0.00053515625, 'epoch': 0.02, 'num_input_tokens_seen': 143654912}\n{'loss': 6.267, 'grad_norm': 0.7658674120903015, 'learning_rate': 0.0005390625, 'epoch': 0.02, 'num_input_tokens_seen': 144703488}\n{'loss': 6.2102, 'grad_norm': 0.6704416275024414, 
'learning_rate': 0.00054296875, 'epoch': 0.02, 'num_input_tokens_seen': 145752064}\n{'loss': 6.1956, 'grad_norm': 0.6615312099456787, 'learning_rate': 0.000546875, 'epoch': 0.02, 'num_input_tokens_seen': 146800640}\n{'loss': 6.286, 'grad_norm': 0.7957404255867004, 'learning_rate': 0.0005507812500000001, 'epoch': 0.02, 'num_input_tokens_seen': 147849216}\n{'loss': 6.2483, 'grad_norm': 0.6477276682853699, 'learning_rate': 0.0005546875000000001, 'epoch': 0.02, 'num_input_tokens_seen': 148897792}\n{'loss': 6.0944, 'grad_norm': 0.5753227472305298, 'learning_rate': 0.0005585937500000001, 'epoch': 0.02, 'num_input_tokens_seen': 149946368}\n{'loss': 6.0995, 'grad_norm': 0.5871054530143738, 'learning_rate': 0.0005625000000000001, 'epoch': 0.02, 'num_input_tokens_seen': 150994944}\n{'loss': 6.112, 'grad_norm': 0.7046136856079102, 'learning_rate': 0.00056640625, 'epoch': 0.02, 'num_input_tokens_seen': 152043520}\n{'loss': 6.102, 'grad_norm': 0.9357424378395081, 'learning_rate': 0.0005703125, 'epoch': 0.02, 'num_input_tokens_seen': 153092096}\n{'loss': 6.1407, 'grad_norm': 1.0577837228775024, 'learning_rate': 0.00057421875, 'epoch': 0.02, 'num_input_tokens_seen': 154140672}\n{'loss': 5.9836, 'grad_norm': 0.7795257568359375, 'learning_rate': 0.000578125, 'epoch': 0.02, 'num_input_tokens_seen': 155189248}\n{'loss': 6.1041, 'grad_norm': 0.8117634057998657, 'learning_rate': 0.00058203125, 'epoch': 0.02, 'num_input_tokens_seen': 156237824}\n{'loss': 5.9474, 'grad_norm': 0.8311094045639038, 'learning_rate': 0.0005859375, 'epoch': 0.02, 'num_input_tokens_seen': 157286400}\n{'loss': 5.9365, 'grad_norm': 0.8269851803779602, 'learning_rate': 0.00058984375, 'epoch': 0.02, 'num_input_tokens_seen': 158334976}\n{'loss': 5.9668, 'grad_norm': 0.701510488986969, 'learning_rate': 0.00059375, 'epoch': 0.02, 'num_input_tokens_seen': 159383552}\n{'loss': 5.9874, 'grad_norm': 0.49938252568244934, 'learning_rate': 0.00059765625, 'epoch': 0.02, 'num_input_tokens_seen': 160432128}\n{'loss': 5.8505, 'grad_norm': 0.6981683969497681, 'learning_rate': 0.0006015625, 'epoch': 0.02, 'num_input_tokens_seen': 161480704}\n{'loss': 6.0156, 'grad_norm': 0.5023297071456909, 'learning_rate': 0.00060546875, 'epoch': 0.02, 'num_input_tokens_seen': 162529280}\n{'loss': 5.8299, 'grad_norm': 0.6075630187988281, 'learning_rate': 0.000609375, 'epoch': 0.02, 'num_input_tokens_seen': 163577856}\n{'loss': 5.8203, 'grad_norm': 0.6051607728004456, 'learning_rate': 0.00061328125, 'epoch': 0.02, 'num_input_tokens_seen': 164626432}\n{'loss': 5.7705, 'grad_norm': 0.6384783983230591, 'learning_rate': 0.0006171875, 'epoch': 0.02, 'num_input_tokens_seen': 165675008}\n{'loss': 5.791, 'grad_norm': 0.5084705948829651, 'learning_rate': 0.00062109375, 'epoch': 0.02, 'num_input_tokens_seen': 166723584}\n{'loss': 5.6743, 'grad_norm': 0.4278322160243988, 'learning_rate': 0.000625, 'epoch': 0.02, 'num_input_tokens_seen': 167772160}\n{'loss': 5.7112, 'grad_norm': 0.5151192545890808, 'learning_rate': 0.00062890625, 'epoch': 0.02, 'num_input_tokens_seen': 168820736}\n{'loss': 5.5128, 'grad_norm': 0.6542677283287048, 'learning_rate': 0.0006328125, 'epoch': 0.02, 'num_input_tokens_seen': 169869312}\n{'loss': 5.6735, 'grad_norm': 0.6016008257865906, 'learning_rate': 0.00063671875, 'epoch': 0.02, 'num_input_tokens_seen': 170917888}\n{'loss': 5.6525, 'grad_norm': 0.48695647716522217, 'learning_rate': 0.000640625, 'epoch': 0.02, 'num_input_tokens_seen': 171966464}\n{'loss': 5.6051, 'grad_norm': 0.5894989371299744, 'learning_rate': 0.00064453125, 'epoch': 0.02, 
'num_input_tokens_seen': 173015040}\n{'loss': 5.6377, 'grad_norm': 0.7626883387565613, 'learning_rate': 0.0006484375, 'epoch': 0.02, 'num_input_tokens_seen': 174063616}\n{'loss': 5.6038, 'grad_norm': 0.745198130607605, 'learning_rate': 0.00065234375, 'epoch': 0.02, 'num_input_tokens_seen': 175112192}\n{'loss': 5.5465, 'grad_norm': 0.7876908779144287, 'learning_rate': 0.00065625, 'epoch': 0.02, 'num_input_tokens_seen': 176160768}\n{'loss': 5.5903, 'grad_norm': 0.7416785359382629, 'learning_rate': 0.00066015625, 'epoch': 0.02, 'num_input_tokens_seen': 177209344}\n{'loss': 5.4993, 'grad_norm': 0.4493878185749054, 'learning_rate': 0.0006640625, 'epoch': 0.02, 'num_input_tokens_seen': 178257920}\n{'loss': 5.5612, 'grad_norm': 0.5095419883728027, 'learning_rate': 0.00066796875, 'epoch': 0.02, 'num_input_tokens_seen': 179306496}\n{'loss': 5.378, 'grad_norm': 0.6330733895301819, 'learning_rate': 0.0006718750000000001, 'epoch': 0.02, 'num_input_tokens_seen': 180355072}\n{'loss': 5.4875, 'grad_norm': 0.4710595905780792, 'learning_rate': 0.0006757812500000001, 'epoch': 0.02, 'num_input_tokens_seen': 181403648}\n{'loss': 5.4221, 'grad_norm': 0.5276287198066711, 'learning_rate': 0.0006796875000000001, 'epoch': 0.02, 'num_input_tokens_seen': 182452224}\n{'loss': 5.308, 'grad_norm': 0.6985499858856201, 'learning_rate': 0.0006835937500000001, 'epoch': 0.02, 'num_input_tokens_seen': 183500800}\n{'loss': 5.4455, 'grad_norm': 0.4874110519886017, 'learning_rate': 0.0006875, 'epoch': 0.02, 'num_input_tokens_seen': 184549376}\n{'loss': 5.476, 'grad_norm': 0.5807638764381409, 'learning_rate': 0.00069140625, 'epoch': 0.02, 'num_input_tokens_seen': 185597952}\n{'loss': 5.2876, 'grad_norm': 0.5431288480758667, 'learning_rate': 0.0006953125, 'epoch': 0.02, 'num_input_tokens_seen': 186646528}\n{'loss': 5.3881, 'grad_norm': 0.7681945562362671, 'learning_rate': 0.00069921875, 'epoch': 0.02, 'num_input_tokens_seen': 187695104}\n{'loss': 5.4006, 'grad_norm': 0.7372023463249207, 'learning_rate': 0.000703125, 'epoch': 0.02, 'num_input_tokens_seen': 188743680}\n{'loss': 5.3813, 'grad_norm': 0.7354347109794617, 'learning_rate': 0.00070703125, 'epoch': 0.02, 'num_input_tokens_seen': 189792256}\n{'loss': 5.3393, 'grad_norm': 0.5908933281898499, 'learning_rate': 0.0007109375, 'epoch': 0.02, 'num_input_tokens_seen': 190840832}\n{'loss': 5.3024, 'grad_norm': 0.5665153861045837, 'learning_rate': 0.00071484375, 'epoch': 0.02, 'num_input_tokens_seen': 191889408}\n{'loss': 5.2782, 'grad_norm': 0.5930947661399841, 'learning_rate': 0.00071875, 'epoch': 0.02, 'num_input_tokens_seen': 192937984}\n{'loss': 5.3199, 'grad_norm': 0.5926457643508911, 'learning_rate': 0.00072265625, 'epoch': 0.02, 'num_input_tokens_seen': 193986560}\n{'loss': 5.2949, 'grad_norm': 0.548610270023346, 'learning_rate': 0.0007265625, 'epoch': 0.02, 'num_input_tokens_seen': 195035136}\n{'loss': 5.3143, 'grad_norm': 0.6023995280265808, 'learning_rate': 0.00073046875, 'epoch': 0.02, 'num_input_tokens_seen': 196083712}\n{'loss': 5.2982, 'grad_norm': 1.0335254669189453, 'learning_rate': 0.000734375, 'epoch': 0.02, 'num_input_tokens_seen': 197132288}\n{'loss': 5.2933, 'grad_norm': 1.2596269845962524, 'learning_rate': 0.00073828125, 'epoch': 0.02, 'num_input_tokens_seen': 198180864}\n{'loss': 5.2524, 'grad_norm': 0.6956535577774048, 'learning_rate': 0.0007421875, 'epoch': 0.02, 'num_input_tokens_seen': 199229440}\n{'loss': 5.3543, 'grad_norm': 0.946761429309845, 'learning_rate': 0.00074609375, 'epoch': 0.02, 'num_input_tokens_seen': 200278016}\n{'loss': 5.1616, 
'grad_norm': 0.9568974375724792, 'learning_rate': 0.00075, 'epoch': 0.02, 'num_input_tokens_seen': 201326592}\n[2025-03-10 18:01:06 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: ba6e4c51-f4a4-407e-9934-3772550b7ce9)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk1/example_holdout_2770.jsonl.zst\n[2025-03-10 18:01:06 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-10 18:02:30 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: bdf2cfaa-7e0b-46a0-bec1-b1e573fa7998)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_4386.jsonl.zst\n[2025-03-10 18:02:30 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-10 18:02:44 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 1dc5e455-8042-4c7b-9b97-5ded33dfea34)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk1/example_holdout_1763.jsonl.zst\n[2025-03-10 18:02:44 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-10 18:04:45 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 9cf29917-8111-41fe-80aa-953df65c5803)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_5509.jsonl.zst\n[2025-03-10 18:04:45 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-10 18:05:26 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 2515a0b0-3d81-409f-940c-e78ed5e2dbf8)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_3093.jsonl.zst\n[2025-03-10 18:05:26 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-10 18:06:39 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: a4c1e0c7-1c7a-4377-bc7e-6f076473072b)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_3422.jsonl.zst\n[2025-03-10 18:06:39 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-10 18:07:37 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: c7b0d366-db86-4d0c-a4e0-be251d26519e)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk3/example_holdout_2250.jsonl.zst\n[2025-03-10 18:07:37 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-10 18:09:23 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. 
(read timeout=10)\"), '(Request ID: b0df5a1a-4836-46cf-8e45-58a7c1553309)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk3/example_holdout_6161.jsonl.zst\n[2025-03-10 18:09:23 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-10 18:09:44 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: c1d97368-c0ae-45bb-ae10-5559b3ebc4e4)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk3/example_holdout_5782.jsonl.zst\n[2025-03-10 18:09:44 WARNING] Retrying in 1s [Retry 1/5].\n{'eval_loss': 5.276012420654297, 'eval_runtime': 754.8295, 'eval_samples_per_second': 21.706, 'eval_steps_per_second': 0.17, 'epoch': 0.02, 'num_input_tokens_seen': 201326592}\n{'loss': 5.2363, 'grad_norm': 0.8435476422309875, 'learning_rate': 0.00075390625, 'epoch': 0.02, 'num_input_tokens_seen': 202375168}\n{'loss': 5.1035, 'grad_norm': 1.1267820596694946, 'learning_rate': 0.0007578125, 'epoch': 0.02, 'num_input_tokens_seen': 203423744}\n{'loss': 5.3017, 'grad_norm': 0.8555666208267212, 'learning_rate': 0.00076171875, 'epoch': 0.02, 'num_input_tokens_seen': 204472320}\n{'loss': 5.1679, 'grad_norm': 0.7608171105384827, 'learning_rate': 0.000765625, 'epoch': 0.02, 'num_input_tokens_seen': 205520896}\n{'loss': 5.2326, 'grad_norm': 0.6787221431732178, 'learning_rate': 0.00076953125, 'epoch': 0.02, 'num_input_tokens_seen': 206569472}\n{'loss': 5.144, 'grad_norm': 0.6404955983161926, 'learning_rate': 0.0007734375, 'epoch': 0.02, 'num_input_tokens_seen': 207618048}\n{'loss': 5.1933, 'grad_norm': 0.6099393367767334, 'learning_rate': 0.00077734375, 'epoch': 0.02, 'num_input_tokens_seen': 208666624}\n{'loss': 5.0498, 'grad_norm': 0.5971768498420715, 'learning_rate': 0.00078125, 'epoch': 0.02, 'num_input_tokens_seen': 209715200}\n{'loss': 5.1443, 'grad_norm': 0.642633318901062, 'learning_rate': 0.00078515625, 'epoch': 0.02, 'num_input_tokens_seen': 210763776}\n{'loss': 5.2125, 'grad_norm': 0.706398606300354, 'learning_rate': 0.0007890625, 'epoch': 0.02, 'num_input_tokens_seen': 211812352}\n{'loss': 5.1882, 'grad_norm': 0.817449688911438, 'learning_rate': 0.00079296875, 'epoch': 0.02, 'num_input_tokens_seen': 212860928}\n{'loss': 5.0905, 'grad_norm': 0.9392185807228088, 'learning_rate': 0.0007968750000000001, 'epoch': 0.02, 'num_input_tokens_seen': 213909504}\n{'loss': 5.059, 'grad_norm': 0.5305852890014648, 'learning_rate': 0.0008007812500000001, 'epoch': 0.03, 'num_input_tokens_seen': 214958080}\n{'loss': 5.0838, 'grad_norm': 0.7662672996520996, 'learning_rate': 0.0008046875000000001, 'epoch': 0.03, 'num_input_tokens_seen': 216006656}\n{'loss': 5.0112, 'grad_norm': 0.5768160223960876, 'learning_rate': 0.0008085937500000001, 'epoch': 0.03, 'num_input_tokens_seen': 217055232}\n{'loss': 4.9684, 'grad_norm': 0.5972586870193481, 'learning_rate': 0.0008125000000000001, 'epoch': 0.03, 'num_input_tokens_seen': 218103808}\n{'loss': 5.0764, 'grad_norm': 0.559498131275177, 'learning_rate': 0.00081640625, 'epoch': 0.03, 'num_input_tokens_seen': 219152384}\n{'loss': 5.0117, 'grad_norm': 0.555585503578186, 'learning_rate': 0.0008203125, 'epoch': 0.03, 'num_input_tokens_seen': 220200960}\n{'loss': 5.1955, 'grad_norm': 0.6180793046951294, 'learning_rate': 0.00082421875, 'epoch': 0.03, 'num_input_tokens_seen': 221249536}\n{'loss': 5.1265, 'grad_norm': 
0.5784006118774414, 'learning_rate': 0.000828125, 'epoch': 0.03, 'num_input_tokens_seen': 222298112}\n{'loss': 5.03, 'grad_norm': 0.5200456380844116, 'learning_rate': 0.00083203125, 'epoch': 0.03, 'num_input_tokens_seen': 223346688}\n{'loss': 5.051, 'grad_norm': 0.5112505555152893, 'learning_rate': 0.0008359375, 'epoch': 0.03, 'num_input_tokens_seen': 224395264}\n{'loss': 5.0994, 'grad_norm': 0.44979697465896606, 'learning_rate': 0.00083984375, 'epoch': 0.03, 'num_input_tokens_seen': 225443840}\n{'loss': 4.94, 'grad_norm': 0.46642380952835083, 'learning_rate': 0.00084375, 'epoch': 0.03, 'num_input_tokens_seen': 226492416}\n{'loss': 5.0562, 'grad_norm': 0.49667519330978394, 'learning_rate': 0.00084765625, 'epoch': 0.03, 'num_input_tokens_seen': 227540992}\n{'loss': 4.9217, 'grad_norm': 0.4302496314048767, 'learning_rate': 0.0008515625, 'epoch': 0.03, 'num_input_tokens_seen': 228589568}\n{'loss': 4.8588, 'grad_norm': 0.5326887369155884, 'learning_rate': 0.00085546875, 'epoch': 0.03, 'num_input_tokens_seen': 229638144}\n{'loss': 4.8501, 'grad_norm': 0.45604026317596436, 'learning_rate': 0.000859375, 'epoch': 0.03, 'num_input_tokens_seen': 230686720}\n{'loss': 4.8774, 'grad_norm': 0.4497997462749481, 'learning_rate': 0.00086328125, 'epoch': 0.03, 'num_input_tokens_seen': 231735296}\n{'loss': 5.0143, 'grad_norm': 0.526670515537262, 'learning_rate': 0.0008671875, 'epoch': 0.03, 'num_input_tokens_seen': 232783872}\n{'loss': 4.9512, 'grad_norm': 0.5823948979377747, 'learning_rate': 0.00087109375, 'epoch': 0.03, 'num_input_tokens_seen': 233832448}\n{'loss': 4.915, 'grad_norm': 0.6516634821891785, 'learning_rate': 0.000875, 'epoch': 0.03, 'num_input_tokens_seen': 234881024}\n{'loss': 4.9318, 'grad_norm': 0.7564677596092224, 'learning_rate': 0.00087890625, 'epoch': 0.03, 'num_input_tokens_seen': 235929600}\n{'loss': 4.9041, 'grad_norm': 0.7170491814613342, 'learning_rate': 0.0008828125, 'epoch': 0.03, 'num_input_tokens_seen': 236978176}\n{'loss': 4.9727, 'grad_norm': 0.7671059966087341, 'learning_rate': 0.00088671875, 'epoch': 0.03, 'num_input_tokens_seen': 238026752}\n{'loss': 4.7895, 'grad_norm': 0.8752806782722473, 'learning_rate': 0.000890625, 'epoch': 0.03, 'num_input_tokens_seen': 239075328}\n{'loss': 4.8845, 'grad_norm': 0.8313667178153992, 'learning_rate': 0.00089453125, 'epoch': 0.03, 'num_input_tokens_seen': 240123904}\n{'loss': 4.8325, 'grad_norm': 0.9223323464393616, 'learning_rate': 0.0008984375, 'epoch': 0.03, 'num_input_tokens_seen': 241172480}\n{'loss': 4.8991, 'grad_norm': 0.7362072467803955, 'learning_rate': 0.00090234375, 'epoch': 0.03, 'num_input_tokens_seen': 242221056}\n{'loss': 4.7443, 'grad_norm': 0.6667400598526001, 'learning_rate': 0.00090625, 'epoch': 0.03, 'num_input_tokens_seen': 243269632}\n{'loss': 4.8913, 'grad_norm': 0.5431771874427795, 'learning_rate': 0.00091015625, 'epoch': 0.03, 'num_input_tokens_seen': 244318208}\n{'loss': 4.8997, 'grad_norm': 0.5542160272598267, 'learning_rate': 0.0009140625, 'epoch': 0.03, 'num_input_tokens_seen': 245366784}\n{'loss': 4.8448, 'grad_norm': 0.6110911965370178, 'learning_rate': 0.0009179687500000001, 'epoch': 0.03, 'num_input_tokens_seen': 246415360}\n{'loss': 4.7975, 'grad_norm': 0.5550041794776917, 'learning_rate': 0.0009218750000000001, 'epoch': 0.03, 'num_input_tokens_seen': 247463936}\n{'loss': 4.87, 'grad_norm': 0.4778221547603607, 'learning_rate': 0.0009257812500000001, 'epoch': 0.03, 'num_input_tokens_seen': 248512512}\n{'loss': 4.7594, 'grad_norm': 0.35899603366851807, 'learning_rate': 0.0009296875000000001, 'epoch': 
0.03, 'num_input_tokens_seen': 249561088}\n{'loss': 4.8338, 'grad_norm': 0.494094580411911, 'learning_rate': 0.0009335937500000001, 'epoch': 0.03, 'num_input_tokens_seen': 250609664}\n{'loss': 4.7424, 'grad_norm': 0.4671477675437927, 'learning_rate': 0.0009375, 'epoch': 0.03, 'num_input_tokens_seen': 251658240}\n{'loss': 4.7593, 'grad_norm': 0.4691649079322815, 'learning_rate': 0.00094140625, 'epoch': 0.03, 'num_input_tokens_seen': 252706816}\n{'loss': 4.7869, 'grad_norm': 0.6212939023971558, 'learning_rate': 0.0009453125, 'epoch': 0.03, 'num_input_tokens_seen': 253755392}\n{'loss': 4.7925, 'grad_norm': 0.621306300163269, 'learning_rate': 0.00094921875, 'epoch': 0.03, 'num_input_tokens_seen': 254803968}\n{'loss': 4.7714, 'grad_norm': 0.6991429328918457, 'learning_rate': 0.000953125, 'epoch': 0.03, 'num_input_tokens_seen': 255852544}\n{'loss': 5.2726, 'grad_norm': 1.016664743423462, 'learning_rate': 0.00095703125, 'epoch': 0.03, 'num_input_tokens_seen': 256901120}\n{'loss': 4.9125, 'grad_norm': 1.3091747760772705, 'learning_rate': 0.0009609375, 'epoch': 0.03, 'num_input_tokens_seen': 257949696}\n{'loss': 4.839, 'grad_norm': 1.2617076635360718, 'learning_rate': 0.00096484375, 'epoch': 0.03, 'num_input_tokens_seen': 258998272}\n{'loss': 4.8412, 'grad_norm': 0.9403041005134583, 'learning_rate': 0.00096875, 'epoch': 0.03, 'num_input_tokens_seen': 260046848}\n{'loss': 5.0193, 'grad_norm': 0.9802642464637756, 'learning_rate': 0.00097265625, 'epoch': 0.03, 'num_input_tokens_seen': 261095424}\n{'loss': 4.7372, 'grad_norm': 0.9636861085891724, 'learning_rate': 0.0009765625, 'epoch': 0.03, 'num_input_tokens_seen': 262144000}\n{'loss': 4.7878, 'grad_norm': 0.7803710699081421, 'learning_rate': 0.00098046875, 'epoch': 0.03, 'num_input_tokens_seen': 263192576}\n{'loss': 4.8126, 'grad_norm': 0.7087182402610779, 'learning_rate': 0.000984375, 'epoch': 0.03, 'num_input_tokens_seen': 264241152}\n{'loss': 4.7252, 'grad_norm': 0.7220279574394226, 'learning_rate': 0.00098828125, 'epoch': 0.03, 'num_input_tokens_seen': 265289728}\n{'loss': 4.7419, 'grad_norm': 0.6956494450569153, 'learning_rate': 0.0009921875, 'epoch': 0.03, 'num_input_tokens_seen': 266338304}\n{'loss': 4.8041, 'grad_norm': 0.8009976148605347, 'learning_rate': 0.00099609375, 'epoch': 0.03, 'num_input_tokens_seen': 267386880}\n{'loss': 4.7016, 'grad_norm': 0.6665300130844116, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 268435456}\n{'eval_loss': 4.753816604614258, 'eval_runtime': 661.8529, 'eval_samples_per_second': 24.755, 'eval_steps_per_second': 0.193, 'epoch': 0.03, 'num_input_tokens_seen': 268435456}\n{'loss': 4.6762, 'grad_norm': 0.5311985611915588, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 269484032}\n{'loss': 4.6296, 'grad_norm': 0.5160760879516602, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 270532608}\n{'loss': 4.7422, 'grad_norm': 0.5964047312736511, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 271581184}\n{'loss': 4.7396, 'grad_norm': 0.4793979227542877, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 272629760}\n{'loss': 4.733, 'grad_norm': 0.5280688405036926, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 273678336}\n{'loss': 4.9591, 'grad_norm': 0.8669152855873108, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 274726912}\n{'loss': 4.7953, 'grad_norm': 0.8417720198631287, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 275775488}\n{'loss': 4.7972, 'grad_norm': 0.9349585175514221, 
'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 276824064}\n{'loss': 4.7233, 'grad_norm': 0.8441230654716492, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 277872640}\n{'loss': 4.8032, 'grad_norm': 0.7163352370262146, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 278921216}\n{'loss': 4.4369, 'grad_norm': 1.0364480018615723, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 279969792}\n{'loss': 4.557, 'grad_norm': 1.012042760848999, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 281018368}\n{'loss': 4.7696, 'grad_norm': 1.1818541288375854, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 282066944}\n{'loss': 4.7835, 'grad_norm': 0.8296499848365784, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 283115520}\n{'loss': 4.761, 'grad_norm': 0.6920194625854492, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 284164096}\n{'loss': 4.6239, 'grad_norm': 0.8495435118675232, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 285212672}\n{'loss': 4.6914, 'grad_norm': 0.6536931991577148, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 286261248}\n{'loss': 4.776, 'grad_norm': 0.7161967754364014, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 287309824}\n{'loss': 4.7096, 'grad_norm': 0.5441194176673889, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 288358400}\n{'loss': 4.7278, 'grad_norm': 0.5437328219413757, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 289406976}\n{'loss': 4.6126, 'grad_norm': 0.49404028058052063, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 290455552}\n{'loss': 4.6594, 'grad_norm': 0.4274217188358307, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 291504128}\n{'loss': 4.6365, 'grad_norm': 0.48871853947639465, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 292552704}\n{'loss': 4.5999, 'grad_norm': 0.5101707577705383, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 293601280}\n{'loss': 4.5869, 'grad_norm': 0.4579870104789734, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 294649856}\n{'loss': 4.5993, 'grad_norm': 0.44694098830223083, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 295698432}\n{'loss': 4.6369, 'grad_norm': 0.42955130338668823, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 296747008}\n{'loss': 4.5973, 'grad_norm': 0.532283365726471, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 297795584}\n{'loss': 4.3953, 'grad_norm': 0.5553389191627502, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 298844160}\n{'loss': 4.5501, 'grad_norm': 0.4733176529407501, 'learning_rate': 0.001, 'epoch': 0.03, 'num_input_tokens_seen': 299892736}\n{'loss': 4.4896, 'grad_norm': 0.5510519742965698, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 300941312}\n{'loss': 4.348, 'grad_norm': 0.5312983393669128, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 301989888}\n{'loss': 4.4, 'grad_norm': 0.4173823297023773, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 303038464}\n{'loss': 4.4971, 'grad_norm': 0.4799824357032776, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 304087040}\n{'loss': 4.5507, 'grad_norm': 0.4494017958641052, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 305135616}\n{'loss': 4.5655, 'grad_norm': 0.36501485109329224, 'learning_rate': 0.001, 
'epoch': 0.04, 'num_input_tokens_seen': 306184192}\n{'loss': 4.5189, 'grad_norm': 0.4833853840827942, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 307232768}\n{'loss': 4.5387, 'grad_norm': 0.5214531421661377, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 308281344}\n{'loss': 4.5509, 'grad_norm': 0.5383253693580627, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 309329920}\n{'loss': 4.4112, 'grad_norm': 0.5364778637886047, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 310378496}\n{'loss': 4.568, 'grad_norm': 0.3624066114425659, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 311427072}\n{'loss': 4.5289, 'grad_norm': 0.5469081401824951, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 312475648}\n{'loss': 4.4953, 'grad_norm': 0.5212593674659729, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 313524224}\n{'loss': 4.4614, 'grad_norm': 0.36742305755615234, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 314572800}\n{'loss': 4.4757, 'grad_norm': 0.43591663241386414, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 315621376}\n{'loss': 4.5321, 'grad_norm': 0.483548104763031, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 316669952}\n{'loss': 4.449, 'grad_norm': 0.3971082866191864, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 317718528}\n{'loss': 4.4539, 'grad_norm': 0.3416251540184021, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 318767104}\n{'loss': 4.3456, 'grad_norm': 0.45731472969055176, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 319815680}\n{'loss': 4.4179, 'grad_norm': 0.4462226331233978, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 320864256}\n{'loss': 4.3691, 'grad_norm': 0.3393065631389618, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 321912832}\n{'loss': 4.4361, 'grad_norm': 0.39659640192985535, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 322961408}\n{'loss': 4.4166, 'grad_norm': 0.42212849855422974, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 324009984}\n{'loss': 4.3931, 'grad_norm': 0.3403238356113434, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 325058560}\n{'loss': 4.3003, 'grad_norm': 0.3405858278274536, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 326107136}\n{'loss': 4.4339, 'grad_norm': 0.42516669631004333, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 327155712}\n{'loss': 4.4258, 'grad_norm': 0.4387160539627075, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 328204288}\n{'loss': 4.3774, 'grad_norm': 0.3546140193939209, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 329252864}\n{'loss': 4.3261, 'grad_norm': 0.3842155933380127, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 330301440}\n{'loss': 4.2843, 'grad_norm': 0.32807183265686035, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 331350016}\n{'loss': 4.3627, 'grad_norm': 0.3635430932044983, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 332398592}\n{'loss': 4.3304, 'grad_norm': 0.32113364338874817, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 333447168}\n{'loss': 4.258, 'grad_norm': 0.3261938989162445, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 334495744}\n{'loss': 4.392, 'grad_norm': 0.35287028551101685, 'learning_rate': 0.001, 'epoch': 0.04, 
'num_input_tokens_seen': 335544320}\n{'eval_loss': 4.340233325958252, 'eval_runtime': 641.4064, 'eval_samples_per_second': 25.544, 'eval_steps_per_second': 0.2, 'epoch': 0.04, 'num_input_tokens_seen': 335544320}\n{'loss': 4.4095, 'grad_norm': 0.30875736474990845, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 336592896}\n{'loss': 3.8896, 'grad_norm': 0.6334038972854614, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 337641472}\n{'loss': 4.449, 'grad_norm': 0.5519331693649292, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 338690048}\n{'loss': 4.4388, 'grad_norm': 0.4262654185295105, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 339738624}\n{'loss': 4.3918, 'grad_norm': 0.4348645508289337, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 340787200}\n{'loss': 4.3677, 'grad_norm': 0.3858915865421295, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 341835776}\n{'loss': 4.3343, 'grad_norm': 0.4542510509490967, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 342884352}\n{'loss': 4.3196, 'grad_norm': 0.4413583278656006, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 343932928}\n{'loss': 4.322, 'grad_norm': 0.5200892686843872, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 344981504}\n{'loss': 4.2409, 'grad_norm': 0.4969848692417145, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 346030080}\n{'loss': 4.2263, 'grad_norm': 0.43436068296432495, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 347078656}\n{'loss': 4.2271, 'grad_norm': 0.4760046899318695, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 348127232}\n{'loss': 4.3567, 'grad_norm': 0.43881112337112427, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 349175808}\n{'loss': 4.2606, 'grad_norm': 0.5361112952232361, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 350224384}\n{'loss': 4.3831, 'grad_norm': 0.5959597229957581, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 351272960}\n{'loss': 4.2899, 'grad_norm': 0.6709368824958801, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 352321536}\n{'loss': 4.2263, 'grad_norm': 0.6585149168968201, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 353370112}\n{'loss': 4.3428, 'grad_norm': 0.5447191596031189, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 354418688}\n{'loss': 4.3642, 'grad_norm': 0.576545238494873, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 355467264}\n{'loss': 4.025, 'grad_norm': 0.7567218542098999, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 356515840}\n{'loss': 4.2593, 'grad_norm': 0.6053742170333862, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 357564416}\n{'loss': 4.2864, 'grad_norm': 0.54949551820755, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 358612992}\n{'loss': 4.3183, 'grad_norm': 0.4792100489139557, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 359661568}\n{'loss': 4.2957, 'grad_norm': 0.4366244077682495, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 360710144}\n{'loss': 4.3502, 'grad_norm': 0.5610309839248657, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 361758720}\n{'loss': 4.2673, 'grad_norm': 0.42132946848869324, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 362807296}\n{'loss': 4.2565, 'grad_norm': 0.45927727222442627, 'learning_rate': 
0.001, 'epoch': 0.04, 'num_input_tokens_seen': 363855872}\n{'loss': 4.3009, 'grad_norm': 0.40793168544769287, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 364904448}\n{'loss': 4.2584, 'grad_norm': 0.3818293511867523, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 365953024}\n{'loss': 4.3187, 'grad_norm': 0.4942944645881653, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 367001600}\n{'loss': 4.2056, 'grad_norm': 0.5316190719604492, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 368050176}\n{'loss': 4.2403, 'grad_norm': 0.4738222658634186, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 369098752}\n{'loss': 4.244, 'grad_norm': 0.41153445839881897, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 370147328}\n{'loss': 4.2876, 'grad_norm': 0.35864201188087463, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 371195904}\n{'loss': 4.2457, 'grad_norm': 0.4317127466201782, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 372244480}\n{'loss': 4.2138, 'grad_norm': 0.4922076165676117, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 373293056}\n{'loss': 4.1875, 'grad_norm': 0.5150508880615234, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 374341632}\n{'loss': 4.1485, 'grad_norm': 0.40701162815093994, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 375390208}\n{'loss': 4.1062, 'grad_norm': 0.40378910303115845, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 376438784}\n{'loss': 4.226, 'grad_norm': 0.4435281753540039, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 377487360}\n{'loss': 4.2034, 'grad_norm': 0.37908127903938293, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 378535936}\n{'loss': 4.1502, 'grad_norm': 0.408202588558197, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 379584512}\n{'loss': 4.1623, 'grad_norm': 0.4542413651943207, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 380633088}\n{'loss': 4.206, 'grad_norm': 0.5084658861160278, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 381681664}\n{'loss': 4.1867, 'grad_norm': 0.432908833026886, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 382730240}\n{'loss': 4.2377, 'grad_norm': 0.38273656368255615, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 383778816}\n{'loss': 4.1443, 'grad_norm': 0.39886555075645447, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 384827392}\n{'loss': 4.16, 'grad_norm': 0.4073260724544525, 'learning_rate': 0.001, 'epoch': 0.04, 'num_input_tokens_seen': 385875968}\n{'loss': 4.0871, 'grad_norm': 0.46062660217285156, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 386924544}\n{'loss': 4.1655, 'grad_norm': 0.3555128574371338, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 387973120}\n{'loss': 4.1993, 'grad_norm': 0.35318323969841003, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 389021696}\n{'loss': 4.0745, 'grad_norm': 0.3469637632369995, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 390070272}\n{'loss': 4.1844, 'grad_norm': 0.3650517761707306, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 391118848}\n{'loss': 4.1744, 'grad_norm': 0.4310692846775055, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 392167424}\n{'loss': 4.1896, 'grad_norm': 0.465585857629776, 'learning_rate': 0.001, 'epoch': 0.05, 
'num_input_tokens_seen': 393216000}\n{'loss': 4.0568, 'grad_norm': 0.5539769530296326, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 394264576}\n{'loss': 4.2642, 'grad_norm': 0.5437971949577332, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 395313152}\n{'loss': 4.1705, 'grad_norm': 0.6534202694892883, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 396361728}\n{'loss': 3.9844, 'grad_norm': 0.7271204590797424, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 397410304}\n{'loss': 4.105, 'grad_norm': 0.7395262122154236, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 398458880}\n{'loss': 4.2332, 'grad_norm': 0.9734097719192505, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 399507456}\n{'loss': 4.1501, 'grad_norm': 1.1519765853881836, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 400556032}\n{'loss': 4.0756, 'grad_norm': 0.7837873697280884, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 401604608}\n{'loss': 4.013, 'grad_norm': 0.8097010850906372, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 402653184}\n{'eval_loss': 4.120734214782715, 'eval_runtime': 626.8806, 'eval_samples_per_second': 26.136, 'eval_steps_per_second': 0.204, 'epoch': 0.05, 'num_input_tokens_seen': 402653184}\n{'loss': 4.0955, 'grad_norm': 0.6811020970344543, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 403701760}\n{'loss': 4.0917, 'grad_norm': 0.5382081270217896, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 404750336}\n{'loss': 4.0414, 'grad_norm': 0.4250117242336273, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 405798912}\n{'loss': 4.1051, 'grad_norm': 0.4233124256134033, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 406847488}\n{'loss': 4.1475, 'grad_norm': 0.41960859298706055, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 407896064}\n{'loss': 4.0322, 'grad_norm': 0.4991297423839569, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 408944640}\n{'loss': 4.0664, 'grad_norm': 0.43890711665153503, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 409993216}\n{'loss': 4.1126, 'grad_norm': 0.38538315892219543, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 411041792}\n{'loss': 4.0591, 'grad_norm': 0.41170960664749146, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 412090368}\n{'loss': 4.1145, 'grad_norm': 0.42465972900390625, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 413138944}\n{'loss': 4.0393, 'grad_norm': 0.4215935468673706, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 414187520}\n{'loss': 3.9509, 'grad_norm': 0.5031537413597107, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 415236096}\n{'loss': 3.9314, 'grad_norm': 0.5212794542312622, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 416284672}\n{'loss': 4.062, 'grad_norm': 0.5779813528060913, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 417333248}\n{'loss': 4.0264, 'grad_norm': 0.5523960590362549, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 418381824}\n{'loss': 4.0366, 'grad_norm': 0.501869797706604, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 419430400}\n{'loss': 4.016, 'grad_norm': 0.390077143907547, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 420478976}\n{'loss': 3.9438, 'grad_norm': 0.39393457770347595, 
'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 421527552}\n{'loss': 3.9882, 'grad_norm': 0.3395244777202606, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 422576128}\n{'loss': 3.95, 'grad_norm': 0.3985426425933838, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 423624704}\n{'loss': 3.9708, 'grad_norm': 0.4353885352611542, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 424673280}\n{'loss': 3.9959, 'grad_norm': 0.39546582102775574, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 425721856}\n{'loss': 3.9475, 'grad_norm': 0.3725046217441559, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 426770432}\n{'loss': 3.8599, 'grad_norm': 0.5391167998313904, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 427819008}\n{'loss': 3.9765, 'grad_norm': 0.5383077263832092, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 428867584}\n{'loss': 3.8999, 'grad_norm': 0.4455236494541168, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 429916160}\n{'loss': 4.0357, 'grad_norm': 0.4489726722240448, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 430964736}\n{'loss': 3.992, 'grad_norm': 0.45914894342422485, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 432013312}\n{'loss': 3.9556, 'grad_norm': 0.5718650817871094, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 433061888}\n{'loss': 3.9797, 'grad_norm': 0.5529163479804993, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 434110464}\n{'loss': 3.9479, 'grad_norm': 0.4689369201660156, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 435159040}\n{'loss': 3.9358, 'grad_norm': 0.448303759098053, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 436207616}\n{'loss': 3.9699, 'grad_norm': 0.4203392565250397, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 437256192}\n{'loss': 3.8173, 'grad_norm': 0.4046834707260132, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 438304768}\n{'loss': 3.8183, 'grad_norm': 0.3998134136199951, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 439353344}\n{'loss': 3.8477, 'grad_norm': 0.4120945632457733, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 440401920}\n{'loss': 3.8486, 'grad_norm': 0.39726078510284424, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 441450496}\n{'loss': 3.942, 'grad_norm': 0.399142861366272, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 442499072}\n{'loss': 3.9038, 'grad_norm': 0.41262856125831604, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 443547648}\n{'loss': 3.8447, 'grad_norm': 0.4645870327949524, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 444596224}\n{'loss': 3.9215, 'grad_norm': 0.49330976605415344, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 445644800}\n{'loss': 4.5329, 'grad_norm': 4.813076972961426, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 446693376}\n{'loss': 3.763, 'grad_norm': 1.0100675821304321, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 447741952}\n{'loss': 3.9888, 'grad_norm': 1.2422761917114258, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 448790528}\n{'loss': 3.9209, 'grad_norm': 1.1251254081726074, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 449839104}\n{'loss': 4.1438, 'grad_norm': 1.926529049873352, 'learning_rate': 0.001, 
'epoch': 0.05, 'num_input_tokens_seen': 450887680}\n{'loss': 4.0952, 'grad_norm': 1.2948275804519653, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 451936256}\n{'loss': 3.9411, 'grad_norm': 1.1000643968582153, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 452984832}\n{'loss': 3.988, 'grad_norm': 1.3160468339920044, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 454033408}\n{'loss': 4.0241, 'grad_norm': 1.0201517343521118, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 455081984}\n{'loss': 3.9875, 'grad_norm': 0.9689710140228271, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 456130560}\n{'loss': 3.8684, 'grad_norm': 1.045577049255371, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 457179136}\n{'loss': 3.865, 'grad_norm': 0.931566059589386, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 458227712}\n{'loss': 3.728, 'grad_norm': 0.945274293422699, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 459276288}\n{'loss': 3.955, 'grad_norm': 0.7679930925369263, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 460324864}\n{'loss': 4.4113, 'grad_norm': 0.889451801776886, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 461373440}\n{'loss': 3.8928, 'grad_norm': 0.9069199562072754, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 462422016}\n{'loss': 3.9624, 'grad_norm': 0.8945743441581726, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 463470592}\n{'loss': 3.9698, 'grad_norm': 0.7373656630516052, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 464519168}\n{'loss': 3.921, 'grad_norm': 0.6688440442085266, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 465567744}\n{'loss': 3.8908, 'grad_norm': 0.5442579984664917, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 466616320}\n{'loss': 3.9138, 'grad_norm': 0.5583804845809937, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 467664896}\n{'loss': 3.8731, 'grad_norm': 0.504666268825531, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 468713472}\n{'loss': 3.7961, 'grad_norm': 0.4965992867946625, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 469762048}\n{'eval_loss': 3.7728981971740723, 'eval_runtime': 616.374, 'eval_samples_per_second': 26.581, 'eval_steps_per_second': 0.208, 'epoch': 0.05, 'num_input_tokens_seen': 469762048}\n{'loss': 3.8829, 'grad_norm': 0.44414225220680237, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 470810624}\n{'loss': 3.6939, 'grad_norm': 0.5276159644126892, 'learning_rate': 0.001, 'epoch': 0.05, 'num_input_tokens_seen': 471859200}\n{'loss': 3.8173, 'grad_norm': 0.4666613042354584, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 472907776}\n{'loss': 3.6884, 'grad_norm': 0.4581243097782135, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 473956352}\n{'loss': 3.789, 'grad_norm': 0.4697781205177307, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 475004928}\n{'loss': 3.8791, 'grad_norm': 0.5336131453514099, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 476053504}\n{'loss': 3.8077, 'grad_norm': 0.5709654092788696, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 477102080}\n{'loss': 3.8421, 'grad_norm': 0.5592761039733887, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 478150656}\n{'loss': 3.8135, 'grad_norm': 0.4490680694580078, 
'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 479199232}\n{'loss': 3.7535, 'grad_norm': 0.3931736648082733, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 480247808}\n{'loss': 3.7885, 'grad_norm': 0.41578060388565063, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 481296384}\n{'loss': 3.6255, 'grad_norm': 0.429817795753479, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 482344960}\n{'loss': 3.7202, 'grad_norm': 0.49616578221321106, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 483393536}\n 9%|▊ | 704/8192 [9:33:48<79:08:04, 38.05s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 0faae356-e828-4cff-9a49-42b397431927)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_185.jsonl.zst\n 9%|▊ | 704/8192 [9:38:28<79:08:04, 38.05s/it]Retrying in 1s [Retry 1/5].\n 9%|▊ | 704/8192 [9:38:28<79:08:04, 38.05s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 9557423f-6937-4f70-b50f-05b0c01f5bf3)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_4035.jsonl.zst\n 9%|▊ | 704/8192 [9:44:58<79:08:04, 38.05s/it]Retrying in 1s [Retry 1/5].\n 10%|█ | 832/8192 [11:28:20<80:32:25, 39.39s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 939d1d36-c607-4d3c-a0a0-8e447579340b)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk3/example_holdout_165.jsonl.zst\n 10%|█ | 832/8192 [11:30:25<80:32:25, 39.39s/it]Retrying in 1s [Retry 1/5].\n 10%|█ | 832/8192 [11:30:25<80:32:25, 39.39s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 0b99bfd1-07ae-46db-81fa-fc6ef0eabdbc)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk3/example_holdout_1529.jsonl.zst\n 10%|█ | 832/8192 [11:38:24<80:32:25, 39.39s/it]Retrying in 1s [Retry 1/5].\n 10%|█ | 832/8192 [11:38:24<80:32:25, 39.39s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: c208d1bb-5d13-45d2-9a01-1d5a2defa598)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk5/example_holdout_4562.jsonl.zst\n 10%|█ | 832/8192 [11:39:58<80:32:25, 39.39s/it]Retrying in 1s [Retry 1/5].\n 10%|█ | 832/8192 [11:39:58<80:32:25, 39.39s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. 
(read timeout=10)\"), '(Request ID: 2bf98b5c-473b-4e00-aca2-b152efddb992)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk3/example_holdout_4414.jsonl.zst\n 10%|█ | 832/8192 [11:41:00<80:32:25, 39.39s/it]Retrying in 1s [Retry 1/5].\n 11%|█ | 896/8192 [12:24:54<77:09:28, 38.07s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 3b8321b9-2d88-4bfa-9eca-b201c444cba3)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk5/example_holdout_405.jsonl.zst\n 11%|█ | 896/8192 [12:25:55<77:09:28, 38.07s/it]Retrying in 1s [Retry 1/5].\n 11%|█ | 896/8192 [12:25:55<77:09:28, 38.07s/it]'(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: a98a238a-c0a4-4295-8502-316a89a7ae29)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk1/example_holdout_2524.jsonl.zst\n 11%|█ | 896/8192 [12:33:14<77:09:28, 38.07s/it]Retrying in 1s [Retry 1/5].\n 11%|█▏ | 922/8192 [12:52:49<76:09:46, 37.71s/it]'(ProtocolError('Connection aborted.', RemoteDisconnected('Remote end closed connection without response')), '(Request ID: 36a7cc72-4605-416a-8742-59488d719150)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/train/chunk1/example_train_5267.jsonl.zst\n 11%|█▏ | 922/8192 [12:52:59<76:09:46, 37.71s/it]Retrying in 1s [Retry 1/5].\n 12%|█▏ | 943/8192 [13:06:07<76:15:57, 37.88s/it]\n{'loss': 3.7796, 'grad_norm': 0.4774172008037567, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 484442112}\n{'loss': 3.7779, 'grad_norm': 0.45830512046813965, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 485490688}\n{'loss': 3.6516, 'grad_norm': 0.4130597710609436, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 486539264}\n{'loss': 3.7018, 'grad_norm': 0.3804127275943756, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 487587840}\n{'loss': 3.6893, 'grad_norm': 0.36560356616973877, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 488636416}\n{'loss': 3.6362, 'grad_norm': 0.3827981948852539, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 489684992}\n{'loss': 3.5987, 'grad_norm': 0.37492236495018005, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 490733568}\n{'loss': 3.7165, 'grad_norm': 0.46995237469673157, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 491782144}\n{'loss': 3.6097, 'grad_norm': 0.4908960461616516, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 492830720}\n{'loss': 3.6035, 'grad_norm': 0.5318525433540344, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 493879296}\n{'loss': 3.6643, 'grad_norm': 0.4848596453666687, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 494927872}\n{'loss': 3.6586, 'grad_norm': 0.4421922266483307, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 495976448}\n{'loss': 3.5902, 'grad_norm': 0.4107126295566559, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 497025024}\n{'loss': 3.6937, 'grad_norm': 0.3975088894367218, 'learning_rate': 0.001, 'epoch': 0.06, 
'num_input_tokens_seen': 498073600}\n{'loss': 3.6496, 'grad_norm': 0.4559416174888611, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 499122176}\n{'loss': 3.66, 'grad_norm': 0.41401296854019165, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 500170752}\n{'loss': 3.5551, 'grad_norm': 0.45235902070999146, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 501219328}\n{'loss': 3.4794, 'grad_norm': 0.427593857049942, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 502267904}\n{'loss': 3.5345, 'grad_norm': 0.4024144411087036, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 503316480}\n{'loss': 3.5784, 'grad_norm': 0.410284161567688, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 504365056}\n{'loss': 3.6177, 'grad_norm': 0.37683290243148804, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 505413632}\n{'loss': 3.5883, 'grad_norm': 0.417323499917984, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 506462208}\n{'loss': 3.5888, 'grad_norm': 0.4327872693538666, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 507510784}\n{'loss': 3.5891, 'grad_norm': 0.5366392731666565, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 508559360}\n{'loss': 3.3725, 'grad_norm': 0.45735156536102295, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 509607936}\n{'loss': 3.5674, 'grad_norm': 0.4255360960960388, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 510656512}\n{'loss': 3.3523, 'grad_norm': 0.6517689824104309, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 511705088}\n{'loss': 3.5901, 'grad_norm': 0.5713740587234497, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 512753664}\n{'loss': 3.542, 'grad_norm': 0.5570502281188965, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 513802240}\n{'loss': 3.4246, 'grad_norm': 0.6477808356285095, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 514850816}\n{'loss': 3.4954, 'grad_norm': 0.5195346474647522, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 515899392}\n{'loss': 3.6516, 'grad_norm': 0.5446246862411499, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 516947968}\n{'loss': 3.5955, 'grad_norm': 0.5475099086761475, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 517996544}\n{'loss': 3.5516, 'grad_norm': 0.4719395041465759, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 519045120}\n{'loss': 3.5439, 'grad_norm': 0.43647533655166626, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 520093696}\n{'loss': 3.579, 'grad_norm': 0.5048384070396423, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 521142272}\n{'loss': 3.4742, 'grad_norm': 0.4902295172214508, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 522190848}\n{'loss': 3.4363, 'grad_norm': 0.525496244430542, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 523239424}\n{'loss': 3.3658, 'grad_norm': 0.5224571824073792, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 524288000}\n{'loss': 3.4816, 'grad_norm': 0.45781856775283813, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 525336576}\n{'loss': 3.4612, 'grad_norm': 0.3764704763889313, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 526385152}\n{'loss': 3.5172, 'grad_norm': 0.3994409143924713, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 
527433728}\n{'loss': 3.5462, 'grad_norm': 0.45144984126091003, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 528482304}\n{'loss': 3.5079, 'grad_norm': 0.4901409149169922, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 529530880}\n{'loss': 3.5187, 'grad_norm': 0.45689818263053894, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 530579456}\n{'loss': 3.4408, 'grad_norm': 0.4650699198246002, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 531628032}\n{'loss': 3.4019, 'grad_norm': 0.40419647097587585, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 532676608}\n{'loss': 3.5255, 'grad_norm': 0.3895981013774872, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 533725184}\n{'loss': 3.312, 'grad_norm': 0.46533191204071045, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 534773760}\n{'loss': 3.4233, 'grad_norm': 0.5021492838859558, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 535822336}\n{'loss': 3.4211, 'grad_norm': 0.6763796806335449, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 536870912}\n{'eval_loss': 3.38647198677063, 'eval_runtime': 681.5531, 'eval_samples_per_second': 24.039, 'eval_steps_per_second': 0.188, 'epoch': 0.06, 'num_input_tokens_seen': 536870912}\n{'loss': 3.2825, 'grad_norm': 0.75739586353302, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 537919488}\n{'loss': 3.4758, 'grad_norm': 0.49962809681892395, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 538968064}\n{'loss': 3.4105, 'grad_norm': 0.47640085220336914, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 540016640}\n{'loss': 3.4393, 'grad_norm': 0.4722411632537842, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 541065216}\n{'loss': 3.4254, 'grad_norm': 0.4715781807899475, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 542113792}\n{'loss': 3.3992, 'grad_norm': 0.474001407623291, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 543162368}\n{'loss': 3.4274, 'grad_norm': 0.48976385593414307, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 544210944}\n{'loss': 3.3255, 'grad_norm': 0.4819697141647339, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 545259520}\n{'loss': 3.3679, 'grad_norm': 0.37490880489349365, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 546308096}\n{'loss': 3.377, 'grad_norm': 0.4356544315814972, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 547356672}\n{'loss': 3.4294, 'grad_norm': 0.3786229193210602, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 548405248}\n{'loss': 3.2323, 'grad_norm': 0.4364008605480194, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 549453824}\n{'loss': 3.4615, 'grad_norm': 0.39242950081825256, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 550502400}\n{'loss': 3.3589, 'grad_norm': 0.4270903766155243, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 551550976}\n{'loss': 3.4366, 'grad_norm': 0.4204763174057007, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 552599552}\n{'loss': 3.3859, 'grad_norm': 0.554025411605835, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 553648128}\n{'loss': 3.2353, 'grad_norm': 0.5719075798988342, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 554696704}\n{'loss': 3.3798, 'grad_norm': 0.4803822338581085, 'learning_rate': 0.001, 'epoch': 
0.06, 'num_input_tokens_seen': 555745280}\n{'loss': 3.1191, 'grad_norm': 0.5494056344032288, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 556793856}\n{'loss': 3.424, 'grad_norm': 0.4569101333618164, 'learning_rate': 0.001, 'epoch': 0.06, 'num_input_tokens_seen': 557842432}\n{'loss': 3.4299, 'grad_norm': 0.48103874921798706, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 558891008}\n{'loss': 3.3483, 'grad_norm': 0.44187718629837036, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 559939584}\n{'loss': 3.3196, 'grad_norm': 0.4359618127346039, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 560988160}\n{'loss': 3.4479, 'grad_norm': 0.37653473019599915, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 562036736}\n{'loss': 3.2509, 'grad_norm': 0.4397211968898773, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 563085312}\n{'loss': 3.4193, 'grad_norm': 0.5013746619224548, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 564133888}\n{'loss': 3.3391, 'grad_norm': 0.5044407844543457, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 565182464}\n{'loss': 3.3223, 'grad_norm': 0.45118412375450134, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 566231040}\n{'loss': 3.3041, 'grad_norm': 0.5617747902870178, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 567279616}\n{'loss': 3.3436, 'grad_norm': 0.5154598355293274, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 568328192}\n{'loss': 3.3739, 'grad_norm': 0.4647876024246216, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 569376768}\n{'loss': 3.3366, 'grad_norm': 0.3766598701477051, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 570425344}\n{'loss': 3.3098, 'grad_norm': 0.40857356786727905, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 571473920}\n{'loss': 3.0331, 'grad_norm': 0.4163903594017029, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 572522496}\n{'loss': 3.3184, 'grad_norm': 0.38519713282585144, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 573571072}\n{'loss': 3.3886, 'grad_norm': 0.38155344128608704, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 574619648}\n{'loss': 3.2855, 'grad_norm': 0.3684964179992676, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 575668224}\n{'loss': 3.0484, 'grad_norm': 0.3504279553890228, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 576716800}\n{'loss': 3.2702, 'grad_norm': 0.42653048038482666, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 577765376}\n{'loss': 3.312, 'grad_norm': 0.4263192415237427, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 578813952}\n{'loss': 3.3355, 'grad_norm': 0.4272316098213196, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 579862528}\n{'loss': 3.2806, 'grad_norm': 0.40996676683425903, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 580911104}\n{'loss': 3.2504, 'grad_norm': 0.403242826461792, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 581959680}\n{'loss': 3.2924, 'grad_norm': 0.46690869331359863, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 583008256}\n{'loss': 3.1466, 'grad_norm': 0.515250027179718, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 584056832}\n{'loss': 3.2898, 'grad_norm': 0.4872475266456604, 'learning_rate': 0.001, 'epoch': 0.07, 
'num_input_tokens_seen': 585105408}\n{'loss': 3.3699, 'grad_norm': 0.43510228395462036, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 586153984}\n{'loss': 3.1568, 'grad_norm': 0.4732394814491272, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 587202560}\n{'loss': 3.2145, 'grad_norm': 0.49767330288887024, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 588251136}\n{'loss': 3.2966, 'grad_norm': 0.4968816936016083, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 589299712}\n{'loss': 3.2249, 'grad_norm': 0.4123048782348633, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 590348288}\n{'loss': 3.3819, 'grad_norm': 0.4349605143070221, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 591396864}\n{'loss': 3.3477, 'grad_norm': 0.47485488653182983, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 592445440}\n{'loss': 3.3202, 'grad_norm': 0.46784669160842896, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 593494016}\n{'loss': 3.2231, 'grad_norm': 0.42318931221961975, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 594542592}\n{'loss': 3.2901, 'grad_norm': 0.40393564105033875, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 595591168}\n{'loss': 3.2065, 'grad_norm': 0.4144214391708374, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 596639744}\n{'loss': 2.8698, 'grad_norm': 0.40921372175216675, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 597688320}\n{'loss': 3.2242, 'grad_norm': 0.35226207971572876, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 598736896}\n{'loss': 3.2125, 'grad_norm': 0.43364742398262024, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 599785472}\n{'loss': 3.2296, 'grad_norm': 0.4272080361843109, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 600834048}\n{'loss': 2.9346, 'grad_norm': 0.4155097007751465, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 601882624}\n{'loss': 3.2706, 'grad_norm': 0.4263918697834015, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 602931200}\n{'loss': 3.3124, 'grad_norm': 0.43336594104766846, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 603979776}\n{'eval_loss': 3.1686322689056396, 'eval_runtime': 664.0006, 'eval_samples_per_second': 24.675, 'eval_steps_per_second': 0.193, 'epoch': 0.07, 'num_input_tokens_seen': 603979776}\n{'loss': 3.349, 'grad_norm': 0.4504219889640808, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 605028352}\n{'loss': 3.3015, 'grad_norm': 0.5899333953857422, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 606076928}\n{'loss': 3.2036, 'grad_norm': 0.5814825892448425, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 607125504}\n{'loss': 3.2786, 'grad_norm': 0.3971703350543976, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 608174080}\n{'loss': 3.0979, 'grad_norm': 0.5669280290603638, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 609222656}\n{'loss': 3.0683, 'grad_norm': 0.4786263406276703, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 610271232}\n{'loss': 3.1731, 'grad_norm': 0.46415817737579346, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 611319808}\n{'loss': 3.2282, 'grad_norm': 0.4295870363712311, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 612368384}\n{'loss': 3.2196, 'grad_norm': 0.4184265732765198, 
'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 613416960}\n{'loss': 3.2445, 'grad_norm': 0.4624122381210327, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 614465536}\n{'loss': 3.1135, 'grad_norm': 0.3681364059448242, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 615514112}\n{'loss': 3.1877, 'grad_norm': 0.3612712621688843, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 616562688}\n{'loss': 3.308, 'grad_norm': 0.34696292877197266, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 617611264}\n{'loss': 3.4995, 'grad_norm': 0.5025363564491272, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 618659840}\n{'loss': 3.1853, 'grad_norm': 0.6652331352233887, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 619708416}\n{'loss': 3.1844, 'grad_norm': 0.7156277894973755, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 620756992}\n{'loss': 3.2325, 'grad_norm': 0.5241081118583679, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 621805568}\n{'loss': 2.972, 'grad_norm': 0.5001779198646545, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 622854144}\n{'loss': 3.1742, 'grad_norm': 0.4062795341014862, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 623902720}\n{'loss': 3.2539, 'grad_norm': 0.4671201705932617, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 624951296}\n{'loss': 3.1948, 'grad_norm': 0.3894169330596924, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 625999872}\n{'loss': 3.2469, 'grad_norm': 0.4665684998035431, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 627048448}\n{'loss': 3.2742, 'grad_norm': 0.43211206793785095, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 628097024}\n{'loss': 3.1195, 'grad_norm': 0.4476025700569153, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 629145600}\n{'loss': 3.2127, 'grad_norm': 0.3596750795841217, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 630194176}\n{'loss': 3.1741, 'grad_norm': 0.40869519114494324, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 631242752}\n{'loss': 3.1708, 'grad_norm': 0.36658936738967896, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 632291328}\n{'loss': 3.0925, 'grad_norm': 0.35227081179618835, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 633339904}\n{'loss': 3.171, 'grad_norm': 0.3942136764526367, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 634388480}\n{'loss': 3.1729, 'grad_norm': 0.3163004219532013, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 635437056}\n{'loss': 3.1683, 'grad_norm': 0.35835322737693787, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 636485632}\n{'loss': 3.1118, 'grad_norm': 0.3395129144191742, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 637534208}\n{'loss': 3.2123, 'grad_norm': 0.38003110885620117, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 638582784}\n{'loss': 3.167, 'grad_norm': 0.4000258445739746, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 639631360}\n{'loss': 3.0668, 'grad_norm': 0.38393035531044006, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 640679936}\n{'loss': 2.9125, 'grad_norm': 0.38961607217788696, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 641728512}\n{'loss': 3.1024, 'grad_norm': 0.3406165540218353, 'learning_rate': 
0.001, 'epoch': 0.07, 'num_input_tokens_seen': 642777088}\n{'loss': 3.1262, 'grad_norm': 0.4859096109867096, 'learning_rate': 0.001, 'epoch': 0.07, 'num_input_tokens_seen': 643825664}\n{'loss': 3.1155, 'grad_norm': 0.5454179048538208, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 644874240}\n{'loss': 3.1594, 'grad_norm': 0.46631914377212524, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 645922816}\n{'loss': 3.1164, 'grad_norm': 0.4049534797668457, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 646971392}\n{'loss': 2.9272, 'grad_norm': 0.32954707741737366, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 648019968}\n{'loss': 3.0888, 'grad_norm': 0.409853458404541, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 649068544}\n{'loss': 3.2185, 'grad_norm': 0.43080267310142517, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 650117120}\n{'loss': 3.1871, 'grad_norm': 0.4323279857635498, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 651165696}\n{'loss': 2.9759, 'grad_norm': 0.3696155846118927, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 652214272}\n{'loss': 3.1058, 'grad_norm': 0.3963398337364197, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 653262848}\n{'loss': 3.1214, 'grad_norm': 0.4020082652568817, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 654311424}\n{'loss': 3.0678, 'grad_norm': 0.4210987091064453, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 655360000}\n{'loss': 2.9177, 'grad_norm': 0.44535601139068604, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 656408576}\n{'loss': 3.1005, 'grad_norm': 0.363700807094574, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 657457152}\n{'loss': 3.0285, 'grad_norm': 0.393673837184906, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 658505728}\n{'loss': 3.031, 'grad_norm': 0.3472498059272766, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 659554304}\n{'loss': 3.1837, 'grad_norm': 0.45663976669311523, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 660602880}\n{'loss': 3.1636, 'grad_norm': 0.44765880703926086, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 661651456}\n{'loss': 3.0421, 'grad_norm': 0.5289708375930786, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 662700032}\n{'loss': 2.9394, 'grad_norm': 0.5272406339645386, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 663748608}\n{'loss': 3.2419, 'grad_norm': 0.5471237301826477, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 664797184}\n{'loss': 3.1506, 'grad_norm': 0.5762659311294556, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 665845760}\n{'loss': 3.1258, 'grad_norm': 0.5486758351325989, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 666894336}\n{'loss': 3.1686, 'grad_norm': 0.4877275228500366, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 667942912}\n{'loss': 3.1062, 'grad_norm': 0.35992035269737244, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 668991488}\n{'loss': 3.1655, 'grad_norm': 0.39184319972991943, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 670040064}\n{'loss': 3.1455, 'grad_norm': 0.46003854274749756, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 671088640}\n{'eval_loss': 3.036459445953369, 'eval_runtime': 676.6057, 'eval_samples_per_second': 
24.215, 'eval_steps_per_second': 0.189, 'epoch': 0.08, 'num_input_tokens_seen': 671088640}\n{'loss': 3.1058, 'grad_norm': 0.45958808064460754, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 672137216}\n{'loss': 3.0861, 'grad_norm': 0.41562288999557495, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 673185792}\n{'loss': 3.1135, 'grad_norm': 0.38576263189315796, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 674234368}\n{'loss': 2.9998, 'grad_norm': 0.3936232924461365, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 675282944}\n{'loss': 3.1349, 'grad_norm': 0.3888678252696991, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 676331520}\n{'loss': 2.9192, 'grad_norm': 0.31759846210479736, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 677380096}\n{'loss': 3.1324, 'grad_norm': 0.3801535964012146, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 678428672}\n{'loss': 3.1064, 'grad_norm': 0.36299699544906616, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 679477248}\n{'loss': 3.2258, 'grad_norm': 0.36732324957847595, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 680525824}\n{'loss': 3.2162, 'grad_norm': 0.42108356952667236, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 681574400}\n{'loss': 3.2189, 'grad_norm': 0.4113474190235138, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 682622976}\n{'loss': 3.0585, 'grad_norm': 0.39936116337776184, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 683671552}\n{'loss': 3.0693, 'grad_norm': 0.35424771904945374, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 684720128}\n{'loss': 3.1134, 'grad_norm': 0.3333597183227539, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 685768704}\n{'loss': 3.0536, 'grad_norm': 0.37569180130958557, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 686817280}\n{'loss': 3.1396, 'grad_norm': 0.33836638927459717, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 687865856}\n{'loss': 3.1353, 'grad_norm': 0.31407052278518677, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 688914432}\n{'loss': 2.9977, 'grad_norm': 0.34316036105155945, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 689963008}\n{'loss': 3.1683, 'grad_norm': 0.3779186010360718, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 691011584}\n{'loss': 2.9567, 'grad_norm': 0.3414095342159271, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 692060160}\n{'loss': 3.0806, 'grad_norm': 0.31614938378334045, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 693108736}\n{'loss': 3.0975, 'grad_norm': 0.35552725195884705, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 694157312}\n{'loss': 3.0241, 'grad_norm': 0.38724133372306824, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 695205888}\n{'loss': 3.0701, 'grad_norm': 0.3581823408603668, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 696254464}\n{'loss': 3.0222, 'grad_norm': 0.3632317781448364, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 697303040}\n{'loss': 3.0188, 'grad_norm': 0.40560677647590637, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 698351616}\n{'loss': 3.106, 'grad_norm': 0.3953804075717926, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 699400192}\n{'loss': 3.1552, 'grad_norm': 
0.40652376413345337, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 700448768}\n{'loss': 2.8893, 'grad_norm': 0.3625616133213043, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 701497344}\n{'loss': 2.9183, 'grad_norm': 0.3450768291950226, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 702545920}\n{'loss': 2.9828, 'grad_norm': 0.36742398142814636, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 703594496}\n{'loss': 3.0327, 'grad_norm': 0.3611394762992859, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 704643072}\n{'loss': 3.1466, 'grad_norm': 0.3593210279941559, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 705691648}\n{'loss': 3.0163, 'grad_norm': 0.3994838297367096, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 706740224}\n{'loss': 3.0563, 'grad_norm': 0.41202738881111145, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 707788800}\n{'loss': 3.0912, 'grad_norm': 0.3404449224472046, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 708837376}\n{'loss': 3.0108, 'grad_norm': 0.3745224177837372, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 709885952}\n{'loss': 3.0864, 'grad_norm': 0.4320204555988312, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 710934528}\n{'loss': 3.0387, 'grad_norm': 0.34649956226348877, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 711983104}\n{'loss': 3.013, 'grad_norm': 0.34744057059288025, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 713031680}\n{'loss': 3.0985, 'grad_norm': 0.3638330101966858, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 714080256}\n{'loss': 3.1498, 'grad_norm': 0.43823716044425964, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 715128832}\n{'loss': 3.0366, 'grad_norm': 0.6364668011665344, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 716177408}\n{'loss': 2.9614, 'grad_norm': 0.6294976472854614, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 717225984}\n{'loss': 3.0619, 'grad_norm': 0.5871465802192688, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 718274560}\n{'loss': 3.1489, 'grad_norm': 0.7779986262321472, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 719323136}\n{'loss': 3.1331, 'grad_norm': 1.102079153060913, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 720371712}\n{'loss': 3.1423, 'grad_norm': 0.6352481245994568, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 721420288}\n{'loss': 3.1509, 'grad_norm': 0.5698557496070862, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 722468864}\n{'loss': 2.6683, 'grad_norm': 0.501290500164032, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 723517440}\n{'loss': 3.0334, 'grad_norm': 0.4512772560119629, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 724566016}\n{'loss': 3.0485, 'grad_norm': 0.4409146308898926, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 725614592}\n{'loss': 3.0154, 'grad_norm': 0.3902524411678314, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 726663168}\n{'loss': 3.0742, 'grad_norm': 0.3692473769187927, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 727711744}\n{'loss': 2.8306, 'grad_norm': 0.385005384683609, 'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 728760320}\n{'loss': 2.9258, 'grad_norm': 0.37514418363571167, 
'learning_rate': 0.001, 'epoch': 0.08, 'num_input_tokens_seen': 729808896}\n{'loss': 3.0061, 'grad_norm': 0.42038342356681824, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 730857472}\n{'loss': 3.0588, 'grad_norm': 0.40415653586387634, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 731906048}\n{'loss': 2.9542, 'grad_norm': 0.38514354825019836, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 732954624}\n{'loss': 2.9252, 'grad_norm': 0.3861909806728363, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 734003200}\n{'loss': 2.8432, 'grad_norm': 0.40519189834594727, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 735051776}\n{'loss': 2.9779, 'grad_norm': 0.37011685967445374, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 736100352}\n{'loss': 2.9908, 'grad_norm': 0.34850460290908813, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 737148928}\n{'loss': 2.9589, 'grad_norm': 0.371500700712204, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 738197504}\n[2025-03-11 00:58:41 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 0faae356-e828-4cff-9a49-42b397431927)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_185.jsonl.zst\n[2025-03-11 00:58:41 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-11 01:05:12 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 9557423f-6937-4f70-b50f-05b0c01f5bf3)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_4035.jsonl.zst\n[2025-03-11 01:05:12 WARNING] Retrying in 1s [Retry 1/5].\n{'eval_loss': 2.9496541023254395, 'eval_runtime': 714.5105, 'eval_samples_per_second': 22.93, 'eval_steps_per_second': 0.179, 'epoch': 0.09, 'num_input_tokens_seen': 738197504}\n{'loss': 2.9029, 'grad_norm': 0.3044391870498657, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 739246080}\n{'loss': 2.8536, 'grad_norm': 0.34875407814979553, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 740294656}\n{'loss': 2.8478, 'grad_norm': 0.4568244516849518, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 741343232}\n{'loss': 3.1164, 'grad_norm': 0.44005003571510315, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 742391808}\n{'loss': 2.8584, 'grad_norm': 0.39490336179733276, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 743440384}\n{'loss': 3.0681, 'grad_norm': 0.4427798092365265, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 744488960}\n{'loss': 3.0315, 'grad_norm': 0.4771106243133545, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 745537536}\n{'loss': 2.8794, 'grad_norm': 0.4624035656452179, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 746586112}\n{'loss': 2.9624, 'grad_norm': 0.4244724214076996, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 747634688}\n{'loss': 2.9925, 'grad_norm': 0.39176708459854126, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 748683264}\n{'loss': 2.9753, 'grad_norm': 0.43686383962631226, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 749731840}\n{'loss': 
3.0718, 'grad_norm': 0.4536241590976715, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 750780416}\n{'loss': 3.0065, 'grad_norm': 0.3421417772769928, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 751828992}\n{'loss': 2.8965, 'grad_norm': 0.30937010049819946, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 752877568}\n{'loss': 3.0347, 'grad_norm': 0.33371758460998535, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 753926144}\n{'loss': 3.0133, 'grad_norm': 0.3285418450832367, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 754974720}\n{'loss': 3.1219, 'grad_norm': 0.33177846670150757, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 756023296}\n{'loss': 2.9354, 'grad_norm': 0.36487525701522827, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 757071872}\n{'loss': 3.133, 'grad_norm': 0.35576146841049194, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 758120448}\n{'loss': 2.9771, 'grad_norm': 0.4217855930328369, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 759169024}\n{'loss': 2.9906, 'grad_norm': 0.4007001519203186, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 760217600}\n{'loss': 3.0219, 'grad_norm': 0.36323100328445435, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 761266176}\n{'loss': 2.89, 'grad_norm': 0.323297381401062, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 762314752}\n{'loss': 2.8566, 'grad_norm': 0.3450233042240143, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 763363328}\n{'loss': 3.0536, 'grad_norm': 0.36228489875793457, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 764411904}\n{'loss': 2.9259, 'grad_norm': 0.3553276062011719, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 765460480}\n{'loss': 2.8431, 'grad_norm': 0.37074941396713257, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 766509056}\n{'loss': 3.0549, 'grad_norm': 0.4105451703071594, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 767557632}\n{'loss': 2.8431, 'grad_norm': 0.4433744549751282, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 768606208}\n{'loss': 2.9545, 'grad_norm': 0.4024113416671753, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 769654784}\n{'loss': 2.9237, 'grad_norm': 0.3534025549888611, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 770703360}\n{'loss': 2.9306, 'grad_norm': 0.3788505792617798, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 771751936}\n{'loss': 2.9218, 'grad_norm': 0.3302527666091919, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 772800512}\n{'loss': 3.0647, 'grad_norm': 0.36651748418807983, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 773849088}\n{'loss': 3.0289, 'grad_norm': 0.35838624835014343, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 774897664}\n{'loss': 2.9157, 'grad_norm': 0.34652525186538696, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 775946240}\n{'loss': 2.9358, 'grad_norm': 0.37369009852409363, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 776994816}\n{'loss': 3.0725, 'grad_norm': 0.37748783826828003, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 778043392}\n{'loss': 2.8444, 'grad_norm': 0.339287132024765, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 779091968}\n{'loss': 2.859, 
'grad_norm': 0.3415367305278778, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 780140544}\n{'loss': 2.9334, 'grad_norm': 0.3661401569843292, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 781189120}\n{'loss': 3.0287, 'grad_norm': 0.3512025773525238, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 782237696}\n{'loss': 2.8093, 'grad_norm': 0.3412944972515106, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 783286272}\n{'loss': 2.9112, 'grad_norm': 0.35280412435531616, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 784334848}\n{'loss': 2.8939, 'grad_norm': 0.3652521073818207, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 785383424}\n{'loss': 2.961, 'grad_norm': 0.3336659371852875, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 786432000}\n{'loss': 2.9547, 'grad_norm': 0.3242711126804352, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 787480576}\n{'loss': 2.8035, 'grad_norm': 0.3276830017566681, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 788529152}\n{'loss': 2.9639, 'grad_norm': 0.32558611035346985, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 789577728}\n{'loss': 2.9981, 'grad_norm': 0.32141759991645813, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 790626304}\n{'loss': 2.8053, 'grad_norm': 0.33697575330734253, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 791674880}\n{'loss': 2.9265, 'grad_norm': 0.3305177092552185, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 792723456}\n{'loss': 2.9357, 'grad_norm': 0.3303467035293579, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 793772032}\n{'loss': 2.9209, 'grad_norm': 0.33826348185539246, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 794820608}\n{'loss': 3.0134, 'grad_norm': 0.3682444393634796, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 795869184}\n{'loss': 2.8786, 'grad_norm': 0.364545613527298, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 796917760}\n{'loss': 3.0202, 'grad_norm': 0.4031524360179901, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 797966336}\n{'loss': 2.4912, 'grad_norm': 0.40752920508384705, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 799014912}\n{'loss': 2.9311, 'grad_norm': 0.36912065744400024, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 800063488}\n{'loss': 2.8768, 'grad_norm': 0.3906254172325134, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 801112064}\n{'loss': 2.8677, 'grad_norm': 0.3680756092071533, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 802160640}\n{'loss': 2.967, 'grad_norm': 0.42479801177978516, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 803209216}\n{'loss': 3.0138, 'grad_norm': 0.4966808259487152, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 804257792}\n{'loss': 2.9186, 'grad_norm': 0.413562536239624, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 805306368}\n{'eval_loss': 2.8718671798706055, 'eval_runtime': 1149.5487, 'eval_samples_per_second': 14.253, 'eval_steps_per_second': 0.111, 'epoch': 0.09, 'num_input_tokens_seen': 805306368}\n{'loss': 2.8717, 'grad_norm': 0.3343268632888794, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 806354944}\n{'loss': 3.0123, 'grad_norm': 0.42326104640960693, 'learning_rate': 0.001, 'epoch': 0.09, 
'num_input_tokens_seen': 807403520}\n{'loss': 2.9691, 'grad_norm': 0.35408785939216614, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 808452096}\n{'loss': 2.8862, 'grad_norm': 0.35168665647506714, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 809500672}\n{'loss': 2.9754, 'grad_norm': 0.3385300934314728, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 810549248}\n{'loss': 2.751, 'grad_norm': 0.36974239349365234, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 811597824}\n{'loss': 2.8481, 'grad_norm': 0.3535187244415283, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 812646400}\n{'loss': 2.9605, 'grad_norm': 0.39851564168930054, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 813694976}\n{'loss': 2.9251, 'grad_norm': 0.35983574390411377, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 814743552}\n{'loss': 2.8766, 'grad_norm': 0.34153202176094055, 'learning_rate': 0.001, 'epoch': 0.09, 'num_input_tokens_seen': 815792128}\n{'loss': 2.9205, 'grad_norm': 0.3700859546661377, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 816840704}\n{'loss': 2.7621, 'grad_norm': 0.3954067528247833, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 817889280}\n{'loss': 2.886, 'grad_norm': 0.4191531538963318, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 818937856}\n{'loss': 2.9203, 'grad_norm': 0.3315434157848358, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 819986432}\n{'loss': 2.9563, 'grad_norm': 0.3308311700820923, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 821035008}\n{'loss': 2.9391, 'grad_norm': 0.3073643445968628, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 822083584}\n{'loss': 2.7197, 'grad_norm': 0.3343094289302826, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 823132160}\n{'loss': 2.909, 'grad_norm': 0.31464704871177673, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 824180736}\n{'loss': 2.8581, 'grad_norm': 0.40213140845298767, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 825229312}\n{'loss': 2.9224, 'grad_norm': 0.36158621311187744, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 826277888}\n{'loss': 2.985, 'grad_norm': 0.3831183910369873, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 827326464}\n{'loss': 2.8964, 'grad_norm': 0.3219353258609772, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 828375040}\n{'loss': 3.0832, 'grad_norm': 0.31743234395980835, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 829423616}\n{'loss': 2.9602, 'grad_norm': 0.3629371225833893, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 830472192}\n{'loss': 2.8327, 'grad_norm': 0.3800980746746063, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 831520768}\n{'loss': 2.8298, 'grad_norm': 0.3349006772041321, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 832569344}\n{'loss': 2.9633, 'grad_norm': 0.3282972276210785, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 833617920}\n{'loss': 2.9234, 'grad_norm': 0.3283899128437042, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 834666496}\n{'loss': 2.9754, 'grad_norm': 0.33885031938552856, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 835715072}\n{'loss': 2.8825, 'grad_norm': 0.3113347589969635, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 
836763648}\n{'loss': 2.9483, 'grad_norm': 0.3759271204471588, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 837812224}\n{'loss': 2.8577, 'grad_norm': 0.38608986139297485, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 838860800}\n{'loss': 2.6639, 'grad_norm': 0.3253604471683502, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 839909376}\n{'loss': 2.8295, 'grad_norm': 0.31234994530677795, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 840957952}\n{'loss': 2.9323, 'grad_norm': 0.37187162041664124, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 842006528}\n{'loss': 3.2357, 'grad_norm': 0.5417175889015198, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 843055104}\n{'loss': 2.8982, 'grad_norm': 0.6133915781974792, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 844103680}\n{'loss': 2.928, 'grad_norm': 0.7637872099876404, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 845152256}\n{'loss': 2.9283, 'grad_norm': 0.7322977781295776, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 846200832}\n{'loss': 2.8209, 'grad_norm': 0.5112255215644836, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 847249408}\n{'loss': 2.8696, 'grad_norm': 0.49990609288215637, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 848297984}\n{'loss': 2.9193, 'grad_norm': 0.4511178135871887, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 849346560}\n{'loss': 2.9658, 'grad_norm': 0.4653412997722626, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 850395136}\n{'loss': 2.889, 'grad_norm': 0.3913695812225342, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 851443712}\n{'loss': 2.9534, 'grad_norm': 0.39285045862197876, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 852492288}\n{'loss': 2.8341, 'grad_norm': 0.5052099227905273, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 853540864}\n{'loss': 3.0436, 'grad_norm': 0.5978823900222778, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 854589440}\n{'loss': 2.9484, 'grad_norm': 0.4584784507751465, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 855638016}\n{'loss': 2.8786, 'grad_norm': 0.40823692083358765, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 856686592}\n{'loss': 2.942, 'grad_norm': 0.4448293447494507, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 857735168}\n{'loss': 2.9347, 'grad_norm': 0.4112764596939087, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 858783744}\n{'loss': 2.8359, 'grad_norm': 0.3826068341732025, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 859832320}\n{'loss': 2.9277, 'grad_norm': 0.37165558338165283, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 860880896}\n{'loss': 2.6527, 'grad_norm': 0.4285834729671478, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 861929472}\n{'loss': 2.8451, 'grad_norm': 0.36497727036476135, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 862978048}\n{'loss': 2.9039, 'grad_norm': 0.35966625809669495, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 864026624}\n{'loss': 2.9268, 'grad_norm': 0.3529391586780548, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 865075200}\n{'loss': 2.9953, 'grad_norm': 0.3455546498298645, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 866123776}\n{'loss': 2.9307, 'grad_norm': 
0.3788530230522156, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 867172352}\n{'loss': 2.9448, 'grad_norm': 0.35837656259536743, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 868220928}\n{'loss': 2.9937, 'grad_norm': 0.3842633366584778, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 869269504}\n{'loss': 2.8324, 'grad_norm': 0.32774215936660767, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 870318080}\n{'loss': 2.8613, 'grad_norm': 0.327158659696579, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 871366656}\n{'loss': 2.7653, 'grad_norm': 0.3515920639038086, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 872415232}\n[2025-03-11 02:50:38 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 939d1d36-c607-4d3c-a0a0-8e447579340b)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk3/example_holdout_165.jsonl.zst\n[2025-03-11 02:50:39 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-11 02:58:37 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 0b99bfd1-07ae-46db-81fa-fc6ef0eabdbc)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk3/example_holdout_1529.jsonl.zst\n[2025-03-11 02:58:37 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-11 03:00:11 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: c208d1bb-5d13-45d2-9a01-1d5a2defa598)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk5/example_holdout_4562.jsonl.zst\n[2025-03-11 03:00:11 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-11 03:01:14 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. 
(read timeout=10)\"), '(Request ID: 2bf98b5c-473b-4e00-aca2-b152efddb992)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk3/example_holdout_4414.jsonl.zst\n[2025-03-11 03:01:14 WARNING] Retrying in 1s [Retry 1/5].\n{'eval_loss': 2.816462278366089, 'eval_runtime': 954.8041, 'eval_samples_per_second': 17.16, 'eval_steps_per_second': 0.134, 'epoch': 0.1, 'num_input_tokens_seen': 872415232}\n{'loss': 2.867, 'grad_norm': 0.3173666000366211, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 873463808}\n{'loss': 2.8701, 'grad_norm': 0.3399354815483093, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 874512384}\n{'loss': 2.8575, 'grad_norm': 0.36704689264297485, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 875560960}\n{'loss': 2.9582, 'grad_norm': 0.33231136202812195, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 876609536}\n{'loss': 2.7719, 'grad_norm': 0.34316956996917725, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 877658112}\n{'loss': 2.8915, 'grad_norm': 0.3483976423740387, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 878706688}\n{'loss': 2.7566, 'grad_norm': 0.3104913532733917, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 879755264}\n{'loss': 3.0013, 'grad_norm': 0.38844239711761475, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 880803840}\n{'loss': 2.5568, 'grad_norm': 0.40875244140625, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 881852416}\n{'loss': 2.8336, 'grad_norm': 0.3538399934768677, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 882900992}\n{'loss': 2.9391, 'grad_norm': 0.3494492471218109, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 883949568}\n{'loss': 2.8535, 'grad_norm': 0.3472343981266022, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 884998144}\n{'loss': 2.9836, 'grad_norm': 0.34867390990257263, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 886046720}\n{'loss': 2.8416, 'grad_norm': 0.3527415096759796, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 887095296}\n{'loss': 2.8756, 'grad_norm': 0.3338777422904968, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 888143872}\n{'loss': 2.8428, 'grad_norm': 0.3345812261104584, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 889192448}\n{'loss': 2.8977, 'grad_norm': 0.31487980484962463, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 890241024}\n{'loss': 2.9543, 'grad_norm': 0.3655254542827606, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 891289600}\n{'loss': 2.9423, 'grad_norm': 0.33075806498527527, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 892338176}\n{'loss': 2.9001, 'grad_norm': 0.34644609689712524, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 893386752}\n{'loss': 2.9029, 'grad_norm': 0.39070528745651245, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 894435328}\n{'loss': 2.9101, 'grad_norm': 0.39556533098220825, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 895483904}\n{'loss': 2.8119, 'grad_norm': 0.39002978801727295, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 896532480}\n{'loss': 3.0102, 'grad_norm': 0.37797507643699646, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 897581056}\n{'loss': 2.666, 'grad_norm': 0.4306756258010864, 
'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 898629632}\n{'loss': 2.9257, 'grad_norm': 0.4526049494743347, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 899678208}\n{'loss': 2.8196, 'grad_norm': 0.3978416621685028, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 900726784}\n{'loss': 2.9057, 'grad_norm': 0.3925896883010864, 'learning_rate': 0.001, 'epoch': 0.1, 'num_input_tokens_seen': 901775360}\n{'loss': 3.0017, 'grad_norm': 0.45828214287757874, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 902823936}\n{'loss': 2.89, 'grad_norm': 0.4745008647441864, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 903872512}\n{'loss': 2.7335, 'grad_norm': 0.4270082116127014, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 904921088}\n{'loss': 2.8234, 'grad_norm': 0.38832950592041016, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 905969664}\n{'loss': 2.8618, 'grad_norm': 0.3907729387283325, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 907018240}\n{'loss': 2.8703, 'grad_norm': 0.368655264377594, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 908066816}\n{'loss': 2.8321, 'grad_norm': 0.41538506746292114, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 909115392}\n{'loss': 2.886, 'grad_norm': 0.41877180337905884, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 910163968}\n{'loss': 2.6224, 'grad_norm': 0.33238673210144043, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 911212544}\n{'loss': 2.8617, 'grad_norm': 0.4095931351184845, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 912261120}\n{'loss': 2.8172, 'grad_norm': 0.41708603501319885, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 913309696}\n{'loss': 2.7658, 'grad_norm': 0.37449270486831665, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 914358272}\n{'loss': 2.9042, 'grad_norm': 0.3935737609863281, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 915406848}\n{'loss': 2.7612, 'grad_norm': 0.3586251735687256, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 916455424}\n{'loss': 2.8785, 'grad_norm': 0.3712047338485718, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 917504000}\n{'loss': 2.739, 'grad_norm': 0.37707045674324036, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 918552576}\n{'loss': 2.8372, 'grad_norm': 0.3432702422142029, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 919601152}\n{'loss': 2.5638, 'grad_norm': 0.3493041396141052, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 920649728}\n{'loss': 2.8759, 'grad_norm': 0.3401539623737335, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 921698304}\n{'loss': 3.0048, 'grad_norm': 0.4632040858268738, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 922746880}\n{'loss': 2.9394, 'grad_norm': 0.4968065023422241, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 923795456}\n{'loss': 2.8441, 'grad_norm': 0.5426673889160156, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 924844032}\n{'loss': 2.9975, 'grad_norm': 0.4630672037601471, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 925892608}\n{'loss': 2.9584, 'grad_norm': 0.38806748390197754, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 926941184}\n{'loss': 2.8904, 'grad_norm': 0.39797642827033997, 'learning_rate': 0.001, 
'epoch': 0.11, 'num_input_tokens_seen': 927989760}\n{'loss': 2.5774, 'grad_norm': 0.4063512980937958, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 929038336}\n{'loss': 2.812, 'grad_norm': 0.3161136209964752, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 930086912}\n{'loss': 2.7483, 'grad_norm': 0.3628361225128174, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 931135488}\n{'loss': 2.7916, 'grad_norm': 0.37376269698143005, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 932184064}\n{'loss': 2.7985, 'grad_norm': 0.3399117887020111, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 933232640}\n{'loss': 2.7107, 'grad_norm': 0.3453179597854614, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 934281216}\n{'loss': 2.9254, 'grad_norm': 0.39461833238601685, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 935329792}\n{'loss': 2.8487, 'grad_norm': 0.3668413460254669, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 936378368}\n{'loss': 2.7928, 'grad_norm': 0.28304487466812134, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 937426944}\n{'loss': 2.8503, 'grad_norm': 0.35816267132759094, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 938475520}\n{'loss': 3.0328, 'grad_norm': 0.3540339469909668, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 939524096}\n[2025-03-11 03:46:08 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: 3b8321b9-2d88-4bfa-9eca-b201c444cba3)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk5/example_holdout_405.jsonl.zst\n[2025-03-11 03:46:08 WARNING] Retrying in 1s [Retry 1/5].\n[2025-03-11 03:53:27 WARNING] '(ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. 
(read timeout=10)\"), '(Request ID: a98a238a-c0a4-4295-8502-316a89a7ae29)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk1/example_holdout_2524.jsonl.zst\n[2025-03-11 03:53:27 WARNING] Retrying in 1s [Retry 1/5].\n{'eval_loss': 2.7651162147521973, 'eval_runtime': 687.962, 'eval_samples_per_second': 23.815, 'eval_steps_per_second': 0.186, 'epoch': 0.11, 'num_input_tokens_seen': 939524096}\n{'loss': 2.9368, 'grad_norm': 0.34962671995162964, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 940572672}\n{'loss': 2.3627, 'grad_norm': 0.37516310811042786, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 941621248}\n{'loss': 2.8854, 'grad_norm': 0.3487492501735687, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 942669824}\n{'loss': 2.7892, 'grad_norm': 0.37180987000465393, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 943718400}\n{'loss': 2.8067, 'grad_norm': 0.3387952744960785, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 944766976}\n{'loss': 2.841, 'grad_norm': 0.32076528668403625, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 945815552}\n{'loss': 2.7965, 'grad_norm': 0.3348572552204132, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 946864128}\n{'loss': 2.6788, 'grad_norm': 0.3531329929828644, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 947912704}\n{'loss': 2.7276, 'grad_norm': 0.300353467464447, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 948961280}\n{'loss': 2.8189, 'grad_norm': 0.3258875012397766, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 950009856}\n{'loss': 2.8388, 'grad_norm': 0.3434987962245941, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 951058432}\n{'loss': 2.856, 'grad_norm': 0.33045029640197754, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 952107008}\n{'loss': 2.658, 'grad_norm': 0.34896957874298096, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 953155584}\n{'loss': 2.8484, 'grad_norm': 0.3819083273410797, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 954204160}\n{'loss': 2.8402, 'grad_norm': 0.39541998505592346, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 955252736}\n{'loss': 2.8281, 'grad_norm': 0.3843367397785187, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 956301312}\n{'loss': 2.8339, 'grad_norm': 0.4067714214324951, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 957349888}\n{'loss': 2.8693, 'grad_norm': 0.3071018159389496, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 958398464}\n{'loss': 2.6747, 'grad_norm': 0.3676702380180359, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 959447040}\n{'loss': 2.6961, 'grad_norm': 0.357799232006073, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 960495616}\n{'loss': 2.7944, 'grad_norm': 0.318391352891922, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 961544192}\n{'loss': 2.8084, 'grad_norm': 0.32000190019607544, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 962592768}\n{'loss': 2.8024, 'grad_norm': 0.3250137269496918, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 963641344}\n{'loss': 2.7951, 'grad_norm': 0.33021438121795654, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 964689920}\n{'loss': 2.8069, 'grad_norm': 
0.3257495164871216, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 965738496}\n{'loss': 2.8148, 'grad_norm': 0.3608018159866333, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 966787072}\n[2025-03-11 04:13:12 WARNING] '(ProtocolError('Connection aborted.', RemoteDisconnected('Remote end closed connection without response')), '(Request ID: 36a7cc72-4605-416a-8742-59488d719150)')' thrown while requesting GET https://huggingface.co/datasets/cerebras/SlimPajama-627B/resolve/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/train/chunk1/example_train_5267.jsonl.zst\n[2025-03-11 04:13:12 WARNING] Retrying in 1s [Retry 1/5].\n{'loss': 2.8089, 'grad_norm': 0.3657573163509369, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 967835648}\n{'loss': 2.8243, 'grad_norm': 0.3791966736316681, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 968884224}\n{'loss': 2.6837, 'grad_norm': 0.4036826193332672, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 969932800}\n{'loss': 2.6694, 'grad_norm': 0.34643635153770447, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 970981376}\n{'loss': 2.8455, 'grad_norm': 0.35321497917175293, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 972029952}\n{'loss': 2.5156, 'grad_norm': 0.3488744795322418, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 973078528}\n{'loss': 2.7185, 'grad_norm': 0.33396172523498535, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 974127104}\n{'loss': 2.856, 'grad_norm': 0.36425134539604187, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 975175680}\n{'loss': 2.7639, 'grad_norm': 0.34361588954925537, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 976224256}\n{'loss': 2.7777, 'grad_norm': 0.45501893758773804, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 977272832}\n{'loss': 2.8692, 'grad_norm': 0.4391760230064392, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 978321408}\n{'loss': 2.7885, 'grad_norm': 0.385729044675827, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 979369984}\n{'loss': 2.8622, 'grad_norm': 0.4122815728187561, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 980418560}\n{'loss': 2.674, 'grad_norm': 0.3223947584629059, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 981467136}\n{'loss': 2.7148, 'grad_norm': 0.39820024371147156, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 982515712}\n{'loss': 2.6975, 'grad_norm': 0.38311144709587097, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 983564288}\n{'loss': 2.8515, 'grad_norm': 0.4324709177017212, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 984612864}\n{'loss': 2.5684, 'grad_norm': 0.3579341471195221, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 985661440}\n{'loss': 2.9478, 'grad_norm': 0.4081536531448364, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 986710016}\n{'loss': 2.7375, 'grad_norm': 0.4332145154476166, 'learning_rate': 0.001, 'epoch': 0.11, 'num_input_tokens_seen': 987758592}\n{'loss': 2.7773, 'grad_norm': 0.43510711193084717, 'learning_rate': 0.001, 'epoch': 0.12, 'num_input_tokens_seen': 988807168}\n...\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/utils/file_utils.py\", line 1378, in _iter_from_urlpaths\n raise FileNotFoundError(urlpath)\nFileNotFoundError: 
zstd://example_train_1215.jsonl::hf://datasets/cerebras/SlimPajama-627B@2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/train/chunk9/example_train_1215.jsonl.zst\n```\n\n</details>", "Two more today:\n```python\nFileNotFoundError: zstd://example_holdout_5012.jsonl::hf://datasets/cerebras/SlimPajama-627B@2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk4/example_holdout_5012.jsonl.zst\n```\nand\n```python\nFileNotFoundError: zstd://example_holdout_3073.jsonl::hf://datasets/cerebras/SlimPajama-627B@2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/validation/chunk2/example_holdout_3073.jsonl.zst\n```\nboth of which exist on the hub ([here](https://huggingface.co/datasets/cerebras/SlimPajama-627B/blob/main/validation/chunk4/example_holdout_5012.jsonl.zst) and [here](https://huggingface.co/datasets/cerebras/SlimPajama-627B/blob/main/validation/chunk2/example_holdout_3073.jsonl.zst)).", "I also observe the same thing when using streaming with DCLM dataset with 64 GPUs. I have tried ```export HF_DATASETS_STREAMING_PARALLELISM=1``` but doesn't help.", "Another error today, this time a 504 gateway timeout `HfHubHTTPError`. I have no idea if this is related, but I suspect that it is considering the setup is identical. Notably though, the two errors I posted yesterday were for evaluation (hence the `holdout` in the URLs) whereas today there was no problem doing that first evaluation, but now the `train` split failed.\n```python\n...\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py\", line 2226, in __iter__\n for key, example in ex_iterable:\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py\", line 1499, in __iter__\n for x in self.ex_iterable:\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py\", line 1067, in __iter__\n yield from self._iter()\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py\", line 1231, in _iter\n for key, transformed_example in iter_outputs():\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py\", line 1207, in iter_outputs\n for i, key_example in inputs_iterator:\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py\", line 1111, in iter_inputs\n for key, example in iterator:\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py\", line 371, in __iter__\n for key, pa_table in self.generate_tables_fn(**gen_kwags):\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/packaged_modules/json/json.py\", line 114, in _generate_tables\n with open(file, \"rb\") as f:\n ^^^^^^^^^^^^^^^^\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/streaming.py\", line 75, in wrapper\n return function(*args, download_config=download_config, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/utils/file_utils.py\", line 948, in xopen\n file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/fsspec/core.py\", line 147, in open\n return self.__enter__()\n ^^^^^^^^^^^^^^^^\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/fsspec/core.py\", line 105, in __enter__\n f = self.fs.open(self.path, mode=mode)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/miniconda3/envs/draft/lib/python3.11/site-packages/fsspec/spec.py\", line 1301, in open\n f = self._open(\n ^^^^^^^^^^^\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/filesystems/compression.py\", line 85, in _open\n return self._open_with_fsspec().open()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/fsspec/core.py\", line 147, in open\n return self.__enter__()\n ^^^^^^^^^^^^^^^^\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/fsspec/core.py\", line 105, in __enter__\n f = self.fs.open(self.path, mode=mode)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/fsspec/spec.py\", line 1301, in open\n f = self._open(\n ^^^^^^^^^^^\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/huggingface_hub/hf_file_system.py\", line 234, in _open\n return HfFileSystemFile(self, path, mode=mode, revision=revision, block_size=block_size, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/huggingface_hub/hf_file_system.py\", line 691, in __init__\n self.details = fs.info(self.resolved_path.unresolve(), expand_info=False)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/huggingface_hub/hf_file_system.py\", line 524, in info\n self.ls(parent_path, expand_info=False)\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/huggingface_hub/hf_file_system.py\", line 284, in ls\n out = self._ls_tree(path, refresh=refresh, revision=revision, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/huggingface_hub/hf_file_system.py\", line 375, in _ls_tree\n for path_info in tree:\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 3080, in list_repo_tree\n for path_info in paginate(path=tree_url, headers=headers, params={\"recursive\": recursive, \"expand\": expand}):\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/huggingface_hub/utils/_pagination.py\", line 46, in paginate\n hf_raise_for_status(r)\n File \"/miniconda3/envs/draft/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 477, in hf_raise_for_status\n raise _format(HfHubHTTPError, str(e), response) from e\nhuggingface_hub.errors.HfHubHTTPError: 504 Server Error: Gateway Time-out for url: https://huggingface.co/api/datasets/cerebras/SlimPajama-627B/tree/2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/train%2Fchunk8?recursive=False&expand=False&cursor=ZXlKbWFXeGxYMjVoYldVaU9pSjBjbUZwYmk5amFIVnVhemd2WlhoaGJYQnNaVjkwY21GcGJsOHpOams0TG1wemIyNXNMbnB6ZENKOTozMDAw\n```", "Another one today:\n```python\nFileNotFoundError: zstd://example_train_4985.jsonl::hf://datasets/cerebras/SlimPajama-627B@2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/train/chunk5/example_train_4985.jsonl.zst\n```", "This is a constant issue, and has been for six months, at least. 
Currently, half of my streaming datasets are failing with errors like this.\n\nMuennighoff/natural-instructions:\n```\n File \"/home/crow/repos/praxis/.venv/lib/python3.13/site-packages/datasets/utils/file_utils.py\", line 1379, in _iter_from_urlpaths\n raise FileNotFoundError(urlpath)\nFileNotFoundError: hf://datasets/Muennighoff/natural-instructions@a29a9757125f4bb1c26445ad0d2ef7d9b2cc9c4c/train/task343_winomt_classification_profession_anti_train.jsonl\n```\nopen-phi/textbooks:\n```\n File \"/home/crow/repos/praxis/.venv/lib/python3.13/site-packages/datasets/utils/file_utils.py\", line 1379, in _iter_from_urlpaths\n raise FileNotFoundError(urlpath)\nFileNotFoundError: hf://datasets/open-phi/textbooks@292aaae99cbecacad50f692d7327887f05dacaf2/data/train-00000-of-00001-b513d9e388d56453.parquet\n```\nHuggingFaceTB/smoltalk:\n```\n File \"/home/crow/repos/praxis/.venv/lib/python3.13/site-packages/datasets/utils/file_utils.py\", line 1379, in _iter_from_urlpaths\n raise FileNotFoundError(urlpath)\nFileNotFoundError: hf://datasets/HuggingFaceTB/smoltalk@5feaf2fd3ffca7c237fc38d1861bc30365d48ffa/data/all/train-00003-of-00009.parquet\n```" ]
2025-03-07T19:14:18Z
2025-04-17T23:40:35Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug In https://github.com/huggingface/datasets/issues/6843 it was noted that the streaming feature of `datasets` is highly susceptible to outages and doesn't back off for long (or even *at all*). I was training a model while streaming SlimPajama and training crashed with a `FileNotFoundError`. I can only assume that this was due to a momentary outage considering the file in question, `train/chunk9/example_train_3889.jsonl.zst`, [exists like all other files in SlimPajama](https://huggingface.co/datasets/cerebras/SlimPajama-627B/blob/main/train/chunk9/example_train_3889.jsonl.zst). ```python ... File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 2226, in __iter__ for key, example in ex_iterable: File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 1499, in __iter__ for x in self.ex_iterable: File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 1067, in __iter__ yield from self._iter() File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 1231, in _iter for key, transformed_example in iter_outputs(): File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 1207, in iter_outputs for i, key_example in inputs_iterator: File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 1111, in iter_inputs for key, example in iterator: File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/iterable_dataset.py", line 371, in __iter__ for key, pa_table in self.generate_tables_fn(**gen_kwags): File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/packaged_modules/json/json.py", line 99, in _generate_tables for file_idx, file in enumerate(itertools.chain.from_iterable(files)): File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/utils/track.py", line 50, in __iter__ for x in self.generator(*self.args): File "/miniconda3/envs/draft/lib/python3.11/site-packages/datasets/utils/file_utils.py", line 1378, in _iter_from_urlpaths raise FileNotFoundError(urlpath) FileNotFoundError: zstd://example_train_3889.jsonl::hf://datasets/cerebras/SlimPajama-627B@2d0accdd58c5d5511943ca1f5ff0e3eb5e293543/train/chunk9/example_train_3889.jsonl.zst ``` That final `raise` is at the bottom of the following snippet: https://github.com/huggingface/datasets/blob/f693f4e93aabafa878470c80fd42ddb10ec550d6/src/datasets/utils/file_utils.py#L1354-L1379 So clearly, something choked up in `xisfile`. ### Steps to reproduce the bug This happens when streaming a dataset and iterating over it. In my case, that iteration is done in Trainer's `inner_training_loop`, but this is not relevant to the iterator. ```python File "/miniconda3/envs/draft/lib/python3.11/site-packages/accelerate/data_loader.py", line 835, in __iter__ next_batch, next_batch_info = self._fetch_batches(main_iterator) ``` ### Expected behavior This bug and the linked issue have one thing in common: *when streaming fails to retrieve an example, the entire program gives up and crashes*. As users, we cannot even protect ourselves from this: when we are iterating over a dataset, we can't make `datasets` skip over a bad example or wait a little longer to retry the iteration, because when a Python generator/iterator raises an error, it loses all its context. 
In other words: if you have something that looks like `for b in a: for c in b: for d in c:`, errors in the innermost loop can only be caught by a `try ... except` in `c.__iter__()`. There should be such exception handling in `datasets` and it should have a **configurable exponential backoff**: first wait and retry after 1 minute, then 2 minutes, then 4 minutes, then 8 minutes, ... and after a given number of retries, **skip the bad example**, and **only after** skipping a given number of examples, give up and crash. This was requested in https://github.com/huggingface/datasets/issues/6843 too, since currently there is only linear backoff *and* it is clearly not applied to `xisfile`. ### Environment info - `datasets` version: 3.3.2 *(the latest version)* - Platform: Linux-4.18.0-513.24.1.el8_9.x86_64-x86_64-with-glibc2.28 - Python version: 3.11.7 - `huggingface_hub` version: 0.26.5 - PyArrow version: 15.0.0 - Pandas version: 2.2.0 - `fsspec` version: 2024.10.0
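For illustration, here is a minimal sketch of the configurable exponential backoff requested above, applied around a single fetch call. The helper name `fetch_with_backoff`, its defaults, and the exact exception types are assumptions made for the example, not the actual `datasets` internals:

```python
import time

def fetch_with_backoff(fetch, max_retries=5, base_delay=60.0):
    # Call fetch() and retry transient failures with exponential backoff:
    # wait 1 minute, then 2, then 4, ... giving up only after max_retries tries.
    for attempt in range(max_retries):
        try:
            return fetch()
        except (FileNotFoundError, ConnectionError, TimeoutError):
            if attempt == max_retries - 1:
                raise  # out of retries: the caller can now skip the example or crash
            time.sleep(base_delay * 2**attempt)
```

Wrapping each per-file fetch in such a helper inside the iterator, plus a counter that skips an example once its retries are exhausted and only raises after a given number of skips, would give the crash-resistant streaming behavior described above.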
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7440/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7440/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/5331
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5331/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5331/comments
https://api.github.com/repos/huggingface/datasets/issues/5331/events
https://github.com/huggingface/datasets/pull/5331
1,473,146,738
PR_kwDODunzps5EKDpr
5,331
Support for multiple configs in packaged modules via metadata yaml info
{ "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/polinaeterna", "id": 16348744, "login": "polinaeterna", "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "repos_url": "https://api.github.com/users/polinaeterna/repos", "site_admin": false, "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "type": "User", "url": "https://api.github.com/users/polinaeterna", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "feel free to merge `main` into your PR to fix the CI :)", "Let me see if I can fix the pattern thing ^^'", "Hmm I think it would be easier to specify the `data_files` in the end, because having a split pattern like `{split}-...` at the root of the repository can lead to unexpected behaviors IMO, and we probably don't want to have a different behavior for `data_files` depending if it's inside a `data_dir` or not\r\n\r\nMaybe something like\r\n```yaml\r\nbuilder_config:\r\n data_dir: data_dir\r\n data_files:\r\n - split: train\r\n pattern: train-[0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*\r\n```", " > Also, I'm not sure if it's a good idea to have this field in the YAML metadata - Transformers use this part of the card only for Hub-related stuff (widgets, tags, CO2 emission, etc.), and I think we should aim to do the same in Datasets. We could achieve this by having these kwargs in a special file (they can be seen as a faster way of defining a builder (builder script) that subclasses a packaged builder) and removing the dataset_info field (the only useful info there seem to be features and we can fetch those directly from a dataset script/Parquet files).\r\n\r\nSomething like `config.json`?\r\n\r\n```json\r\n{\r\n \"data_dir\": \"data\"\r\n \"data_files\": {\r\n \"train\": \"train-[0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*\"\r\n }\r\n}\r\n```\r\n\r\nwe could also support lists for several configs", "opened https://github.com/huggingface/datasets/issues/5694", "I opened a PR to this PR to add data_files in YAML: https://github.com/polinaeterna/datasets/pull/1\r\n\r\n```yaml\r\nbuilder_config:\r\n data_files:\r\n - split: train\r\n pattern: data/train-*\r\n```", "Let me open a PR to see if I can move the data files resolution outside of the MetadataConfigs to not modify it in-place", "I wonder if we can make the cache backward compatible: we could just check if the cache directory with the old path exists. It will be useful for the research team which has a big datasets cache", "> I wonder if we can make the cache backward compatible: we could just check if the cache directory with the old path exists. It will be useful for the research team which has a big datasets cache\r\n\r\n![image](https://github.com/huggingface/datasets/assets/16348744/90a96e79-2a0d-4d37-95bd-b75fa962c094)\r\n\r\nIn the next PR maybe? :D \r\nIt's possible but requires some additional logic to correctly pass old `config_kwargs` (which used to include `data_files` but now it's `None` for builders from metadata) to generate the hash which is used to create the path.", "If we only consider datasets that were pushed to hub, it's just a matter of using `\"{username}__parquet\"` instead of `\"{username}__{dataset_name}\"` in the cache directory name. 
The hashes stay the same :)\r\n\r\nEDIT: and the config name\r\nEDIT2: and the arrow file names", "Did a small PR for backward compatibility, it was easy to add in the end: https://github.com/polinaeterna/datasets/pull/3", "Just created a branch [dev-3.0](https://github.com/huggingface/datasets/tree/dev-3.0) in which we can merge this one and the other datasets 3.0 related PRs", "@lhoestq why can't we merge it in main?", "We can, it was just in case we had other things to merge after @mariosasko or @albertvillanova 's reviews", "@lhoestq @albertvillanova @mariosasko we agreed on having `configs` (in plural) as a metadata field in readme but apparently Hub's yaml validation doesn't allow it to be not a list :D \r\n![image](https://github.com/huggingface/datasets/assets/16348744/52131ee8-80e0-4f6e-90cd-8ff83caf4625)\r\n(with `config` (in singular) it works)\r\n\r\nedit: and now the tests for hub datasets with metadata configs are failing because I cannot change the yaml there...", "> we agreed on having configs (in plural) as a metadata field in readme but apparently Hub's yaml validation doesn't allow it to be not a list :D\r\n\r\nIf the `configs` field is specified in the YAML, the Hub can use it to [improve](https://github.com/huggingface/moon-landing/blob/97aca4cac32fbb7d84ce5eba9b18afad87968c4a/server/views/components/DatasetLibraryModal/datasetLibrarySnippets.ts#L11) the `Use in dataset library` snippet by listing the possible config values in `load_dataset`. So I think this needs to be fixed on the Hub side.\r\n\r\nPS: I couldn't find an instance of someone using this field on the Hub, so I think using it for this feature is OK.", "> I couldn't find an instance of someone using this field on the Hub, so I think using it for this feature is OK.\r\n\r\n@mariosasko I think it's because @lhoestq renamed `configs` to `config_names` in all canonical datasets :D so yes, `configs` field is now supposed to include custom configuration parameters introduced in this PR, and `config_names` is used (not really used lol) for list of strings of config names. It's being fixed on the Hub's side https://github.com/huggingface/moon-landing/pull/6490", "after more thought I agree it's maybe overkill to do a major release for this one, since we have a good backward compatibility", "There is one edge case I forgot to mention in the reviews - I think it's a good idea to support passing config params that are functions (Pandas uses them a lot) using this API (e.g. `converters` in the CSV config for converting a string column into a sequence). I see two solutions: string blocks with Python code in YAML or PyYAML [tags](https://pyyaml.org/wiki/PyYAMLDocumentation#yaml-tags-and-python-types). 
\r\n\r\nBut I think this can be addressed later.", "I'm resolving the conflicts and writing some docs :) let's merge this soon !", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005868 / 0.011353 (-0.005485) | 0.003544 / 0.011008 (-0.007464) | 0.080329 / 0.038508 (0.041821) | 0.061072 / 0.023109 (0.037963) | 0.307802 / 0.275898 (0.031904) | 0.340353 / 0.323480 (0.016873) | 0.004665 / 0.007986 (-0.003321) | 0.002779 / 0.004328 (-0.001550) | 0.062065 / 0.004250 (0.057815) | 0.046350 / 0.037052 (0.009297) | 0.312045 / 0.258489 (0.053556) | 0.353524 / 0.293841 (0.059683) | 0.026965 / 0.128546 (-0.101581) | 0.007906 / 0.075646 (-0.067740) | 0.260678 / 0.419271 (-0.158593) | 0.044167 / 0.043533 (0.000634) | 0.309757 / 0.255139 (0.054618) | 0.340188 / 0.283200 (0.056988) | 0.020440 / 0.141683 (-0.121243) | 1.486886 / 1.452155 (0.034732) | 1.548330 / 1.492716 (0.055614) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.188658 / 0.018006 (0.170652) | 0.422204 / 0.000490 (0.421715) | 0.003508 / 0.000200 (0.003308) | 0.000068 / 0.000054 (0.000013) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.025173 / 0.037411 (-0.012238) | 0.072868 / 0.014526 (0.058343) | 0.084817 / 0.176557 (-0.091739) | 0.151667 / 0.737135 (-0.585468) | 0.085632 / 0.296338 (-0.210706) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.400998 / 0.215209 
(0.185789) | 4.022274 / 2.077655 (1.944619) | 2.025768 / 1.504120 (0.521648) | 1.874193 / 1.541195 (0.332998) | 2.006537 / 1.468490 (0.538047) | 0.501799 / 4.584777 (-4.082978) | 2.987487 / 3.745712 (-0.758225) | 4.552295 / 5.269862 (-0.717566) | 2.775859 / 4.565676 (-1.789817) | 0.057596 / 0.424275 (-0.366679) | 0.006449 / 0.007607 (-0.001158) | 0.470776 / 0.226044 (0.244732) | 4.725933 / 2.268929 (2.457005) | 2.480130 / 55.444624 (-52.964494) | 2.183919 / 6.876477 (-4.692558) | 2.408052 / 2.142072 (0.265979) | 0.584038 / 4.805227 (-4.221190) | 0.124964 / 6.500664 (-6.375701) | 0.060939 / 0.075469 (-0.014530) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.221263 / 1.841788 (-0.620524) | 18.326372 / 8.074308 (10.252064) | 13.398937 / 10.191392 (3.207545) | 0.149153 / 0.680424 (-0.531271) | 0.016941 / 0.534201 (-0.517260) | 0.332106 / 0.579283 (-0.247177) | 0.339958 / 0.434364 (-0.094406) | 0.378125 / 0.540337 (-0.162212) | 0.517787 / 1.386936 (-0.869149) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005927 / 0.011353 (-0.005426) | 0.003607 / 0.011008 (-0.007402) | 0.062925 / 0.038508 (0.024417) | 0.058676 / 0.023109 (0.035566) | 0.362129 / 0.275898 (0.086231) | 0.395864 / 0.323480 (0.072384) | 0.004652 / 0.007986 (-0.003334) | 0.002893 / 0.004328 (-0.001435) | 0.062696 / 0.004250 (0.058445) | 0.049988 / 0.037052 (0.012935) | 0.365366 / 0.258489 (0.106877) | 0.412326 / 0.293841 (0.118485) | 0.027118 / 0.128546 (-0.101429) | 0.008179 / 0.075646 (-0.067467) | 0.068048 / 0.419271 (-0.351223) | 0.041065 / 0.043533 (-0.002468) | 0.359858 / 0.255139 (0.104719) | 0.386589 / 0.283200 (0.103390) | 0.020467 / 0.141683 (-0.121216) | 1.438070 / 1.452155 (-0.014084) | 1.479617 / 1.492716 (-0.013099) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.231516 / 0.018006 (0.213510) | 0.413407 / 0.000490 (0.412917) | 0.000358 / 0.000200 (0.000158) | 0.000052 / 0.000054 
(-0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.026071 / 0.037411 (-0.011340) | 0.076486 / 0.014526 (0.061960) | 0.085943 / 0.176557 (-0.090613) | 0.138087 / 0.737135 (-0.599048) | 0.087466 / 0.296338 (-0.208872) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.417711 / 0.215209 (0.202502) | 4.171915 / 2.077655 (2.094260) | 2.140677 / 1.504120 (0.636557) | 1.960164 / 1.541195 (0.418969) | 2.002134 / 1.468490 (0.533644) | 0.499699 / 4.584777 (-4.085078) | 2.991814 / 3.745712 (-0.753898) | 2.906589 / 5.269862 (-2.363272) | 1.842305 / 4.565676 (-2.723372) | 0.057633 / 0.424275 (-0.366642) | 0.006465 / 0.007607 (-0.001142) | 0.492874 / 0.226044 (0.266830) | 4.931613 / 2.268929 (2.662684) | 2.623161 / 55.444624 (-52.821463) | 2.310624 / 6.876477 (-4.565853) | 2.483146 / 2.142072 (0.341074) | 0.586910 / 4.805227 (-4.218317) | 0.124681 / 6.500664 (-6.375983) | 0.061561 / 0.075469 (-0.013908) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.319111 / 1.841788 (-0.522677) | 18.637326 / 8.074308 (10.563018) | 13.803912 / 10.191392 (3.612520) | 0.143989 / 0.680424 (-0.536435) | 0.017025 / 0.534201 (-0.517176) | 0.333156 / 0.579283 (-0.246127) | 0.342163 / 0.434364 (-0.092201) | 0.380357 / 0.540337 (-0.159981) | 0.512261 / 1.386936 (-0.874675) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#f49a16346dc35e5eabeec39778d0f2e4e850dfd7 \"CML watermark\")\n" ]
2022-12-02T16:43:44Z
2023-07-24T15:49:54Z
2023-07-13T13:27:56Z
CONTRIBUTOR
null
null
null
will solve https://github.com/huggingface/datasets/issues/5209 and https://github.com/huggingface/datasets/issues/5151 and many others... Config parameters for packaged builders are parsed from the `"builder_config"` field in the README.md file (a separate first-level field, not part of "dataset_info"), example: ```yaml --- dataset_info: ... configs: - config_name: v1 data_dir: v1 drop_labels: true - config_name: v2 data_dir: v2 drop_labels: false ``` I tried to align packaged builders with custom configs parsed from metadata with script dataset builders as much as possible. Their builders are created dynamically (see `configure_builder_class()` in `load.py`) and have a `BUILDER_CONFIGS` attribute filled with `BuilderConfig` objects in the same way as for datasets with a script. ## load_dataset 1. If there is a single config in the metadata and it doesn't have a name, the name becomes "default" (as we do for "dataset_info"), [example](https://huggingface.co/datasets/polinaeterna/audiofolder_one_default_config_in_metadata/blob/main/README.md): ```python load_dataset("ds") == load_dataset("ds", "default") # load with the params provided in metadata load_dataset("ds", "random name") # ValueError: BuilderConfig 'random_name' not found. Available: ['default'] ``` 2. If there is a single config in the metadata with a `config_name` provided, it becomes the default one (loaded when no `config_name` is specified), [example](https://huggingface.co/datasets/polinaeterna/audiofolder_one_nondefault_config_in_metadata): ```python load_dataset("ds") == load_dataset("ds", "custom") # load with the params provided in metadata load_dataset("ds", "random name") # ValueError: BuilderConfig 'random_name' not found. Available: ['custom'] ``` 3. If there are several named configs in the metadata, [example](https://huggingface.co/datasets/polinaeterna/audiofolder_two_configs_in_metadata/blob/main/README.md): ```python load_dataset("ds", "v1") # load with "v1" params load_dataset("ds", "v2") # load with "v2" params load_dataset("ds") # ValueError: BuilderConfig 'default' not found. Available: ['v1', 'v2'] ``` Thanks to @lhoestq and [this change](https://github.com/polinaeterna/datasets/pull/1), it's possible to add a `"default"` field in the YAML and set it to true, to make that config the default one (loaded when no config is specified): ```yaml configs: - config_name: v1 drop_labels: true default: true - config_name: v2 ... ``` then `load_dataset("ds") == load_dataset("ds", "v1")`. ## dataset_name and cache I decided that it's reasonable to add a `dataset_name` attribute to the `DatasetBuilder` class, which would be equal to `name` for script datasets but reflect the real dataset name for packaged builders (the last part of the path/name from the Hub). This is mostly to reorganize the cache structure (I believe we can do this in the major release?) because otherwise, with custom configs for packaged builders all stored in the same directory, it was becoming a mess. And in general it makes much more sense like this, from the datasets-server perspective too, though it's a breaking change. So the cache dir has the following structure: `"{namespace__}<dataset_name>/<config_name>/<version>/<hash>/"` and arrow/parquet filenames are also `"<dataset_name>-<split>.arrow"`. For example, for the `polinaeterna/audiofolder_two_configs_in_metadata` Hub dataset the cache dir is `polinaeterna___audiofolder_two_configs_in_metadata/v1-5532fac9443ea252/0.0.0/6cbdd16f8688354c63b4e2a36e1585d05de285023ee6443ffd71c4182055c0fc/` and the train arrow file is `audiofolder_two_configs_in_metadata-train.arrow`.
For script datasets it remains unchanged. ## push_to_hub To support custom configs with `push_to_hub`, the data is put under a directory named either `<config_name>` if `config_name` is **not** "default", or "data" if `config_name` is omitted or "default" (for backward compatibility). A `"builder_config"` field is added to README.md, with `config_name` (optional) and `data_files` fields. For `"data_files"`, a `"pattern"` parameter is introduced to resolve data files correctly, see https://github.com/polinaeterna/datasets/pull/1. - `ds.push_to_hub("ds")` --> one config ("default"), put under the "data" directory, [example](https://huggingface.co/datasets/polinaeterna/push_to_hub_single_config/blob/main/README.md) ```yaml dataset_info: ... configs: data_files: - split: train pattern: data/train-* ... ``` - `ds.push_to_hub("ds", "custom")` --> put under the "custom" directory, [example](https://huggingface.co/datasets/polinaeterna/push_to_hub_singe_nondefault_config/blob/main/README.md) ```yaml configs: config_name: custom data_files: - split: train path: custom/train-* ... ``` - for many configs, [example](https://huggingface.co/datasets/polinaeterna/push_to_hub_many_configs/blob/main/README.md): ```yaml configs: - config_name: v1 data_files: - split: train path: v1/train-* ... - config_name: v2 data_files: - split: train path: v2/train-* ... ``` Thanks to @lhoestq and https://github.com/polinaeterna/datasets/pull/1, when pushing to datasets created **before** this change, README.md is updated accordingly (the config for the old data is added along with the one that is being pushed). The `"dataset_info"` yaml field is updated accordingly (new configs are added). This shouldn't break anything! TODO in separate PRs: - [x] docs - [ ] probably update the test CLI util (make --save_info not rewrite `builder_config` in the readme)
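For reference, a hedged sketch of the user-facing workflow this PR enables; the repo id `user/ds` and the config names are placeholders:

```python
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"text": ["a", "b"]})
ds.push_to_hub("user/ds", "v1")  # data files land under the v1/ directory
ds.push_to_hub("user/ds", "v2")  # data files land under the v2/ directory

# each config is then resolved via the `configs` field written to the README.md
v1 = load_dataset("user/ds", "v1")
v2 = load_dataset("user/ds", "v2")
```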
{ "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/polinaeterna", "id": 16348744, "login": "polinaeterna", "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "repos_url": "https://api.github.com/users/polinaeterna/repos", "site_admin": false, "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "type": "User", "url": "https://api.github.com/users/polinaeterna", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5331/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5331/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5331.diff", "html_url": "https://github.com/huggingface/datasets/pull/5331", "merged_at": "2023-07-13T13:27:56Z", "patch_url": "https://github.com/huggingface/datasets/pull/5331.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5331" }
https://api.github.com/repos/huggingface/datasets/issues/4738
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4738/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4738/comments
https://api.github.com/repos/huggingface/datasets/issues/4738/events
https://github.com/huggingface/datasets/pull/4738
1,315,222,166
PR_kwDODunzps479hq4
4,738
Use CI unit/integration tests
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "I think this PR can be merged. Willing to see it in action.\r\n\r\nCC: @lhoestq " ]
2022-07-22T16:48:00Z
2022-07-26T20:19:22Z
2022-07-26T20:07:05Z
MEMBER
null
null
null
This PR: - Implements separate unit/integration tests - A failure in the integration tests does not cancel the rest of the jobs - We should implement more robust integration tests: work in progress in a subsequent PR - For the moment, tests involving network requests are marked as integration (see the sketch below): this will evolve
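A minimal sketch of how such a unit/integration split can be expressed with pytest markers; the marker name `integration` mirrors the PR description, while the test names are placeholders:

```python
import pytest

@pytest.mark.integration
def test_load_dataset_from_hub():
    # hits the network, so it only runs in the integration CI job
    ...

def test_builder_config_parsing():
    # pure unit test, runs in the fast unit CI job
    ...
```

The two CI jobs would then select their tests with `pytest -m "not integration"` and `pytest -m integration` respectively, so a network outage can only fail the integration job.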
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4738/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4738/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4738.diff", "html_url": "https://github.com/huggingface/datasets/pull/4738", "merged_at": "2022-07-26T20:07:05Z", "patch_url": "https://github.com/huggingface/datasets/pull/4738.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4738" }
https://api.github.com/repos/huggingface/datasets/issues/6062
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6062/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6062/comments
https://api.github.com/repos/huggingface/datasets/issues/6062/events
https://github.com/huggingface/datasets/pull/6062
1,818,341,584
PR_kwDODunzps5WOj62
6,062
Improve `Dataset.from_list` docstring
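For context, `Dataset.from_list` builds a dataset from a list of row dicts; a minimal usage example:

```python
from datasets import Dataset

ds = Dataset.from_list([{"text": "hello", "label": 0}, {"text": "world", "label": 1}])
print(ds[0])  # {'text': 'hello', 'label': 0}
```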
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.008340 / 0.011353 (-0.003013) | 0.005053 / 0.011008 (-0.005955) | 0.103294 / 0.038508 (0.064786) | 0.069417 / 0.023109 (0.046308) | 0.436922 / 0.275898 (0.161024) | 0.461348 / 0.323480 (0.137868) | 0.006030 / 0.007986 (-0.001955) | 0.003727 / 0.004328 (-0.000601) | 0.076384 / 0.004250 (0.072134) | 0.056742 / 0.037052 (0.019689) | 0.439996 / 0.258489 (0.181507) | 0.469417 / 0.293841 (0.175577) | 0.044343 / 0.128546 (-0.084203) | 0.012634 / 0.075646 (-0.063013) | 0.359746 / 0.419271 (-0.059525) | 0.064842 / 0.043533 (0.021309) | 0.425960 / 0.255139 (0.170821) | 0.458568 / 0.283200 (0.175368) | 0.039802 / 0.141683 (-0.101881) | 1.687320 / 1.452155 (0.235165) | 1.806212 / 1.492716 (0.313496) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.255484 / 0.018006 (0.237478) | 0.563039 / 0.000490 (0.562549) | 0.000445 / 0.000200 (0.000245) | 0.000076 / 0.000054 (0.000022) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.027511 / 0.037411 (-0.009900) | 0.089185 / 0.014526 (0.074659) | 0.098397 / 0.176557 (-0.078160) | 0.163897 / 0.737135 (-0.573238) | 0.099905 / 0.296338 (-0.196434) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.612737 / 0.215209 (0.397528) | 6.209948 / 2.077655 (4.132294) | 
2.756060 / 1.504120 (1.251940) | 2.402115 / 1.541195 (0.860920) | 2.422665 / 1.468490 (0.954175) | 0.834799 / 4.584777 (-3.749977) | 5.251699 / 3.745712 (1.505986) | 5.554141 / 5.269862 (0.284280) | 3.254699 / 4.565676 (-1.310977) | 0.095697 / 0.424275 (-0.328578) | 0.009406 / 0.007607 (0.001799) | 0.729025 / 0.226044 (0.502980) | 7.195521 / 2.268929 (4.926593) | 3.360264 / 55.444624 (-52.084361) | 2.696764 / 6.876477 (-4.179713) | 2.702796 / 2.142072 (0.560724) | 0.974420 / 4.805227 (-3.830808) | 0.195215 / 6.500664 (-6.305450) | 0.069754 / 0.075469 (-0.005715) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.553458 / 1.841788 (-0.288330) | 21.972436 / 8.074308 (13.898128) | 20.027392 / 10.191392 (9.836000) | 0.216950 / 0.680424 (-0.463474) | 0.032196 / 0.534201 (-0.502005) | 0.449884 / 0.579283 (-0.129399) | 0.586213 / 0.434364 (0.151849) | 0.537227 / 0.540337 (-0.003111) | 0.751022 / 1.386936 (-0.635914) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007859 / 0.011353 (-0.003493) | 0.004762 / 0.011008 (-0.006246) | 0.086023 / 0.038508 (0.047515) | 0.069218 / 0.023109 (0.046109) | 0.449312 / 0.275898 (0.173414) | 0.481687 / 0.323480 (0.158207) | 0.006318 / 0.007986 (-0.001668) | 0.004063 / 0.004328 (-0.000266) | 0.076917 / 0.004250 (0.072667) | 0.058034 / 0.037052 (0.020981) | 0.474265 / 0.258489 (0.215775) | 0.497736 / 0.293841 (0.203895) | 0.044587 / 0.128546 (-0.083959) | 0.013880 / 0.075646 (-0.061766) | 0.089233 / 0.419271 (-0.330038) | 0.058760 / 0.043533 (0.015227) | 0.439515 / 0.255139 (0.184376) | 0.473246 / 0.283200 (0.190047) | 0.042968 / 0.141683 (-0.098715) | 1.802647 / 1.452155 (0.350493) | 1.778563 / 1.492716 (0.285847) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.343741 / 0.018006 (0.325735) | 0.567409 / 0.000490 (0.566919) | 0.029727 / 0.000200 (0.029527) | 0.000147 / 0.000054 (0.000092) |\n\n### Benchmark: 
benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.031021 / 0.037411 (-0.006390) | 0.096659 / 0.014526 (0.082133) | 0.103341 / 0.176557 (-0.073215) | 0.169893 / 0.737135 (-0.567242) | 0.103280 / 0.296338 (-0.193058) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.584724 / 0.215209 (0.369515) | 5.792596 / 2.077655 (3.714941) | 2.683133 / 1.504120 (1.179013) | 2.367837 / 1.541195 (0.826643) | 2.378567 / 1.468490 (0.910076) | 0.803427 / 4.584777 (-3.781350) | 5.179017 / 3.745712 (1.433305) | 4.446323 / 5.269862 (-0.823538) | 2.771731 / 4.565676 (-1.793945) | 0.100943 / 0.424275 (-0.323332) | 0.009875 / 0.007607 (0.002268) | 0.725260 / 0.226044 (0.499216) | 7.149728 / 2.268929 (4.880800) | 3.646438 / 55.444624 (-51.798187) | 2.793858 / 6.876477 (-4.082618) | 2.971966 / 2.142072 (0.829894) | 0.998147 / 4.805227 (-3.807080) | 0.198004 / 6.500664 (-6.302660) | 0.072581 / 0.075469 (-0.002888) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.696737 / 1.841788 (-0.145051) | 22.615193 / 8.074308 (14.540884) | 20.272421 / 10.191392 (10.081029) | 0.237459 / 0.680424 (-0.442965) | 0.034774 / 0.534201 (-0.499427) | 0.484649 / 0.579283 (-0.094634) | 0.590263 / 0.434364 (0.155899) | 0.547833 / 0.540337 (0.007495) | 0.762109 / 1.386936 (-0.624827) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#4bc3628b5a8f71ad7cfc014d8ba5e798f26becb7 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after 
write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.011183 / 0.011353 (-0.000170) | 0.005267 / 0.011008 (-0.005741) | 0.108506 / 0.038508 (0.069997) | 0.083541 / 0.023109 (0.060431) | 0.452189 / 0.275898 (0.176291) | 0.496229 / 0.323480 (0.172749) | 0.004951 / 0.007986 (-0.003035) | 0.004452 / 0.004328 (0.000124) | 0.085133 / 0.004250 (0.080883) | 0.061291 / 0.037052 (0.024239) | 0.450453 / 0.258489 (0.191964) | 0.506456 / 0.293841 (0.212616) | 0.049784 / 0.128546 (-0.078762) | 0.014738 / 0.075646 (-0.060908) | 0.372603 / 0.419271 (-0.046669) | 0.065223 / 0.043533 (0.021690) | 0.467872 / 0.255139 (0.212733) | 0.500062 / 0.283200 (0.216862) | 0.040911 / 0.141683 (-0.100772) | 1.852970 / 1.452155 (0.400816) | 2.016996 / 1.492716 (0.524280) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.262620 / 0.018006 (0.244614) | 0.593925 / 0.000490 (0.593435) | 0.000413 / 0.000200 (0.000213) | 0.000085 / 0.000054 (0.000030) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.035713 / 0.037411 (-0.001698) | 0.111403 / 0.014526 (0.096878) | 0.117259 / 0.176557 (-0.059298) | 0.201545 / 0.737135 (-0.535590) | 0.133111 / 0.296338 (-0.163228) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.597318 / 0.215209 (0.382109) | 5.882691 / 2.077655 (3.805036) | 2.572203 / 1.504120 (1.068083) | 2.248016 / 1.541195 (0.706821) | 2.359103 / 1.468490 (0.890613) | 0.852023 / 4.584777 (-3.732754) | 5.270831 / 3.745712 (1.525119) | 4.712915 / 5.269862 (-0.556947) | 3.124295 / 4.565676 (-1.441381) | 0.092045 / 0.424275 (-0.332230) | 0.007834 / 0.007607 (0.000227) | 0.695711 / 0.226044 (0.469666) | 7.011760 / 2.268929 (4.742831) | 3.333300 / 55.444624 (-52.111325) | 2.745889 / 6.876477 (-4.130587) | 3.153458 / 2.142072 (1.011385) | 1.011089 / 4.805227 (-3.794139) | 0.207467 / 6.500664 (-6.293197) | 0.079802 / 0.075469 (0.004333) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.703784 / 1.841788 (-0.138003) | 24.414340 / 8.074308 (16.340032) | 22.534528 / 10.191392 (12.343136) | 0.276129 / 0.680424 (-0.404295) | 0.027954 / 0.534201 (-0.506247) | 0.484261 / 0.579283 (-0.095022) | 0.605316 / 0.434364 (0.170952) | 0.557219 / 0.540337 
(0.016882) | 0.802209 / 1.386936 (-0.584727) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.009109 / 0.011353 (-0.002244) | 0.005376 / 0.011008 (-0.005632) | 0.085141 / 0.038508 (0.046633) | 0.100560 / 0.023109 (0.077450) | 0.482673 / 0.275898 (0.206775) | 0.551582 / 0.323480 (0.228103) | 0.006756 / 0.007986 (-0.001229) | 0.004171 / 0.004328 (-0.000158) | 0.084184 / 0.004250 (0.079933) | 0.069283 / 0.037052 (0.032230) | 0.517722 / 0.258489 (0.259233) | 0.542641 / 0.293841 (0.248801) | 0.047790 / 0.128546 (-0.080756) | 0.014063 / 0.075646 (-0.061583) | 0.110591 / 0.419271 (-0.308680) | 0.064373 / 0.043533 (0.020840) | 0.496636 / 0.255139 (0.241497) | 0.551906 / 0.283200 (0.268707) | 0.046187 / 0.141683 (-0.095496) | 1.864836 / 1.452155 (0.412681) | 1.923765 / 1.492716 (0.431049) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.286558 / 0.018006 (0.268552) | 0.610353 / 0.000490 (0.609863) | 0.012647 / 0.000200 (0.012447) | 0.000162 / 0.000054 (0.000107) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.037099 / 0.037411 (-0.000313) | 0.108608 / 0.014526 (0.094082) | 0.120386 / 0.176557 (-0.056170) | 0.183450 / 0.737135 (-0.553686) | 0.124860 / 0.296338 (-0.171479) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.629006 / 0.215209 (0.413797) | 6.309206 / 2.077655 (4.231551) | 2.878558 / 1.504120 (1.374438) | 2.616093 / 1.541195 (1.074898) | 2.668096 / 
1.468490 (1.199606) | 0.865732 / 4.584777 (-3.719045) | 5.312433 / 3.745712 (1.566721) | 4.799352 / 5.269862 (-0.470509) | 3.142207 / 4.565676 (-1.423469) | 0.099591 / 0.424275 (-0.324684) | 0.009159 / 0.007607 (0.001552) | 0.730999 / 0.226044 (0.504954) | 7.486442 / 2.268929 (5.217513) | 3.657699 / 55.444624 (-51.786925) | 3.080094 / 6.876477 (-3.796383) | 3.320976 / 2.142072 (1.178904) | 1.089324 / 4.805227 (-3.715904) | 0.222831 / 6.500664 (-6.277833) | 0.083976 / 0.075469 (0.008507) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.793181 / 1.841788 (-0.048607) | 25.307444 / 8.074308 (17.233136) | 21.321713 / 10.191392 (11.130321) | 0.216326 / 0.680424 (-0.464098) | 0.034298 / 0.534201 (-0.499903) | 0.497173 / 0.579283 (-0.082110) | 0.643550 / 0.434364 (0.209186) | 0.581213 / 0.540337 (0.040876) | 0.830973 / 1.386936 (-0.555963) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#24875bb8494c3a7803182b08c70747b1b1a6bf4d \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006886 / 0.011353 (-0.004467) | 0.004267 / 0.011008 (-0.006741) | 0.086182 / 0.038508 (0.047674) | 0.083405 / 0.023109 (0.060296) | 0.313717 / 0.275898 (0.037819) | 0.351476 / 0.323480 (0.027996) | 0.005702 / 0.007986 (-0.002284) | 0.003802 / 0.004328 (-0.000526) | 0.065759 / 0.004250 (0.061508) | 0.060056 / 0.037052 (0.023003) | 0.315871 / 0.258489 (0.057382) | 0.364520 / 0.293841 (0.070679) | 0.032067 / 0.128546 (-0.096479) | 0.008679 / 0.075646 (-0.066967) | 0.294968 / 0.419271 (-0.124303) | 0.054684 / 0.043533 (0.011152) | 0.314124 / 0.255139 (0.058985) | 0.337312 / 0.283200 (0.054113) | 0.025051 / 0.141683 (-0.116632) | 1.505242 / 1.452155 (0.053087) | 1.608263 / 1.492716 (0.115547) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.266562 / 0.018006 (0.248556) | 0.579887 / 0.000490 (0.579397) | 0.004161 / 0.000200 
(0.003961) | 0.000090 / 0.000054 (0.000035) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.031153 / 0.037411 (-0.006258) | 0.087703 / 0.014526 (0.073177) | 0.103864 / 0.176557 (-0.072693) | 0.159032 / 0.737135 (-0.578104) | 0.102482 / 0.296338 (-0.193857) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.405805 / 0.215209 (0.190596) | 4.050669 / 2.077655 (1.973014) | 2.064384 / 1.504120 (0.560264) | 1.892825 / 1.541195 (0.351630) | 2.001083 / 1.468490 (0.532593) | 0.478174 / 4.584777 (-4.106603) | 3.542580 / 3.745712 (-0.203132) | 3.319205 / 5.269862 (-1.950656) | 2.075868 / 4.565676 (-2.489808) | 0.057345 / 0.424275 (-0.366930) | 0.007459 / 0.007607 (-0.000148) | 0.483564 / 0.226044 (0.257520) | 4.827746 / 2.268929 (2.558818) | 2.579541 / 55.444624 (-52.865083) | 2.205125 / 6.876477 (-4.671352) | 2.489206 / 2.142072 (0.347133) | 0.575843 / 4.805227 (-4.229384) | 0.133010 / 6.500664 (-6.367654) | 0.061082 / 0.075469 (-0.014387) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.286059 / 1.841788 (-0.555729) | 20.575173 / 8.074308 (12.500865) | 14.351692 / 10.191392 (4.160300) | 0.150401 / 0.680424 (-0.530022) | 0.018678 / 0.534201 (-0.515523) | 0.397860 / 0.579283 (-0.181423) | 0.419474 / 0.434364 (-0.014890) | 0.474492 / 0.540337 (-0.065846) | 0.659510 / 1.386936 (-0.727426) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006948 / 0.011353 (-0.004405) | 0.004305 / 0.011008 (-0.006703) | 0.064220 / 0.038508 (0.025712) | 0.083251 / 0.023109 (0.060142) | 0.388148 / 0.275898 (0.112250) | 0.417834 / 0.323480 (0.094354) | 0.005762 / 0.007986 (-0.002224) | 0.003803 / 0.004328 (-0.000525) | 0.066365 / 0.004250 (0.062114) | 0.061808 / 0.037052 (0.024756) | 0.390889 / 0.258489 (0.132400) | 0.430619 / 0.293841 (0.136778) | 0.031777 / 0.128546 (-0.096770) | 0.008781 / 0.075646 (-0.066865) | 0.070844 / 0.419271 (-0.348427) | 0.050552 / 0.043533 (0.007019) | 0.378420 / 0.255139 (0.123281) | 0.403273 / 0.283200 (0.120074) | 0.024578 / 0.141683 (-0.117105) | 1.494790 / 1.452155 (0.042636) | 1.549408 / 1.492716 (0.056692) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.302668 / 0.018006 (0.284662) | 0.542235 / 0.000490 (0.541746) | 0.001847 / 0.000200 (0.001647) | 0.000092 / 0.000054 (0.000037) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.031947 / 0.037411 (-0.005465) | 0.092220 / 0.014526 (0.077694) | 0.104525 / 0.176557 (-0.072031) | 0.162000 / 0.737135 (-0.575135) | 0.106795 / 0.296338 (-0.189543) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.412035 / 0.215209 (0.196826) | 4.106527 / 2.077655 (2.028872) | 2.111529 / 1.504120 (0.607409) | 1.953201 / 1.541195 (0.412006) | 2.079258 / 1.468490 (0.610768) | 0.479562 / 4.584777 (-4.105215) | 3.606256 / 3.745712 (-0.139456) | 5.175250 / 5.269862 (-0.094612) | 3.292465 / 4.565676 (-1.273212) | 0.057726 / 0.424275 (-0.366549) | 0.008247 / 0.007607 (0.000640) | 0.486143 / 0.226044 (0.260098) | 4.859051 / 2.268929 (2.590123) | 2.675629 / 55.444624 (-52.768995) | 2.267448 / 6.876477 (-4.609029) | 2.567639 / 2.142072 (0.425567) | 0.580822 / 4.805227 (-4.224406) | 0.134942 / 6.500664 (-6.365722) | 0.063825 / 0.075469 (-0.011644) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.334421 / 1.841788 (-0.507367) | 20.481428 / 8.074308 (12.407120) | 14.227943 / 10.191392 (4.036551) | 0.170711 / 0.680424 (-0.509713) | 0.018212 / 0.534201 (-0.515989) | 0.397212 / 0.579283 (-0.182071) | 0.411934 / 0.434364 (-0.022430) | 0.478019 / 0.540337 (-0.062319) | 0.645434 / 1.386936 (-0.741502) 
|\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#ef3d3f10886e23a65cce3bfd939b8ec0d5a5c2c1 \"CML watermark\")\n" ]
2023-07-24T12:36:38Z
2023-07-24T14:43:48Z
2023-07-24T14:34:43Z
COLLABORATOR
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6062/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6062/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6062.diff", "html_url": "https://github.com/huggingface/datasets/pull/6062", "merged_at": "2023-07-24T14:34:43Z", "patch_url": "https://github.com/huggingface/datasets/pull/6062.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6062" }
https://api.github.com/repos/huggingface/datasets/issues/5180
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5180/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5180/comments
https://api.github.com/repos/huggingface/datasets/issues/5180/events
https://github.com/huggingface/datasets/issues/5180
1,431,012,438
I_kwDODunzps5VS4RW
5,180
An example or recommendations for creating large image datasets?
{ "avatar_url": "https://avatars.githubusercontent.com/u/22957388?v=4", "events_url": "https://api.github.com/users/sayakpaul/events{/privacy}", "followers_url": "https://api.github.com/users/sayakpaul/followers", "following_url": "https://api.github.com/users/sayakpaul/following{/other_user}", "gists_url": "https://api.github.com/users/sayakpaul/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sayakpaul", "id": 22957388, "login": "sayakpaul", "node_id": "MDQ6VXNlcjIyOTU3Mzg4", "organizations_url": "https://api.github.com/users/sayakpaul/orgs", "received_events_url": "https://api.github.com/users/sayakpaul/received_events", "repos_url": "https://api.github.com/users/sayakpaul/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sayakpaul/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sayakpaul/subscriptions", "type": "User", "url": "https://api.github.com/users/sayakpaul", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "The beam utilities allow to prepare a dataset as parquet in your cloud storage. From my perspective this CLI is not super easy to use, but we've been working on a new python API to prepare a dataset in your cloud storage:\r\n```python\r\nfrom datasets import load_dataset_builder\r\n\r\nbuilder = load_dataset_builder(\"c4\", \"en\")\r\nbuilder.download_and_prepapre(\"s3://my-bucket/c4\", file_format=\"parquet\")\r\n```\r\n\r\nAnd to use Beam you can do:\r\n```python\r\nbeam_runner = ... # one of \"SparkRunner\", \"DataFlowRunner\", \"DirectRunner\", etc.\r\nbeam_options = ...\r\n\r\nbuilder.download_and_prepapre(\r\n \"s3://my-bucket/c4\",\r\n file_format=\"parquet\",\r\n beam_runner=beam_runner,\r\n beam_options=beam_options\r\n)\r\n```\r\n\r\nThough Beam can be used ONLY if there is a dataset script based on the `BeamBasedBuilder` right now - it doesn't work on an arbitrary dataset (see [wikipedia.py](https://huggingface.co/datasets/wikipedia/blob/main/wikipedia.py) for example).", "Thanks! \r\n\r\nWould be nice to have something similar for creating large image datasets. " ]
2022-11-01T07:38:38Z
2022-11-02T10:17:11Z
null
MEMBER
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
I know that Apache Beam and `datasets` have [some connector utilities](https://huggingface.co/docs/datasets/beam). But it's a little unclear what we mean by "But if you want to run your own Beam pipeline with Dataflow, here is how:". What does that pipeline do? As a user, I was wondering if we have this support for creating large image datasets. If so, we should mention that [here](https://huggingface.co/docs/datasets/image_dataset). Cc @lhoestq
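For illustration, a minimal sketch of the direction later suggested in the comments, applied to images: preparing a dataset as Parquet shards directly in cloud storage via `load_dataset_builder`. The `imagefolder` loader and the `s3://my-bucket` output path are placeholder assumptions, not details from this issue.

```python
from datasets import load_dataset_builder

# Sketch: prepare a local image dataset as Parquet shards in cloud storage
# instead of materializing Arrow files locally. The data_dir and S3 path
# below are placeholders; credentials would be passed via storage_options.
builder = load_dataset_builder("imagefolder", data_dir="./my_images")
builder.download_and_prepare("s3://my-bucket/my_images", file_format="parquet")
```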
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5180/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5180/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/4723
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4723/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4723/comments
https://api.github.com/repos/huggingface/datasets/issues/4723/events
https://github.com/huggingface/datasets/pull/4723
1,310,970,604
PR_kwDODunzps47uoSj
4,723
Refactor conftest fixtures
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-07-20T12:15:22Z
2022-07-21T14:37:11Z
2022-07-21T14:24:18Z
MEMBER
null
null
null
Previously, the fixture modules `hub_fixtures` and `s3_fixtures`: - were both in the root test directory - were imported using `import *` - as a side effect, the modules `os` and `pytest` were imported from `s3_fixtures` into `conftest` This PR: - puts both fixture modules in a dedicated directory `fixtures` - renames them to `fixtures.hub` and `fixtures.s3` - imports them into `conftest` as plugins via the `pytest_plugins` variable (see the sketch below), which avoids the `import *` - additionally creates a new fixture module `fixtures.files` with all file-related fixtures
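A minimal sketch of the plugin-based registration described above; the module paths are taken from the PR description:

```python
# tests/conftest.py
# Registering the fixture modules as pytest plugins exposes their fixtures
# to every test module without any `import *` side effects.
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.s3"]
```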
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/4723/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4723/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4723.diff", "html_url": "https://github.com/huggingface/datasets/pull/4723", "merged_at": "2022-07-21T14:24:18Z", "patch_url": "https://github.com/huggingface/datasets/pull/4723.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4723" }
https://api.github.com/repos/huggingface/datasets/issues/5076
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5076/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5076/comments
https://api.github.com/repos/huggingface/datasets/issues/5076/events
https://github.com/huggingface/datasets/pull/5076
1,397,918,092
PR_kwDODunzps5AOJp7
5,076
fix: update exception throw from OSError to EnvironmentError in `push…
{ "avatar_url": "https://avatars.githubusercontent.com/u/29496999?v=4", "events_url": "https://api.github.com/users/rahulXs/events{/privacy}", "followers_url": "https://api.github.com/users/rahulXs/followers", "following_url": "https://api.github.com/users/rahulXs/following{/other_user}", "gists_url": "https://api.github.com/users/rahulXs/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rahulXs", "id": 29496999, "login": "rahulXs", "node_id": "MDQ6VXNlcjI5NDk2OTk5", "organizations_url": "https://api.github.com/users/rahulXs/orgs", "received_events_url": "https://api.github.com/users/rahulXs/received_events", "repos_url": "https://api.github.com/users/rahulXs/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rahulXs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rahulXs/subscriptions", "type": "User", "url": "https://api.github.com/users/rahulXs", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-05T14:46:29Z
2022-10-07T14:35:57Z
2022-10-07T14:33:27Z
CONTRIBUTOR
null
null
null
Status: Ready for review Description of Changes: Fixes #5075 Changes proposed in this pull request: - Throw EnvironmentError instead of OSError in `push_to_hub` when the Hub token is not present.
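Note that in Python 3, `EnvironmentError` is an alias of `OSError`, so this change only affects which name appears in the source, not the exception type raised. A minimal sketch of the pattern; the message text is illustrative, not the exact string from the codebase:

```python
# EnvironmentError and OSError are the same class in Python 3.
assert EnvironmentError is OSError

token = None  # stand-in for a missing Hub token
if token is None:
    raise EnvironmentError(
        "You need to provide a `token` or be logged in to the Hugging Face Hub."
    )
```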
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5076/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5076/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5076.diff", "html_url": "https://github.com/huggingface/datasets/pull/5076", "merged_at": "2022-10-07T14:33:27Z", "patch_url": "https://github.com/huggingface/datasets/pull/5076.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5076" }
https://api.github.com/repos/huggingface/datasets/issues/5711
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5711/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5711/comments
https://api.github.com/repos/huggingface/datasets/issues/5711/events
https://github.com/huggingface/datasets/issues/5711
1,655,971,647
I_kwDODunzps5itB8_
5,711
load_dataset in v2.11.0 raises "ValueError: seek of closed file" in np.load()
{ "avatar_url": "https://avatars.githubusercontent.com/u/1219084?v=4", "events_url": "https://api.github.com/users/rcasero/events{/privacy}", "followers_url": "https://api.github.com/users/rcasero/followers", "following_url": "https://api.github.com/users/rcasero/following{/other_user}", "gists_url": "https://api.github.com/users/rcasero/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rcasero", "id": 1219084, "login": "rcasero", "node_id": "MDQ6VXNlcjEyMTkwODQ=", "organizations_url": "https://api.github.com/users/rcasero/orgs", "received_events_url": "https://api.github.com/users/rcasero/received_events", "repos_url": "https://api.github.com/users/rcasero/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rcasero/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rcasero/subscriptions", "type": "User", "url": "https://api.github.com/users/rcasero", "user_view_type": "public" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
null
[ "It seems like https://github.com/huggingface/datasets/pull/5626 has introduced this error. \r\n\r\ncc @albertvillanova \r\n\r\nI think replacing:\r\nhttps://github.com/huggingface/datasets/blob/0803a006db1c395ac715662cc6079651f77c11ea/src/datasets/download/streaming_download_manager.py#L777-L778\r\nwith:\r\n```python\r\nreturn np.load(xopen(filepath_or_buffer, \"rb\", use_auth_token=use_auth_token), *args, **kwargs)\r\n```\r\nshould fix the issue.\r\n\r\n(Maybe this is also worth doing a patch release afterward)", "Thanks for reporting, @rcasero.\r\n\r\nI can have a look..." ]
2023-04-05T16:46:49Z
2023-04-07T09:16:59Z
2023-04-07T09:16:59Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Hi, I have some `load_dataset()` code for a custom offline dataset that works with datasets v2.10.1. ```python ds = datasets.load_dataset(path=dataset_dir, name=configuration, data_dir=dataset_dir, cache_dir=cache_dir, aux_dir=aux_dir, # download_mode=datasets.DownloadMode.FORCE_REDOWNLOAD, num_proc=18) ``` When upgrading datasets to 2.11.0, it fails with the error ``` Traceback (most recent call last): File "<string>", line 2, in <module> File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/site-packages/datasets/load.py", line 1791, in load_dataset builder_instance.download_and_prepare( File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/site-packages/datasets/builder.py", line 891, in download_and_prepare self._download_and_prepare( File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/site-packages/datasets/builder.py", line 1651, in _download_and_prepare super()._download_and_prepare( File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/site-packages/datasets/builder.py", line 964, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/home/ramon.casero/.cache/huggingface/modules/datasets_modules/datasets/71f67f69e6e00e139903a121f96b71f39b65a6b6aaeb0862e6a5da3a3f565b4c/mydataset.py", line 682, in _split_generators self.some_function() File "/home/ramon.casero/.cache/huggingface/modules/datasets_modules/datasets/71f67f69e6e00e139903a121f96b71f39b65a6b6aaeb0862e6a5da3a3f565b4c/mydataset.py", line 1314, in some_function() x_df = pd.DataFrame({'cell_type_descriptor': fp['x'].tolist()}) File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/site-packages/numpy/lib/npyio.py", line 248, in __getitem__ bytes = self.zip.open(key) File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/zipfile.py", line 1530, in open fheader = zef_file.read(sizeFileHeader) File "/home/ramon.casero/opt/miniconda3/envs/myenv/lib/python3.10/zipfile.py", line 744, in read self._file.seek(self._pos) ValueError: seek of closed file ``` ### Steps to reproduce the bug Sorry, I cannot share the data or code because they are not mine to share, but the point of failure is a call in `some_function()` ```python with np.load(embedding_filename) as fp: x_df = pd.DataFrame({'feature': fp['x'].tolist()}) ``` I'll try to generate a short snippet that reproduces the error. ### Expected behavior I would expect that `load_dataset` works on the custom dataset generation script for v2.11.0 the same way it works for 2.10.1, without making `np.load()` raise a `ValueError: seek of closed file` error. ### Environment info - `datasets` version: 2.11.0 - Platform: Linux-4.18.0-483.el8.x86_64-x86_64-with-glibc2.28 - Python version: 3.10.8 - Huggingface_hub version: 0.12.0 - PyArrow version: 11.0.0 - Pandas version: 1.5.2 - numpy: 1.24.2 - This is an offline dataset that uses `datasets.config.HF_DATASETS_OFFLINE = True` in the generation script.
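A minimal, `datasets`-independent sketch of the failure mode in the traceback above: `.npz` archives are loaded lazily, so accessing a member requires the underlying file handle to still be open.

```python
import numpy as np

np.savez("embedding.npz", x=np.arange(3))
f = open("embedding.npz", "rb")
fp = np.load(f)  # NpzFile keeps a reference to `f` and reads members lazily
f.close()
fp["x"]  # ValueError: seek of closed file
```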
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5711/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5711/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/5311
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5311/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5311/comments
https://api.github.com/repos/huggingface/datasets/issues/5311/events
https://github.com/huggingface/datasets/pull/5311
1,467,875,153
PR_kwDODunzps5D4Mm3
5,311
Add `features` param to `IterableDataset.map`
{ "avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4", "events_url": "https://api.github.com/users/alvarobartt/events{/privacy}", "followers_url": "https://api.github.com/users/alvarobartt/followers", "following_url": "https://api.github.com/users/alvarobartt/following{/other_user}", "gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/alvarobartt", "id": 36760800, "login": "alvarobartt", "node_id": "MDQ6VXNlcjM2NzYwODAw", "organizations_url": "https://api.github.com/users/alvarobartt/orgs", "received_events_url": "https://api.github.com/users/alvarobartt/received_events", "repos_url": "https://api.github.com/users/alvarobartt/repos", "site_admin": false, "starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions", "type": "User", "url": "https://api.github.com/users/alvarobartt", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-11-29T11:08:34Z
2022-12-06T15:45:02Z
2022-12-06T15:42:04Z
MEMBER
null
null
null
## Description As suggested by @lhoestq in #3888, this PR adds a `features` param to `IterableDataset.map` so that features are preserved (rather than set to `None`, the default behavior) whenever the user passes them. This makes `IterableDataset.map` consistent with `Dataset.map`, where the `features` param lets the user specify the features instead of having them inferred, with later validation done by `ArrowWriter`. This is already handled internally by the functions relying on `IterableDataset.map`, such as `rename_column`, `rename_columns`, and `remove_columns`, as described in #5287. ## Usage Example ```python from datasets import load_dataset, Features ds = load_dataset("rotten_tomatoes", split="validation", streaming=True) print(ds.info.features) ds = ds.map( lambda x: {"target": x["label"]}, features=Features( {"target": ds.info.features["label"], "label": ds.info.features["label"], "text": ds.info.features["text"]} ), ) print(ds.info.features) ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5311/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5311/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5311.diff", "html_url": "https://github.com/huggingface/datasets/pull/5311", "merged_at": "2022-12-06T15:42:04Z", "patch_url": "https://github.com/huggingface/datasets/pull/5311.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5311" }
https://api.github.com/repos/huggingface/datasets/issues/6022
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6022/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6022/comments
https://api.github.com/repos/huggingface/datasets/issues/6022/events
https://github.com/huggingface/datasets/issues/6022
1,800,092,589
I_kwDODunzps5rSzut
6,022
Batch map raises TypeError: '>=' not supported between instances of 'NoneType' and 'int'
{ "avatar_url": "https://avatars.githubusercontent.com/u/138426806?v=4", "events_url": "https://api.github.com/users/codingl2k1/events{/privacy}", "followers_url": "https://api.github.com/users/codingl2k1/followers", "following_url": "https://api.github.com/users/codingl2k1/following{/other_user}", "gists_url": "https://api.github.com/users/codingl2k1/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/codingl2k1", "id": 138426806, "login": "codingl2k1", "node_id": "U_kgDOCEA5tg", "organizations_url": "https://api.github.com/users/codingl2k1/orgs", "received_events_url": "https://api.github.com/users/codingl2k1/received_events", "repos_url": "https://api.github.com/users/codingl2k1/repos", "site_admin": false, "starred_url": "https://api.github.com/users/codingl2k1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/codingl2k1/subscriptions", "type": "User", "url": "https://api.github.com/users/codingl2k1", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "Thanks for reporting! I've opened a PR with a fix." ]
2023-07-12T03:20:17Z
2023-07-12T16:18:06Z
2023-07-12T16:18:05Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug When mapping some datasets with `batched=True`, datasets may raise an exception: ```python Traceback (most recent call last): File "/Users/codingl2k1/Work/datasets/venv/lib/python3.11/site-packages/multiprocess/pool.py", line 125, in worker result = (True, func(*args, **kwds)) ^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/utils/py_utils.py", line 1328, in _write_generator_to_queue for i, result in enumerate(func(**kwargs)): File "/Users/codingl2k1/Work/datasets/src/datasets/arrow_dataset.py", line 3483, in _map_single writer.write_batch(batch) File "/Users/codingl2k1/Work/datasets/src/datasets/arrow_writer.py", line 549, in write_batch array = cast_array_to_feature(col_values, col_type) if col_type is not None else col_values ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/table.py", line 1831, in wrapper return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/table.py", line 1831, in <listcomp> return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/table.py", line 2063, in cast_array_to_feature return feature.cast_storage(array) ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/features/features.py", line 1098, in cast_storage if min_max["max"] >= self.num_classes: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ TypeError: '>=' not supported between instances of 'NoneType' and 'int' The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/codingl2k1/Work/datasets/t1.py", line 33, in <module> ds = ds.map(transforms, num_proc=14, batched=True, batch_size=5) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/dataset_dict.py", line 850, in map { File "/Users/codingl2k1/Work/datasets/src/datasets/dataset_dict.py", line 851, in <dictcomp> k: dataset.map( ^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/arrow_dataset.py", line 577, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/arrow_dataset.py", line 542, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/arrow_dataset.py", line 3179, in map for rank, done, content in iflatmap_unordered( File "/Users/codingl2k1/Work/datasets/src/datasets/utils/py_utils.py", line 1368, in iflatmap_unordered [async_result.get(timeout=0.05) for async_result in async_results] File "/Users/codingl2k1/Work/datasets/src/datasets/utils/py_utils.py", line 1368, in <listcomp> [async_result.get(timeout=0.05) for async_result in async_results] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/venv/lib/python3.11/site-packages/multiprocess/pool.py", line 774, in get raise self._value TypeError: '>=' not supported between instances of 'NoneType' and 'int' ``` ### Steps to reproduce the bug 1. Check out the latest main of datasets. 2. 
Run the code: ```python from datasets import load_dataset def transforms(examples): # examples["pixel_values"] = [image.convert("RGB").resize((100, 100)) for image in examples["image"]] return examples ds = load_dataset("scene_parse_150") ds = ds.map(transforms, num_proc=14, batched=True, batch_size=5) print(ds) ``` ### Expected behavior `map` should complete without raising an exception. ### Environment info Datasets: https://github.com/huggingface/datasets/commit/b8067c0262073891180869f700ebef5ac3dc5cce Python: 3.11.4 System: macOS
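The comparison fails because the computed min/max of an all-null label column is `None`. A hedged sketch of the kind of guard that avoids it; the actual fix lives in `ClassLabel.cast_storage` (see `features.py` in the traceback above), and this is only an illustration, not the merged patch:

```python
import pyarrow as pa
import pyarrow.compute as pc

def check_labels(array: pa.Array, num_classes: int) -> None:
    # pc.min_max over an all-null array yields {"min": None, "max": None},
    # so guard against None before comparing with num_classes.
    min_max = pc.min_max(array).as_py()
    if min_max["max"] is not None and min_max["max"] >= num_classes:
        raise ValueError(
            f"Class label {min_max['max']} greater than configured num_classes {num_classes}"
        )
```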
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6022/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6022/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/4859
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4859/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4859/comments
https://api.github.com/repos/huggingface/datasets/issues/4859/events
https://github.com/huggingface/datasets/issues/4859
1,342,231,016
I_kwDODunzps5QANHo
4,859
can't install using conda on Windows 10
{ "avatar_url": "https://avatars.githubusercontent.com/u/22627691?v=4", "events_url": "https://api.github.com/users/xoffey/events{/privacy}", "followers_url": "https://api.github.com/users/xoffey/followers", "following_url": "https://api.github.com/users/xoffey/following{/other_user}", "gists_url": "https://api.github.com/users/xoffey/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/xoffey", "id": 22627691, "login": "xoffey", "node_id": "MDQ6VXNlcjIyNjI3Njkx", "organizations_url": "https://api.github.com/users/xoffey/orgs", "received_events_url": "https://api.github.com/users/xoffey/received_events", "repos_url": "https://api.github.com/users/xoffey/repos", "site_admin": false, "starred_url": "https://api.github.com/users/xoffey/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xoffey/subscriptions", "type": "User", "url": "https://api.github.com/users/xoffey", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
[]
2022-08-17T19:57:37Z
2022-08-17T19:57:37Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
## Describe the bug I wanted to install using conda or Anaconda navigator. That didn't work, so I had to install using pip. ## Steps to reproduce the bug conda install -c huggingface -c conda-forge datasets ## Expected results Should have indicated successful installation. ## Actual results Solving environment: failed with initial frozen solve. Retrying with flexible solve. Solving environment: failed with repodata from current_repodata.json, will retry with next repodata source. ... took forever, so I cancelled it with ctrl-c ## Environment info - `datasets` version: 2.4.0 # after installing with pip - Platform: Windows-10-10.0.19044-SP0 - Python version: 3.9.12 - PyArrow version: 9.0.0 - Pandas version: 1.4.2 - conda version: 4.13.0 conda info active environment : base active env location : G:\anaconda2022 shell level : 1 user config file : C:\Users\michael\.condarc populated config files : C:\Users\michael\.condarc conda version : 4.13.0 conda-build version : 3.21.8 python version : 3.9.12.final.0 virtual packages : __cuda=11.1=0 __win=0=0 __archspec=1=x86_64 base environment : G:\anaconda2022 (writable) conda av data dir : G:\anaconda2022\etc\conda conda av metadata url : None channel URLs : https://conda.anaconda.org/pytorch/win-64 https://conda.anaconda.org/pytorch/noarch https://conda.anaconda.org/huggingface/win-64 https://conda.anaconda.org/huggingface/noarch https://conda.anaconda.org/conda-forge/win-64 https://conda.anaconda.org/conda-forge/noarch https://conda.anaconda.org/anaconda-fusion/win-64 https://conda.anaconda.org/anaconda-fusion/noarch https://repo.anaconda.com/pkgs/main/win-64 https://repo.anaconda.com/pkgs/main/noarch https://repo.anaconda.com/pkgs/r/win-64 https://repo.anaconda.com/pkgs/r/noarch https://repo.anaconda.com/pkgs/msys2/win-64 https://repo.anaconda.com/pkgs/msys2/noarch package cache : G:\anaconda2022\pkgs C:\Users\michael\.conda\pkgs C:\Users\michael\AppData\Local\conda\conda\pkgs envs directories : G:\anaconda2022\envs C:\Users\michael\.conda\envs C:\Users\michael\AppData\Local\conda\conda\envs platform : win-64 user-agent : conda/4.13.0 requests/2.27.1 CPython/3.9.12 Windows/10 Windows/10.0.19044 administrator : False netrc file : None offline mode : False
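A common workaround for solver hangs like this one (an assumption, not a confirmed fix for this particular setup) is to install into a fresh environment instead of `base`, which gives the solver far fewer packages to reconcile:

```
conda create -n hf-datasets python=3.9
conda activate hf-datasets
conda install -c huggingface -c conda-forge datasets
```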
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4859/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4859/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/5008
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5008/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5008/comments
https://api.github.com/repos/huggingface/datasets/issues/5008/events
https://github.com/huggingface/datasets/pull/5008
1,381,090,903
PR_kwDODunzps4_XAc5
5,008
Re-apply input columns change
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-21T15:09:01Z
2022-09-22T13:57:36Z
2022-09-22T13:55:23Z
COLLABORATOR
null
null
null
Fixes the `filter` + `input_columns` combination, which is used in the `transformers` examples for instance. Revert #5006 (which in turn reverts #4971) Fix https://github.com/huggingface/datasets/issues/4858
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5008/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5008/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5008.diff", "html_url": "https://github.com/huggingface/datasets/pull/5008", "merged_at": "2022-09-22T13:55:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/5008.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5008" }
https://api.github.com/repos/huggingface/datasets/issues/6987
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6987/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6987/comments
https://api.github.com/repos/huggingface/datasets/issues/6987/events
https://github.com/huggingface/datasets/pull/6987
2,363,728,190
PR_kwDODunzps5zCRH6
6,987
Remove beam
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
null
[]
{ "closed_at": null, "closed_issues": 5, "created_at": "2023-02-13T16:22:42Z", "creator": { "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }, "description": "Next major release", "due_on": null, "html_url": "https://github.com/huggingface/datasets/milestone/10", "id": 9038583, "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/10/labels", "node_id": "MI_kwDODunzps4Aier3", "number": 10, "open_issues": 3, "state": "open", "title": "3.0", "updated_at": "2024-08-21T09:35:06Z", "url": "https://api.github.com/repos/huggingface/datasets/milestones/10" }
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6987). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005931 / 0.011353 (-0.005422) | 0.004127 / 0.011008 (-0.006881) | 0.063854 / 0.038508 (0.025346) | 0.034687 / 0.023109 (0.011577) | 0.251397 / 0.275898 (-0.024501) | 0.280348 / 0.323480 (-0.043132) | 0.005008 / 0.007986 (-0.002977) | 0.002930 / 0.004328 (-0.001398) | 0.050703 / 0.004250 (0.046452) | 0.047109 / 0.037052 (0.010057) | 0.258525 / 0.258489 (0.000035) | 0.288759 / 0.293841 (-0.005081) | 0.030547 / 0.128546 (-0.097999) | 0.102184 / 0.075646 (0.026537) | 0.207934 / 0.419271 (-0.211338) | 0.036477 / 0.043533 (-0.007056) | 0.338160 / 0.255139 (0.083021) | 0.310735 / 0.283200 (0.027535) | 0.018637 / 0.141683 (-0.123045) | 1.228539 / 1.452155 (-0.223616) | 1.168004 / 1.492716 (-0.324713) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.098355 / 0.018006 (0.080348) | 0.302310 / 0.000490 (0.301820) | 0.000215 / 0.000200 (0.000015) | 0.000044 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.019607 / 0.037411 (-0.017804) | 0.063795 / 0.014526 (0.049269) | 0.075029 / 0.176557 (-0.101528) | 0.121293 / 0.737135 (-0.615842) | 0.076480 / 0.296338 (-0.219858) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.285285 / 0.215209 (0.070076) | 2.747455 / 2.077655 (0.669801) | 1.454190 / 1.504120 (-0.049929) | 1.330777 / 1.541195 (-0.210418) | 1.358292 / 1.468490 (-0.110198) | 0.724991 / 4.584777 (-3.859786) | 2.374889 / 3.745712 (-1.370823) | 2.985868 / 5.269862 (-2.283994) | 1.921521 / 4.565676 (-2.644156) | 0.078589 / 0.424275 (-0.345686) | 0.005104 / 0.007607 (-0.002503) | 0.333898 / 0.226044 (0.107853) | 3.317702 / 2.268929 (1.048773) | 1.887161 / 55.444624 (-53.557463) | 1.510700 / 6.876477 (-5.365777) | 1.544175 / 2.142072 (-0.597898) | 0.804262 / 4.805227 (-4.000965) | 0.134015 / 6.500664 (-6.366649) | 0.042819 / 0.075469 (-0.032650) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.012142 / 1.841788 (-0.829645) | 11.861780 / 8.074308 (3.787472) | 9.797285 / 10.191392 (-0.394107) | 0.142114 / 0.680424 (-0.538310) | 0.013984 / 0.534201 (-0.520217) | 0.302412 / 0.579283 (-0.276871) | 0.265060 / 0.434364 (-0.169304) | 0.337510 / 0.540337 (-0.202828) | 0.432197 / 1.386936 (-0.954739) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005920 / 0.011353 (-0.005433) | 0.003991 / 0.011008 (-0.007017) | 0.049874 / 0.038508 (0.011366) | 0.033771 / 0.023109 (0.010662) | 0.264789 / 0.275898 (-0.011109) | 0.287554 / 0.323480 (-0.035926) | 0.004341 / 0.007986 (-0.003644) | 0.002888 / 0.004328 (-0.001441) | 0.049383 / 0.004250 (0.045133) | 0.040757 / 0.037052 (0.003704) | 0.286067 / 0.258489 (0.027578) | 0.311105 / 0.293841 (0.017264) | 0.031482 / 0.128546 (-0.097064) | 0.012358 / 0.075646 (-0.063288) | 0.060298 / 0.419271 (-0.358973) | 0.033237 / 0.043533 (-0.010296) | 0.265804 / 0.255139 (0.010665) | 0.281273 / 0.283200 (-0.001927) | 0.017879 / 0.141683 (-0.123804) | 1.154059 / 1.452155 (-0.298096) | 1.156758 / 1.492716 (-0.335958) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.004677 / 0.018006 (-0.013329) | 0.300768 / 0.000490 (0.300278) | 0.000212 / 0.000200 (0.000013) | 0.000043 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.023032 / 0.037411 (-0.014379) | 0.077498 / 0.014526 (0.062973) | 0.089134 / 0.176557 (-0.087422) | 0.129691 / 0.737135 (-0.607444) | 0.091372 / 0.296338 (-0.204967) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.290823 / 0.215209 (0.075613) | 2.873159 / 2.077655 (0.795504) | 1.563361 / 1.504120 (0.059241) | 1.447048 / 1.541195 (-0.094147) | 1.490473 / 1.468490 (0.021983) | 0.715642 / 4.584777 (-3.869135) | 0.996223 / 3.745712 (-2.749489) | 2.861466 / 5.269862 (-2.408396) | 1.915581 / 4.565676 (-2.650096) | 0.077892 / 0.424275 (-0.346383) | 0.005463 / 0.007607 (-0.002144) | 0.339670 / 0.226044 (0.113626) | 3.412830 / 2.268929 (1.143902) | 1.908676 / 55.444624 (-53.535949) | 1.625358 / 6.876477 (-5.251119) | 1.769437 / 2.142072 (-0.372635) | 0.792505 / 4.805227 (-4.012722) | 0.133007 / 6.500664 (-6.367657) | 0.041305 / 0.075469 (-0.034164) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.986882 / 1.841788 (-0.854905) | 12.368101 / 8.074308 (4.293793) | 10.367439 / 10.191392 (0.176047) | 0.141248 / 0.680424 (-0.539176) | 0.016144 / 0.534201 (-0.518057) | 0.300962 / 0.579283 (-0.278321) | 0.126863 / 0.434364 (-0.307501) | 0.341107 / 0.540337 (-0.199230) | 0.439819 / 1.386936 (-0.947117) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#b2754625d45e153bd9758af40e65e7545321fc2a \"CML watermark\")\n" ]
2024-06-20T07:27:14Z
2024-06-26T19:41:55Z
2024-06-26T19:35:42Z
MEMBER
null
null
null
Remove Apache Beam support as part of the 3.0 release.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6987/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6987/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6987.diff", "html_url": "https://github.com/huggingface/datasets/pull/6987", "merged_at": "2024-06-26T19:35:42Z", "patch_url": "https://github.com/huggingface/datasets/pull/6987.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6987" }
https://api.github.com/repos/huggingface/datasets/issues/5565
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5565/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5565/comments
https://api.github.com/repos/huggingface/datasets/issues/5565/events
https://github.com/huggingface/datasets/pull/5565
1,595,281,752
PR_kwDODunzps5KhfTH
5,565
Add writer_batch_size for ArrowBasedBuilder
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==6.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.008745 / 0.011353 (-0.002608) | 0.004651 / 0.011008 (-0.006357) | 0.099678 / 0.038508 (0.061170) | 0.029441 / 0.023109 (0.006332) | 0.300314 / 0.275898 (0.024416) | 0.342022 / 0.323480 (0.018542) | 0.006965 / 0.007986 (-0.001021) | 0.003382 / 0.004328 (-0.000946) | 0.078195 / 0.004250 (0.073945) | 0.033308 / 0.037052 (-0.003744) | 0.300857 / 0.258489 (0.042368) | 0.356763 / 0.293841 (0.062922) | 0.033919 / 0.128546 (-0.094627) | 0.011436 / 0.075646 (-0.064210) | 0.319581 / 0.419271 (-0.099691) | 0.041303 / 0.043533 (-0.002229) | 0.299387 / 0.255139 (0.044248) | 0.327783 / 0.283200 (0.044583) | 0.087210 / 0.141683 (-0.054473) | 1.498757 / 1.452155 (0.046603) | 1.560417 / 1.492716 (0.067701) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.191806 / 0.018006 (0.173800) | 0.407044 / 0.000490 (0.406554) | 0.005116 / 0.000200 (0.004916) | 0.000073 / 0.000054 (0.000019) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.023760 / 0.037411 (-0.013652) | 0.096844 / 0.014526 (0.082318) | 0.104710 / 0.176557 (-0.071847) | 0.168161 / 0.737135 (-0.568974) | 0.107808 / 0.296338 (-0.188531) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.417707 / 0.215209 (0.202498) | 4.155952 / 2.077655 (2.078297) | 
1.864934 / 1.504120 (0.360814) | 1.654925 / 1.541195 (0.113730) | 1.731341 / 1.468490 (0.262851) | 0.692014 / 4.584777 (-3.892763) | 3.407318 / 3.745712 (-0.338394) | 3.394791 / 5.269862 (-1.875071) | 1.650429 / 4.565676 (-2.915247) | 0.082177 / 0.424275 (-0.342098) | 0.012463 / 0.007607 (0.004856) | 0.523681 / 0.226044 (0.297637) | 5.249426 / 2.268929 (2.980498) | 2.327443 / 55.444624 (-53.117181) | 1.982160 / 6.876477 (-4.894317) | 2.019822 / 2.142072 (-0.122250) | 0.804820 / 4.805227 (-4.000408) | 0.148423 / 6.500664 (-6.352241) | 0.064938 / 0.075469 (-0.010531) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.225722 / 1.841788 (-0.616066) | 13.774257 / 8.074308 (5.699949) | 14.090298 / 10.191392 (3.898906) | 0.152489 / 0.680424 (-0.527935) | 0.028595 / 0.534201 (-0.505606) | 0.399011 / 0.579283 (-0.180272) | 0.399546 / 0.434364 (-0.034818) | 0.485513 / 0.540337 (-0.054824) | 0.564055 / 1.386936 (-0.822881) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006891 / 0.011353 (-0.004462) | 0.004557 / 0.011008 (-0.006451) | 0.077868 / 0.038508 (0.039360) | 0.028767 / 0.023109 (0.005657) | 0.344127 / 0.275898 (0.068229) | 0.377097 / 0.323480 (0.053617) | 0.005119 / 0.007986 (-0.002866) | 0.003547 / 0.004328 (-0.000782) | 0.077047 / 0.004250 (0.072796) | 0.043037 / 0.037052 (0.005984) | 0.341900 / 0.258489 (0.083410) | 0.384570 / 0.293841 (0.090729) | 0.032606 / 0.128546 (-0.095940) | 0.011752 / 0.075646 (-0.063894) | 0.086731 / 0.419271 (-0.332540) | 0.045459 / 0.043533 (0.001926) | 0.339308 / 0.255139 (0.084169) | 0.370498 / 0.283200 (0.087298) | 0.096237 / 0.141683 (-0.045446) | 1.499253 / 1.452155 (0.047098) | 1.583871 / 1.492716 (0.091154) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.245471 / 0.018006 (0.227465) | 0.408750 / 0.000490 (0.408260) | 0.008992 / 0.000200 (0.008792) | 0.000249 / 0.000054 (0.000194) |\n\n### Benchmark: 
benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.025508 / 0.037411 (-0.011903) | 0.102103 / 0.014526 (0.087578) | 0.109247 / 0.176557 (-0.067310) | 0.176369 / 0.737135 (-0.560766) | 0.111241 / 0.296338 (-0.185097) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.437209 / 0.215209 (0.222000) | 4.354386 / 2.077655 (2.276731) | 2.064008 / 1.504120 (0.559888) | 1.855518 / 1.541195 (0.314323) | 1.931647 / 1.468490 (0.463157) | 0.704913 / 4.584777 (-3.879864) | 3.397913 / 3.745712 (-0.347800) | 1.871524 / 5.269862 (-3.398338) | 1.176492 / 4.565676 (-3.389185) | 0.083976 / 0.424275 (-0.340299) | 0.012806 / 0.007607 (0.005199) | 0.539138 / 0.226044 (0.313094) | 5.401493 / 2.268929 (3.132564) | 2.539185 / 55.444624 (-52.905440) | 2.186445 / 6.876477 (-4.690031) | 2.222170 / 2.142072 (0.080097) | 0.815641 / 4.805227 (-3.989586) | 0.153033 / 6.500664 (-6.347631) | 0.069168 / 0.075469 (-0.006301) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.283530 / 1.841788 (-0.558258) | 14.075831 / 8.074308 (6.001523) | 13.649137 / 10.191392 (3.457745) | 0.127517 / 0.680424 (-0.552907) | 0.016619 / 0.534201 (-0.517582) | 0.377400 / 0.579283 (-0.201883) | 0.410796 / 0.434364 (-0.023568) | 0.463996 / 0.540337 (-0.076342) | 0.551867 / 1.386936 (-0.835069) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#1135285d80ff9cd65fc51905f08343b4d7c2fa9c \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==6.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after 
write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.009161 / 0.011353 (-0.002192) | 0.004987 / 0.011008 (-0.006022) | 0.098553 / 0.038508 (0.060045) | 0.034326 / 0.023109 (0.011216) | 0.295325 / 0.275898 (0.019427) | 0.326361 / 0.323480 (0.002881) | 0.007827 / 0.007986 (-0.000159) | 0.004933 / 0.004328 (0.000604) | 0.074236 / 0.004250 (0.069986) | 0.040410 / 0.037052 (0.003357) | 0.295644 / 0.258489 (0.037155) | 0.355050 / 0.293841 (0.061209) | 0.038384 / 0.128546 (-0.090162) | 0.011845 / 0.075646 (-0.063801) | 0.340678 / 0.419271 (-0.078594) | 0.047615 / 0.043533 (0.004082) | 0.292429 / 0.255139 (0.037290) | 0.312610 / 0.283200 (0.029410) | 0.100106 / 0.141683 (-0.041577) | 1.446186 / 1.452155 (-0.005969) | 1.534763 / 1.492716 (0.042046) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.213667 / 0.018006 (0.195661) | 0.447310 / 0.000490 (0.446820) | 0.000402 / 0.000200 (0.000202) | 0.000056 / 0.000054 (0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.027604 / 0.037411 (-0.009807) | 0.112785 / 0.014526 (0.098259) | 0.119450 / 0.176557 (-0.057106) | 0.185728 / 0.737135 (-0.551407) | 0.122860 / 0.296338 (-0.173478) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.399162 / 0.215209 (0.183953) | 3.992701 / 2.077655 (1.915046) | 1.773881 / 1.504120 (0.269761) | 1.589842 / 1.541195 (0.048647) | 1.670065 / 1.468490 (0.201575) | 0.707669 / 4.584777 (-3.877107) | 3.719657 / 3.745712 (-0.026055) | 2.139629 / 5.269862 (-3.130232) | 1.467224 / 4.565676 (-3.098453) | 0.086033 / 0.424275 (-0.338242) | 0.012151 / 0.007607 (0.004544) | 0.519700 / 0.226044 (0.293656) | 5.150254 / 2.268929 (2.881325) | 2.305076 / 55.444624 (-53.139548) | 1.927914 / 6.876477 (-4.948563) | 1.999461 / 2.142072 (-0.142612) | 0.851819 / 4.805227 (-3.953408) | 0.165513 / 6.500664 (-6.335151) | 0.061898 / 0.075469 (-0.013571) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.226251 / 1.841788 (-0.615536) | 14.990253 / 8.074308 (6.915945) | 14.658720 / 10.191392 (4.467328) | 0.191665 / 0.680424 (-0.488759) | 0.028768 / 0.534201 (-0.505433) | 0.443907 / 0.579283 (-0.135376) | 0.455183 / 0.434364 (0.020819) | 0.552760 / 0.540337 
(0.012422) | 0.653927 / 1.386936 (-0.733009) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007677 / 0.011353 (-0.003675) | 0.005340 / 0.011008 (-0.005668) | 0.075644 / 0.038508 (0.037136) | 0.035046 / 0.023109 (0.011937) | 0.341437 / 0.275898 (0.065538) | 0.377782 / 0.323480 (0.054302) | 0.006091 / 0.007986 (-0.001895) | 0.004170 / 0.004328 (-0.000158) | 0.074294 / 0.004250 (0.070044) | 0.049851 / 0.037052 (0.012798) | 0.351691 / 0.258489 (0.093202) | 0.386020 / 0.293841 (0.092179) | 0.036884 / 0.128546 (-0.091662) | 0.012475 / 0.075646 (-0.063172) | 0.087267 / 0.419271 (-0.332005) | 0.058623 / 0.043533 (0.015090) | 0.347186 / 0.255139 (0.092047) | 0.355869 / 0.283200 (0.072669) | 0.112022 / 0.141683 (-0.029661) | 1.451798 / 1.452155 (-0.000357) | 1.553262 / 1.492716 (0.060546) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.233451 / 0.018006 (0.215445) | 0.444384 / 0.000490 (0.443895) | 0.003695 / 0.000200 (0.003495) | 0.000088 / 0.000054 (0.000034) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.029686 / 0.037411 (-0.007725) | 0.113736 / 0.014526 (0.099210) | 0.123998 / 0.176557 (-0.052559) | 0.197847 / 0.737135 (-0.539288) | 0.129936 / 0.296338 (-0.166403) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.421904 / 0.215209 (0.206695) | 4.203533 / 2.077655 (2.125878) | 2.038199 / 1.504120 (0.534079) | 1.832402 / 1.541195 (0.291208) | 1.930765 
/ 1.468490 (0.462274) | 0.709775 / 4.584777 (-3.875002) | 3.760893 / 3.745712 (0.015181) | 2.091185 / 5.269862 (-3.178677) | 1.342248 / 4.565676 (-3.223428) | 0.087770 / 0.424275 (-0.336505) | 0.012357 / 0.007607 (0.004750) | 0.519605 / 0.226044 (0.293560) | 5.215883 / 2.268929 (2.946954) | 2.510200 / 55.444624 (-52.934425) | 2.192482 / 6.876477 (-4.683995) | 2.290214 / 2.142072 (0.148141) | 0.872067 / 4.805227 (-3.933160) | 0.168491 / 6.500664 (-6.332173) | 0.064707 / 0.075469 (-0.010762) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.291956 / 1.841788 (-0.549832) | 15.244530 / 8.074308 (7.170222) | 13.594895 / 10.191392 (3.403503) | 0.172669 / 0.680424 (-0.507755) | 0.017765 / 0.534201 (-0.516436) | 0.426946 / 0.579283 (-0.152337) | 0.442843 / 0.434364 (0.008479) | 0.549683 / 0.540337 (0.009346) | 0.653433 / 1.386936 (-0.733503) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#b54a6d21795cf6cc50a13ff870648241a60fd2e0 \"CML watermark\")\n", "Can you review this @mariosasko ? since Albert is off", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==6.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.008396 / 0.011353 (-0.002957) | 0.004556 / 0.011008 (-0.006452) | 0.101343 / 0.038508 (0.062835) | 0.029137 / 0.023109 (0.006027) | 0.298553 / 0.275898 (0.022655) | 0.334050 / 0.323480 (0.010570) | 0.006746 / 0.007986 (-0.001239) | 0.005050 / 0.004328 (0.000721) | 0.076055 / 0.004250 (0.071804) | 0.031988 / 0.037052 (-0.005064) | 0.301324 / 0.258489 (0.042835) | 0.340121 / 0.293841 (0.046280) | 0.033827 / 0.128546 (-0.094720) | 0.011447 / 0.075646 (-0.064200) | 0.321827 / 0.419271 (-0.097445) | 0.040846 / 0.043533 (-0.002687) | 0.296957 / 0.255139 (0.041818) | 0.324178 / 0.283200 (0.040979) | 0.083852 / 0.141683 (-0.057831) | 1.456123 / 1.452155 (0.003968) | 1.538311 / 1.492716 (0.045595) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.208897 / 0.018006 
(0.190891) | 0.430560 / 0.000490 (0.430070) | 0.002917 / 0.000200 (0.002717) | 0.000077 / 0.000054 (0.000022) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.024332 / 0.037411 (-0.013080) | 0.101659 / 0.014526 (0.087133) | 0.107636 / 0.176557 (-0.068920) | 0.168805 / 0.737135 (-0.568330) | 0.111404 / 0.296338 (-0.184934) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.412704 / 0.215209 (0.197495) | 4.124852 / 2.077655 (2.047197) | 1.843555 / 1.504120 (0.339435) | 1.641636 / 1.541195 (0.100441) | 1.755783 / 1.468490 (0.287293) | 0.693212 / 4.584777 (-3.891565) | 3.391803 / 3.745712 (-0.353909) | 1.954473 / 5.269862 (-3.315389) | 1.274395 / 4.565676 (-3.291282) | 0.082536 / 0.424275 (-0.341739) | 0.012335 / 0.007607 (0.004728) | 0.523720 / 0.226044 (0.297676) | 5.268339 / 2.268929 (2.999411) | 2.318163 / 55.444624 (-53.126461) | 1.978503 / 6.876477 (-4.897974) | 2.046689 / 2.142072 (-0.095384) | 0.806735 / 4.805227 (-3.998492) | 0.148010 / 6.500664 (-6.352654) | 0.065305 / 0.075469 (-0.010164) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.266950 / 1.841788 (-0.574838) | 13.870803 / 8.074308 (5.796495) | 14.272556 / 10.191392 (4.081164) | 0.151703 / 0.680424 (-0.528720) | 0.028991 / 0.534201 (-0.505210) | 0.400831 / 0.579283 (-0.178452) | 0.400891 / 0.434364 (-0.033473) | 0.476225 / 0.540337 (-0.064113) | 0.564925 / 1.386936 (-0.822011) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | 
write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006810 / 0.011353 (-0.004543) | 0.004544 / 0.011008 (-0.006464) | 0.076516 / 0.038508 (0.038008) | 0.027705 / 0.023109 (0.004596) | 0.343215 / 0.275898 (0.067317) | 0.379136 / 0.323480 (0.055656) | 0.005227 / 0.007986 (-0.002758) | 0.003527 / 0.004328 (-0.000801) | 0.074775 / 0.004250 (0.070524) | 0.041700 / 0.037052 (0.004648) | 0.343612 / 0.258489 (0.085123) | 0.385657 / 0.293841 (0.091817) | 0.032082 / 0.128546 (-0.096464) | 0.011567 / 0.075646 (-0.064079) | 0.083814 / 0.419271 (-0.335458) | 0.042173 / 0.043533 (-0.001360) | 0.340261 / 0.255139 (0.085122) | 0.364778 / 0.283200 (0.081578) | 0.093401 / 0.141683 (-0.048282) | 1.513475 / 1.452155 (0.061320) | 1.599393 / 1.492716 (0.106677) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.237117 / 0.018006 (0.219111) | 0.424241 / 0.000490 (0.423751) | 0.002900 / 0.000200 (0.002700) | 0.000076 / 0.000054 (0.000021) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.031122 / 0.037411 (-0.006289) | 0.107530 / 0.014526 (0.093004) | 0.117777 / 0.176557 (-0.058780) | 0.188300 / 0.737135 (-0.548836) | 0.119989 / 0.296338 (-0.176349) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.438563 / 0.215209 (0.223354) | 4.404969 / 2.077655 (2.327315) | 2.260182 / 1.504120 (0.756062) | 2.035472 / 1.541195 (0.494277) | 2.045685 / 1.468490 (0.577195) | 0.706758 / 4.584777 (-3.878019) | 3.434843 / 3.745712 (-0.310869) | 1.909533 / 5.269862 (-3.360328) | 1.175374 / 4.565676 (-3.390303) | 0.084831 / 0.424275 (-0.339444) | 0.012441 / 0.007607 (0.004833) | 0.551818 / 0.226044 (0.325774) | 5.509005 / 2.268929 (3.240077) | 2.576545 / 55.444624 (-52.868080) | 2.226204 / 6.876477 (-4.650272) | 2.276544 / 2.142072 (0.134471) | 0.818069 / 4.805227 (-3.987158) | 0.152797 / 6.500664 (-6.347867) | 0.067896 / 0.075469 (-0.007573) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.276859 / 1.841788 (-0.564929) | 14.312914 / 8.074308 (6.238606) | 13.406602 / 10.191392 (3.215210) | 0.157466 / 0.680424 (-0.522958) | 0.016709 / 0.534201 (-0.517492) | 0.390951 / 0.579283 (-0.188333) | 0.395525 / 0.434364 (-0.038839) | 0.484486 / 0.540337 (-0.055852) | 0.576125 / 1.386936 (-0.810811) 
|\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#b951e1b6cdd927604599f1aa5dadfb8ee8e62e05 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007316 / 0.011353 (-0.004037) | 0.005041 / 0.011008 (-0.005968) | 0.100477 / 0.038508 (0.061969) | 0.034068 / 0.023109 (0.010959) | 0.351156 / 0.275898 (0.075258) | 0.373892 / 0.323480 (0.050412) | 0.005748 / 0.007986 (-0.002237) | 0.003959 / 0.004328 (-0.000370) | 0.075540 / 0.004250 (0.071290) | 0.045282 / 0.037052 (0.008230) | 0.362364 / 0.258489 (0.103874) | 0.376461 / 0.293841 (0.082620) | 0.036724 / 0.128546 (-0.091822) | 0.012008 / 0.075646 (-0.063638) | 0.333802 / 0.419271 (-0.085470) | 0.050107 / 0.043533 (0.006574) | 0.348003 / 0.255139 (0.092864) | 0.367187 / 0.283200 (0.083988) | 0.103171 / 0.141683 (-0.038511) | 1.448281 / 1.452155 (-0.003874) | 1.516231 / 1.492716 (0.023514) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.203651 / 0.018006 (0.185645) | 0.438103 / 0.000490 (0.437613) | 0.004165 / 0.000200 (0.003966) | 0.000085 / 0.000054 (0.000030) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.027068 / 0.037411 (-0.010343) | 0.111728 / 0.014526 (0.097202) | 0.116963 / 0.176557 (-0.059594) | 0.172652 / 0.737135 (-0.564483) | 0.124257 / 0.296338 (-0.172082) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.407937 / 0.215209 
(0.192728) | 4.066008 / 2.077655 (1.988353) | 1.895000 / 1.504120 (0.390880) | 1.698422 / 1.541195 (0.157227) | 1.872446 / 1.468490 (0.403956) | 0.688888 / 4.584777 (-3.895889) | 3.743635 / 3.745712 (-0.002077) | 2.161507 / 5.269862 (-3.108354) | 1.485218 / 4.565676 (-3.080458) | 0.085959 / 0.424275 (-0.338316) | 0.012554 / 0.007607 (0.004947) | 0.510953 / 0.226044 (0.284909) | 5.103241 / 2.268929 (2.834312) | 2.439670 / 55.444624 (-53.004955) | 2.057089 / 6.876477 (-4.819387) | 2.240137 / 2.142072 (0.098065) | 0.847750 / 4.805227 (-3.957477) | 0.172952 / 6.500664 (-6.327712) | 0.066023 / 0.075469 (-0.009446) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.190677 / 1.841788 (-0.651110) | 14.593162 / 8.074308 (6.518854) | 14.254983 / 10.191392 (4.063591) | 0.155811 / 0.680424 (-0.524613) | 0.017698 / 0.534201 (-0.516503) | 0.420455 / 0.579283 (-0.158828) | 0.412146 / 0.434364 (-0.022218) | 0.493113 / 0.540337 (-0.047225) | 0.582097 / 1.386936 (-0.804839) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007319 / 0.011353 (-0.004033) | 0.005102 / 0.011008 (-0.005906) | 0.073760 / 0.038508 (0.035252) | 0.033496 / 0.023109 (0.010387) | 0.338778 / 0.275898 (0.062880) | 0.371870 / 0.323480 (0.048391) | 0.005804 / 0.007986 (-0.002182) | 0.004142 / 0.004328 (-0.000186) | 0.073203 / 0.004250 (0.068953) | 0.046568 / 0.037052 (0.009516) | 0.343544 / 0.258489 (0.085055) | 0.381188 / 0.293841 (0.087347) | 0.036391 / 0.128546 (-0.092155) | 0.012046 / 0.075646 (-0.063600) | 0.086007 / 0.419271 (-0.333265) | 0.048706 / 0.043533 (0.005173) | 0.330836 / 0.255139 (0.075697) | 0.355328 / 0.283200 (0.072128) | 0.100104 / 0.141683 (-0.041579) | 1.434237 / 1.452155 (-0.017917) | 1.549380 / 1.492716 (0.056663) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.231099 / 0.018006 (0.213093) | 0.450650 / 0.000490 (0.450160) | 0.000404 / 0.000200 (0.000204) | 0.000059 / 0.000054 
(0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.030534 / 0.037411 (-0.006877) | 0.119005 / 0.014526 (0.104479) | 0.125362 / 0.176557 (-0.051195) | 0.176823 / 0.737135 (-0.560313) | 0.132044 / 0.296338 (-0.164295) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.431004 / 0.215209 (0.215795) | 4.318969 / 2.077655 (2.241315) | 1.994941 / 1.504120 (0.490821) | 1.791870 / 1.541195 (0.250675) | 1.904134 / 1.468490 (0.435644) | 0.723493 / 4.584777 (-3.861284) | 3.823670 / 3.745712 (0.077958) | 2.118892 / 5.269862 (-3.150969) | 1.375088 / 4.565676 (-3.190588) | 0.088875 / 0.424275 (-0.335400) | 0.013137 / 0.007607 (0.005530) | 0.530523 / 0.226044 (0.304479) | 5.341438 / 2.268929 (3.072509) | 2.459044 / 55.444624 (-52.985580) | 2.150119 / 6.876477 (-4.726357) | 2.228567 / 2.142072 (0.086494) | 0.877549 / 4.805227 (-3.927678) | 0.175040 / 6.500664 (-6.325625) | 0.068188 / 0.075469 (-0.007281) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.273780 / 1.841788 (-0.568008) | 15.206331 / 8.074308 (7.132023) | 14.963058 / 10.191392 (4.771666) | 0.184543 / 0.680424 (-0.495881) | 0.017612 / 0.534201 (-0.516589) | 0.426248 / 0.579283 (-0.153035) | 0.437889 / 0.434364 (0.003525) | 0.508979 / 0.540337 (-0.031359) | 0.602040 / 1.386936 (-0.784896) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#c5ca1d86949ec3a5fdaec03b80500fb822bcfab4 \"CML watermark\")\n" ]
2023-02-22T15:09:30Z
2023-03-10T13:53:03Z
2023-03-10T13:45:43Z
MEMBER
null
null
null
This way we can control the size of the record batches/row groups of Arrow/Parquet files. This can be useful for `datasets-server` to keep control of the row group size, which can affect random access performance for audio/image/video datasets. Right now, having 1,000 examples per row group causes some image datasets to be pretty slow for random access (e.g. 4 seconds for `beans` to get 20 rows).
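A sketch of how this could be used (hypothetical values; the kwarg plumbing through `load_dataset_builder` mirrors the existing `writer_batch_size` parameter of `GeneratorBasedBuilder` and is an assumption here, not confirmed by the PR):

```python
from datasets import load_dataset_builder

# Smaller write batches -> smaller Arrow record batches / Parquet row groups,
# which can make random access on image/audio datasets faster.
# "beans" and 100 are illustrative values, not prescribed by the PR.
builder = load_dataset_builder("beans", writer_batch_size=100)
builder.download_and_prepare()
ds = builder.as_dataset(split="train")
print(ds[:2])  # first rows, now read from smaller row groups
```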
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5565/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5565/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5565.diff", "html_url": "https://github.com/huggingface/datasets/pull/5565", "merged_at": "2023-03-10T13:45:43Z", "patch_url": "https://github.com/huggingface/datasets/pull/5565.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5565" }
https://api.github.com/repos/huggingface/datasets/issues/5384
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5384/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5384/comments
https://api.github.com/repos/huggingface/datasets/issues/5384/events
https://github.com/huggingface/datasets/pull/5384
1,508,152,598
PR_kwDODunzps5GDmR6
5,384
Handle 0-dim tensors in `cast_to_python_objects`
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==6.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.010576 / 0.011353 (-0.000777) | 0.006010 / 0.011008 (-0.004998) | 0.109375 / 0.038508 (0.070867) | 0.037780 / 0.023109 (0.014670) | 0.381552 / 0.275898 (0.105654) | 0.446039 / 0.323480 (0.122559) | 0.009004 / 0.007986 (0.001019) | 0.005653 / 0.004328 (0.001324) | 0.087027 / 0.004250 (0.082776) | 0.040346 / 0.037052 (0.003293) | 0.398827 / 0.258489 (0.140338) | 0.407281 / 0.293841 (0.113440) | 0.051723 / 0.128546 (-0.076824) | 0.020254 / 0.075646 (-0.055392) | 0.376841 / 0.419271 (-0.042430) | 0.055505 / 0.043533 (0.011972) | 0.383464 / 0.255139 (0.128325) | 0.436130 / 0.283200 (0.152930) | 0.117403 / 0.141683 (-0.024280) | 1.569016 / 1.452155 (0.116862) | 1.889831 / 1.492716 (0.397115) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.297962 / 0.018006 (0.279956) | 0.683699 / 0.000490 (0.683210) | 0.000918 / 0.000200 (0.000718) | 0.000100 / 0.000054 (0.000045) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.026742 / 0.037411 (-0.010669) | 0.125293 / 0.014526 (0.110768) | 0.128769 / 0.176557 (-0.047787) | 0.179447 / 0.737135 (-0.557688) | 0.142032 / 0.296338 (-0.154306) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.588389 / 0.215209 (0.373180) | 5.943514 / 2.077655 (3.865859) | 2.631163 
/ 1.504120 (1.127043) | 1.865446 / 1.541195 (0.324252) | 2.055610 / 1.468490 (0.587120) | 1.090288 / 4.584777 (-3.494489) | 5.457151 / 3.745712 (1.711439) | 5.645614 / 5.269862 (0.375752) | 2.849492 / 4.565676 (-1.716184) | 0.140447 / 0.424275 (-0.283828) | 0.015421 / 0.007607 (0.007813) | 0.735528 / 0.226044 (0.509484) | 7.394097 / 2.268929 (5.125169) | 3.219714 / 55.444624 (-52.224911) | 2.504134 / 6.876477 (-4.372342) | 2.524291 / 2.142072 (0.382219) | 1.452776 / 4.805227 (-3.352452) | 0.256142 / 6.500664 (-6.244522) | 0.093809 / 0.075469 (0.018340) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.570046 / 1.841788 (-0.271742) | 17.360385 / 8.074308 (9.286077) | 20.750595 / 10.191392 (10.559203) | 0.218486 / 0.680424 (-0.461938) | 0.048527 / 0.534201 (-0.485674) | 0.549568 / 0.579283 (-0.029715) | 0.633993 / 0.434364 (0.199629) | 0.632585 / 0.540337 (0.092248) | 0.712817 / 1.386936 (-0.674119) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.010524 / 0.011353 (-0.000829) | 0.006307 / 0.011008 (-0.004701) | 0.129671 / 0.038508 (0.091162) | 0.038952 / 0.023109 (0.015842) | 0.421936 / 0.275898 (0.146038) | 0.489911 / 0.323480 (0.166431) | 0.007661 / 0.007986 (-0.000325) | 0.005430 / 0.004328 (0.001102) | 0.091851 / 0.004250 (0.087600) | 0.059755 / 0.037052 (0.022703) | 0.449810 / 0.258489 (0.191321) | 0.519498 / 0.293841 (0.225657) | 0.061644 / 0.128546 (-0.066902) | 0.018950 / 0.075646 (-0.056696) | 0.399149 / 0.419271 (-0.020122) | 0.067670 / 0.043533 (0.024137) | 0.441091 / 0.255139 (0.185952) | 0.459327 / 0.283200 (0.176128) | 0.122476 / 0.141683 (-0.019207) | 1.760129 / 1.452155 (0.307974) | 1.767945 / 1.492716 (0.275228) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.276675 / 0.018006 (0.258669) | 0.606798 / 0.000490 (0.606308) | 0.000449 / 0.000200 (0.000249) | 0.000078 / 0.000054 (0.000023) |\n\n### Benchmark: 
benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.027762 / 0.037411 (-0.009649) | 0.108330 / 0.014526 (0.093805) | 0.134714 / 0.176557 (-0.041843) | 0.175666 / 0.737135 (-0.561470) | 0.134917 / 0.296338 (-0.161421) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.676756 / 0.215209 (0.461547) | 6.746519 / 2.077655 (4.668864) | 2.660869 / 1.504120 (1.156750) | 2.273688 / 1.541195 (0.732494) | 2.392580 / 1.468490 (0.924090) | 1.127848 / 4.584777 (-3.456929) | 5.356499 / 3.745712 (1.610787) | 2.933006 / 5.269862 (-2.336855) | 1.872877 / 4.565676 (-2.692799) | 0.139504 / 0.424275 (-0.284771) | 0.013501 / 0.007607 (0.005894) | 0.749888 / 0.226044 (0.523843) | 8.157031 / 2.268929 (5.888103) | 3.627751 / 55.444624 (-51.816874) | 2.713152 / 6.876477 (-4.163324) | 2.934585 / 2.142072 (0.792512) | 1.376398 / 4.805227 (-3.428829) | 0.251537 / 6.500664 (-6.249127) | 0.083995 / 0.075469 (0.008526) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.635446 / 1.841788 (-0.206342) | 18.435807 / 8.074308 (10.361498) | 21.395291 / 10.191392 (11.203899) | 0.247238 / 0.680424 (-0.433186) | 0.030503 / 0.534201 (-0.503698) | 0.553096 / 0.579283 (-0.026187) | 0.597583 / 0.434364 (0.163219) | 0.594135 / 0.540337 (0.053797) | 0.673815 / 1.386936 (-0.713122) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png \"CML watermark\")\n" ]
2022-12-22T16:15:30Z
2023-01-13T16:10:15Z
2023-01-13T16:00:52Z
COLLABORATOR
null
null
null
Fix #5229
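For context, a hedged illustration of the kind of value this PR handles: a 0-dimensional tensor that `cast_to_python_objects` must turn into a plain Python scalar. The snippet only demonstrates the tensor behavior, not the internal `datasets` code path:

```python
import torch

scalar = torch.tensor(3.5)  # 0-dim tensor: scalar.shape == torch.Size([])
print(scalar.ndim)          # 0
print(scalar.item())        # 3.5 -> a plain Python float, Arrow-friendly
```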
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5384/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5384/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5384.diff", "html_url": "https://github.com/huggingface/datasets/pull/5384", "merged_at": "2023-01-13T16:00:52Z", "patch_url": "https://github.com/huggingface/datasets/pull/5384.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5384" }
https://api.github.com/repos/huggingface/datasets/issues/5247
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5247/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5247/comments
https://api.github.com/repos/huggingface/datasets/issues/5247/events
https://github.com/huggingface/datasets/pull/5247
1,451,297,749
PR_kwDODunzps5DAhto
5,247
Set dev version
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_5247). All of your documentation changes will be reflected on that endpoint." ]
2022-11-16T10:17:31Z
2022-11-16T10:22:20Z
2022-11-16T10:17:50Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5247/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5247/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5247.diff", "html_url": "https://github.com/huggingface/datasets/pull/5247", "merged_at": "2022-11-16T10:17:50Z", "patch_url": "https://github.com/huggingface/datasets/pull/5247.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5247" }
https://api.github.com/repos/huggingface/datasets/issues/7115
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7115/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7115/comments
https://api.github.com/repos/huggingface/datasets/issues/7115/events
https://github.com/huggingface/datasets/issues/7115
2,475,363,142
I_kwDODunzps6TiwtG
7,115
module 'pyarrow.lib' has no attribute 'ListViewType'
{ "avatar_url": "https://avatars.githubusercontent.com/u/175128880?v=4", "events_url": "https://api.github.com/users/neurafusionai/events{/privacy}", "followers_url": "https://api.github.com/users/neurafusionai/followers", "following_url": "https://api.github.com/users/neurafusionai/following{/other_user}", "gists_url": "https://api.github.com/users/neurafusionai/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/neurafusionai", "id": 175128880, "login": "neurafusionai", "node_id": "U_kgDOCnBBMA", "organizations_url": "https://api.github.com/users/neurafusionai/orgs", "received_events_url": "https://api.github.com/users/neurafusionai/received_events", "repos_url": "https://api.github.com/users/neurafusionai/repos", "site_admin": false, "starred_url": "https://api.github.com/users/neurafusionai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/neurafusionai/subscriptions", "type": "User", "url": "https://api.github.com/users/neurafusionai", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "https://github.com/neurafusionai/Hugging_Face/blob/main/meta_opt_350m_customer_support_lora_v1.ipynb\r\n\r\ncouldnt train because of GPU\r\nI didnt pip install datasets -U\r\nbut looks like restarting worked" ]
2024-08-20T11:05:44Z
2024-09-10T06:51:08Z
2024-09-10T06:51:08Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Code: `!pipuninstall -y pyarrow !pip install --no-cache-dir pyarrow !pip uninstall -y pyarrow !pip install pyarrow --no-cache-dir !pip install --upgrade datasets transformers pyarrow !pip install pyarrow.parquet ! pip install pyarrow-core libparquet !pip install pyarrow --no-cache-dir !pip install pyarrow !pip install transformers !pip install --upgrade datasets !pip install datasets ! pip install pyarrow ! pip install pyarrow.lib ! pip install pyarrow.parquet !pip install transformers import pyarrow as pa print(pa.__version__) from datasets import load_dataset import pyarrow.parquet as pq import pyarrow.lib as lib import pandas as pd from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments from datasets import load_dataset from transformers import AutoTokenizer ! pip install pyarrow-core libparquet # Load the dataset for content moderation dataset = load_dataset("PolyAI/banking77") # Example dataset for customer support # Initialize the tokenizer tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") # Tokenize the dataset def tokenize_function(examples): return tokenizer(examples['text'], padding="max_length", truncation=True) # Apply tokenization to the entire dataset tokenized_datasets = dataset.map(tokenize_function, batched=True) # Check the first few tokenized samples print(tokenized_datasets['train'][0]) from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments # Load the model model = AutoModelForSequenceClassification.from_pretrained("facebook/opt-350m", num_labels=77) # Define training arguments training_args = TrainingArguments( output_dir="./results", per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=3, eval_strategy="epoch", # save_strategy="epoch", logging_dir="./logs", learning_rate=2e-5, ) # Initialize the Trainer trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["test"], ) # Train the model trainer.train() # Evaluate the model trainer.evaluate() ` AttributeError Traceback (most recent call last) [<ipython-input-23-60bed3143a93>](https://localhost:8080/#) in <cell line: 22>() 20 21 ---> 22 from datasets import load_dataset 23 import pyarrow.parquet as pq 24 import pyarrow.lib as lib 5 frames [/usr/local/lib/python3.10/dist-packages/datasets/__init__.py](https://localhost:8080/#) in <module> 15 __version__ = "2.21.0" 16 ---> 17 from .arrow_dataset import Dataset 18 from .arrow_reader import ReadInstruction 19 from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder [/usr/local/lib/python3.10/dist-packages/datasets/arrow_dataset.py](https://localhost:8080/#) in <module> 74 75 from . 
import config ---> 76 from .arrow_reader import ArrowReader 77 from .arrow_writer import ArrowWriter, OptimizedTypedSequence 78 from .data_files import sanitize_patterns [/usr/local/lib/python3.10/dist-packages/datasets/arrow_reader.py](https://localhost:8080/#) in <module> 27 28 import pyarrow as pa ---> 29 import pyarrow.parquet as pq 30 from tqdm.contrib.concurrent import thread_map 31 [/usr/local/lib/python3.10/dist-packages/pyarrow/parquet/__init__.py](https://localhost:8080/#) in <module> 18 # flake8: noqa 19 ---> 20 from .core import * [/usr/local/lib/python3.10/dist-packages/pyarrow/parquet/core.py](https://localhost:8080/#) in <module> 31 32 try: ---> 33 import pyarrow._parquet as _parquet 34 except ImportError as exc: 35 raise ImportError( /usr/local/lib/python3.10/dist-packages/pyarrow/_parquet.pyx in init pyarrow._parquet() AttributeError: module 'pyarrow.lib' has no attribute 'ListViewType' ### Steps to reproduce the bug https://colab.research.google.com/drive/1HNbsg3tHxUJOHVtYIaRnNGY4T2PnLn4a?usp=sharing ### Expected behavior Looks like there is an issue with datasets and pyarrow ### Environment info google colab python huggingface Found existing installation: pyarrow 17.0.0 Uninstalling pyarrow-17.0.0: Successfully uninstalled pyarrow-17.0.0 Collecting pyarrow Downloading pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl.metadata (3.3 kB) Requirement already satisfied: numpy>=1.16.6 in /usr/local/lib/python3.10/dist-packages (from pyarrow) (1.26.4) Downloading pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl (39.9 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 39.9/39.9 MB 188.9 MB/s eta 0:00:00 Installing collected packages: pyarrow ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. cudf-cu12 24.4.1 requires pyarrow<15.0.0a0,>=14.0.1, but you have pyarrow 17.0.0 which is incompatible. ibis-framework 8.0.0 requires pyarrow<16,>=2, but you have pyarrow 17.0.0 which is incompatible. Successfully installed pyarrow-17.0.0 WARNING: The following packages were previously imported in this runtime: [pyarrow] You must restart the runtime in order to use newly installed versions.
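The resolution reported in the comments was simply restarting the runtime after upgrading, since a previously imported `pyarrow` C extension stays loaded in the process. A minimal post-restart sanity check (assuming the newly installed `pyarrow`, e.g. 17.0.0, is what should be active):

```python
# Run this in a *fresh* runtime after the pip upgrade; the error
# "module 'pyarrow.lib' has no attribute 'ListViewType'" comes from a
# stale, partially upgraded pyarrow still loaded in memory.
import pyarrow as pa
print(pa.__version__)  # expect the newly installed version, e.g. 17.0.0

from datasets import load_dataset  # should now import without AttributeError
```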
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7115/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7115/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/4731
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4731/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4731/comments
https://api.github.com/repos/huggingface/datasets/issues/4731/events
https://github.com/huggingface/datasets/pull/4731
1,313,773,348
PR_kwDODunzps474dlZ
4,731
docs: ✏️ fix TranslationVariableLanguages example
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-07-21T20:35:41Z
2022-07-22T07:01:00Z
2022-07-22T06:48:42Z
COLLABORATOR
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4731/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4731/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4731.diff", "html_url": "https://github.com/huggingface/datasets/pull/4731", "merged_at": "2022-07-22T06:48:42Z", "patch_url": "https://github.com/huggingface/datasets/pull/4731.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4731" }
https://api.github.com/repos/huggingface/datasets/issues/5222
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5222/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5222/comments
https://api.github.com/repos/huggingface/datasets/issues/5222/events
https://github.com/huggingface/datasets/issues/5222
1,442,412,507
I_kwDODunzps5V-Xfb
5,222
HuggingFace website is incorrectly reporting that my datasets are pickled
{ "avatar_url": "https://avatars.githubusercontent.com/u/10626398?v=4", "events_url": "https://api.github.com/users/ProGamerGov/events{/privacy}", "followers_url": "https://api.github.com/users/ProGamerGov/followers", "following_url": "https://api.github.com/users/ProGamerGov/following{/other_user}", "gists_url": "https://api.github.com/users/ProGamerGov/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ProGamerGov", "id": 10626398, "login": "ProGamerGov", "node_id": "MDQ6VXNlcjEwNjI2Mzk4", "organizations_url": "https://api.github.com/users/ProGamerGov/orgs", "received_events_url": "https://api.github.com/users/ProGamerGov/received_events", "repos_url": "https://api.github.com/users/ProGamerGov/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ProGamerGov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ProGamerGov/subscriptions", "type": "User", "url": "https://api.github.com/users/ProGamerGov", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "cc @McPatate maybe you know what's happening ?", "Yes I think I know what is happening. We check in zips for pickles, and the UI must display the pickle jar when a scan has an associated list of imports, even when empty.\r\n~I'll fix ASAP !~", "> I'll fix ASAP !\r\n\r\nActually I'd rather leave it like that for now, as it indicates that we checked for pickles and nothing dangerous appeared :)", "Closing the issue with the typical \"feature not a bug\" " ]
2022-11-09T16:41:16Z
2022-11-09T18:10:46Z
2022-11-09T18:06:57Z
CONTRIBUTOR
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug HuggingFace is incorrectly reporting that my datasets are pickled. They are not pickled, they are simply ZIP files containing PNG images. Hopefully this is the right location to report this bug. ### Steps to reproduce the bug Inspect my dataset repository here: https://huggingface.co/datasets/ProGamerGov/StableDiffusion-v1-5-Regularization-Images ### Expected behavior They should not be reported as being pickled. ### Environment info N/A
{ "avatar_url": "https://avatars.githubusercontent.com/u/9112841?v=4", "events_url": "https://api.github.com/users/McPatate/events{/privacy}", "followers_url": "https://api.github.com/users/McPatate/followers", "following_url": "https://api.github.com/users/McPatate/following{/other_user}", "gists_url": "https://api.github.com/users/McPatate/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/McPatate", "id": 9112841, "login": "McPatate", "node_id": "MDQ6VXNlcjkxMTI4NDE=", "organizations_url": "https://api.github.com/users/McPatate/orgs", "received_events_url": "https://api.github.com/users/McPatate/received_events", "repos_url": "https://api.github.com/users/McPatate/repos", "site_admin": false, "starred_url": "https://api.github.com/users/McPatate/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/McPatate/subscriptions", "type": "User", "url": "https://api.github.com/users/McPatate", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5222/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5222/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/4717
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4717/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4717/comments
https://api.github.com/repos/huggingface/datasets/issues/4717/events
https://github.com/huggingface/datasets/issues/4717
1,309,512,483
I_kwDODunzps5ODZMj
4,717
Dataset Viewer issue for LawalAfeez/englishreview-ds-mini
{ "avatar_url": "https://avatars.githubusercontent.com/u/69974956?v=4", "events_url": "https://api.github.com/users/lawalAfeez820/events{/privacy}", "followers_url": "https://api.github.com/users/lawalAfeez820/followers", "following_url": "https://api.github.com/users/lawalAfeez820/following{/other_user}", "gists_url": "https://api.github.com/users/lawalAfeez820/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lawalAfeez820", "id": 69974956, "login": "lawalAfeez820", "node_id": "MDQ6VXNlcjY5OTc0OTU2", "organizations_url": "https://api.github.com/users/lawalAfeez820/orgs", "received_events_url": "https://api.github.com/users/lawalAfeez820/received_events", "repos_url": "https://api.github.com/users/lawalAfeez820/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lawalAfeez820/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lawalAfeez820/subscriptions", "type": "User", "url": "https://api.github.com/users/lawalAfeez820", "user_view_type": "public" }
[ { "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co", "id": 3470211881, "name": "dataset-viewer", "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo", "user_view_type": "public" } ]
null
[ "It's currently working, as far as I understand\r\n\r\nhttps://huggingface.co/datasets/LawalAfeez/englishreview-ds-mini/viewer/LawalAfeez--englishreview-ds-mini/train\r\n\r\n<img width=\"1556\" alt=\"Capture d’écran 2022-07-19 à 09 24 01\" src=\"https://user-images.githubusercontent.com/1676121/179761130-2d7980b9-c0f6-4093-8b1d-f0a3872fef3f.png\">\r\n\r\n---\r\n\r\nWhat was your issue?" ]
2022-07-19T13:19:39Z
2022-07-20T08:32:57Z
2022-07-20T08:32:57Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Link _No response_ ### Description Unable to view the split data ### Owner _No response_
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4717/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4717/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/5587
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5587/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5587/comments
https://api.github.com/repos/huggingface/datasets/issues/5587/events
https://github.com/huggingface/datasets/pull/5587
1,603,139,420
PR_kwDODunzps5K70pp
5,587
Fix `sort` with indices mapping
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==6.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.008740 / 0.011353 (-0.002613) | 0.004501 / 0.011008 (-0.006507) | 0.100045 / 0.038508 (0.061537) | 0.029999 / 0.023109 (0.006890) | 0.303556 / 0.275898 (0.027658) | 0.335342 / 0.323480 (0.011863) | 0.006996 / 0.007986 (-0.000989) | 0.004183 / 0.004328 (-0.000145) | 0.076434 / 0.004250 (0.072183) | 0.033899 / 0.037052 (-0.003153) | 0.301312 / 0.258489 (0.042823) | 0.343136 / 0.293841 (0.049295) | 0.034062 / 0.128546 (-0.094484) | 0.011465 / 0.075646 (-0.064181) | 0.323134 / 0.419271 (-0.096137) | 0.040820 / 0.043533 (-0.002713) | 0.301708 / 0.255139 (0.046569) | 0.329528 / 0.283200 (0.046328) | 0.088393 / 0.141683 (-0.053290) | 1.460996 / 1.452155 (0.008842) | 1.531145 / 1.492716 (0.038429) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.191918 / 0.018006 (0.173912) | 0.414099 / 0.000490 (0.413610) | 0.000411 / 0.000200 (0.000211) | 0.000060 / 0.000054 (0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022707 / 0.037411 (-0.014704) | 0.096991 / 0.014526 (0.082465) | 0.106070 / 0.176557 (-0.070487) | 0.151275 / 0.737135 (-0.585860) | 0.108909 / 0.296338 (-0.187430) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.422499 / 0.215209 (0.207289) | 4.205551 / 2.077655 (2.127896) | 1.918960 / 1.504120 (0.414841) | 1.715421 / 1.541195 (0.174227) | 1.768969 / 1.468490 
(0.300479) | 0.692243 / 4.584777 (-3.892534) | 3.382452 / 3.745712 (-0.363260) | 1.943695 / 5.269862 (-3.326166) | 1.250482 / 4.565676 (-3.315195) | 0.082084 / 0.424275 (-0.342191) | 0.012446 / 0.007607 (0.004839) | 0.525584 / 0.226044 (0.299539) | 5.275530 / 2.268929 (3.006602) | 2.386207 / 55.444624 (-53.058418) | 2.043920 / 6.876477 (-4.832557) | 2.030932 / 2.142072 (-0.111140) | 0.810233 / 4.805227 (-3.994994) | 0.148139 / 6.500664 (-6.352525) | 0.064617 / 0.075469 (-0.010852) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.227352 / 1.841788 (-0.614436) | 13.527623 / 8.074308 (5.453315) | 14.018551 / 10.191392 (3.827159) | 0.140333 / 0.680424 (-0.540091) | 0.028349 / 0.534201 (-0.505852) | 0.394904 / 0.579283 (-0.184379) | 0.406532 / 0.434364 (-0.027831) | 0.471714 / 0.540337 (-0.068624) | 0.568517 / 1.386936 (-0.818419) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006623 / 0.011353 (-0.004730) | 0.004464 / 0.011008 (-0.006544) | 0.076342 / 0.038508 (0.037834) | 0.027451 / 0.023109 (0.004341) | 0.343851 / 0.275898 (0.067953) | 0.385723 / 0.323480 (0.062243) | 0.005624 / 0.007986 (-0.002362) | 0.004685 / 0.004328 (0.000356) | 0.075669 / 0.004250 (0.071419) | 0.037297 / 0.037052 (0.000244) | 0.343363 / 0.258489 (0.084874) | 0.396115 / 0.293841 (0.102274) | 0.031577 / 0.128546 (-0.096970) | 0.011557 / 0.075646 (-0.064090) | 0.085626 / 0.419271 (-0.333645) | 0.041699 / 0.043533 (-0.001834) | 0.340826 / 0.255139 (0.085687) | 0.377167 / 0.283200 (0.093967) | 0.088632 / 0.141683 (-0.053051) | 1.464500 / 1.452155 (0.012345) | 1.556686 / 1.492716 (0.063969) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.231136 / 0.018006 (0.213130) | 0.402687 / 0.000490 (0.402197) | 0.000590 / 0.000200 (0.000390) | 0.000059 / 0.000054 (0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split 
|\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.024926 / 0.037411 (-0.012485) | 0.101062 / 0.014526 (0.086536) | 0.106481 / 0.176557 (-0.070075) | 0.159167 / 0.737135 (-0.577968) | 0.110948 / 0.296338 (-0.185390) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.441813 / 0.215209 (0.226603) | 4.416332 / 2.077655 (2.338677) | 2.080621 / 1.504120 (0.576501) | 1.877832 / 1.541195 (0.336637) | 1.944778 / 1.468490 (0.476288) | 0.704634 / 4.584777 (-3.880143) | 3.433955 / 3.745712 (-0.311758) | 1.863493 / 5.269862 (-3.406368) | 1.168869 / 4.565676 (-3.396807) | 0.084095 / 0.424275 (-0.340180) | 0.012440 / 0.007607 (0.004833) | 0.545122 / 0.226044 (0.319077) | 5.472214 / 2.268929 (3.203285) | 2.514580 / 55.444624 (-52.930044) | 2.164570 / 6.876477 (-4.711907) | 2.193467 / 2.142072 (0.051395) | 0.809056 / 4.805227 (-3.996171) | 0.152343 / 6.500664 (-6.348321) | 0.067610 / 0.075469 (-0.007859) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.280968 / 1.841788 (-0.560820) | 13.887674 / 8.074308 (5.813366) | 13.160405 / 10.191392 (2.969013) | 0.128601 / 0.680424 (-0.551823) | 0.016420 / 0.534201 (-0.517780) | 0.382810 / 0.579283 (-0.196473) | 0.394386 / 0.434364 (-0.039978) | 0.470254 / 0.540337 (-0.070083) | 0.566907 / 1.386936 (-0.820029) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#8cc6950322337ea8873939541c53858b10c0f3b9 \"CML watermark\")\n", "_The documentation is not available anymore as the PR was closed or merged._", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==6.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | 
write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.008673 / 0.011353 (-0.002679) | 0.004475 / 0.011008 (-0.006533) | 0.102060 / 0.038508 (0.063552) | 0.029438 / 0.023109 (0.006329) | 0.351785 / 0.275898 (0.075887) | 0.388199 / 0.323480 (0.064719) | 0.007011 / 0.007986 (-0.000974) | 0.003317 / 0.004328 (-0.001012) | 0.080931 / 0.004250 (0.076681) | 0.033449 / 0.037052 (-0.003603) | 0.360329 / 0.258489 (0.101840) | 0.400069 / 0.293841 (0.106228) | 0.033628 / 0.128546 (-0.094918) | 0.011462 / 0.075646 (-0.064184) | 0.323781 / 0.419271 (-0.095490) | 0.040686 / 0.043533 (-0.002847) | 0.332715 / 0.255139 (0.077576) | 0.370339 / 0.283200 (0.087139) | 0.084633 / 0.141683 (-0.057050) | 1.459452 / 1.452155 (0.007297) | 1.547719 / 1.492716 (0.055003) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.187051 / 0.018006 (0.169045) | 0.402625 / 0.000490 (0.402135) | 0.002218 / 0.000200 (0.002018) | 0.000070 / 0.000054 (0.000016) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.025240 / 0.037411 (-0.012171) | 0.102201 / 0.014526 (0.087675) | 0.108629 / 0.176557 (-0.067927) | 0.156686 / 0.737135 (-0.580449) | 0.111383 / 0.296338 (-0.184955) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.418099 / 0.215209 (0.202890) | 4.163345 / 2.077655 (2.085690) | 1.868419 / 1.504120 (0.364300) | 1.662066 / 1.541195 (0.120871) | 1.705912 / 1.468490 (0.237422) | 0.696391 / 4.584777 (-3.888386) | 3.338307 / 3.745712 (-0.407405) | 1.923255 / 5.269862 (-3.346607) | 1.249220 / 4.565676 (-3.316457) | 0.082037 / 0.424275 (-0.342238) | 0.012232 / 0.007607 (0.004624) | 0.523913 / 0.226044 (0.297869) | 5.290036 / 2.268929 (3.021107) | 2.319729 / 55.444624 (-53.124896) | 1.987345 / 6.876477 (-4.889132) | 2.044516 / 2.142072 (-0.097556) | 0.812098 / 4.805227 (-3.993129) | 0.147327 / 6.500664 (-6.353337) | 0.063838 / 0.075469 (-0.011631) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.219652 / 1.841788 (-0.622136) | 13.271513 / 8.074308 (5.197205) | 13.799982 / 10.191392 (3.608590) | 0.150055 / 0.680424 (-0.530369) | 0.028804 / 0.534201 (-0.505397) | 0.395452 / 0.579283 (-0.183831) | 0.398758 / 0.434364 (-0.035606) | 0.468575 / 0.540337 (-0.071763) | 0.553324 / 
1.386936 (-0.833612) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006498 / 0.011353 (-0.004855) | 0.004439 / 0.011008 (-0.006569) | 0.076525 / 0.038508 (0.038017) | 0.027184 / 0.023109 (0.004074) | 0.364705 / 0.275898 (0.088807) | 0.409481 / 0.323480 (0.086001) | 0.004831 / 0.007986 (-0.003154) | 0.004524 / 0.004328 (0.000196) | 0.075403 / 0.004250 (0.071153) | 0.039013 / 0.037052 (0.001960) | 0.364042 / 0.258489 (0.105553) | 0.413090 / 0.293841 (0.119249) | 0.032052 / 0.128546 (-0.096495) | 0.011514 / 0.075646 (-0.064132) | 0.085219 / 0.419271 (-0.334053) | 0.041448 / 0.043533 (-0.002085) | 0.350371 / 0.255139 (0.095232) | 0.386670 / 0.283200 (0.103470) | 0.089824 / 0.141683 (-0.051859) | 1.487392 / 1.452155 (0.035238) | 1.537201 / 1.492716 (0.044485) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.231555 / 0.018006 (0.213549) | 0.407505 / 0.000490 (0.407016) | 0.000382 / 0.000200 (0.000182) | 0.000060 / 0.000054 (0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.026665 / 0.037411 (-0.010747) | 0.105852 / 0.014526 (0.091326) | 0.108228 / 0.176557 (-0.068328) | 0.164164 / 0.737135 (-0.572972) | 0.114284 / 0.296338 (-0.182054) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.448957 / 0.215209 (0.233748) | 4.500058 / 2.077655 (2.422403) | 2.331660 / 1.504120 (0.827541) | 2.119904 / 1.541195 (0.578710) | 2.101489 / 1.468490 (0.632999) | 
0.696580 / 4.584777 (-3.888197) | 3.364206 / 3.745712 (-0.381506) | 2.550157 / 5.269862 (-2.719704) | 1.496455 / 4.565676 (-3.069222) | 0.083289 / 0.424275 (-0.340986) | 0.012283 / 0.007607 (0.004676) | 0.555581 / 0.226044 (0.329537) | 5.556284 / 2.268929 (3.287355) | 2.595261 / 55.444624 (-52.849363) | 2.234793 / 6.876477 (-4.641683) | 2.280150 / 2.142072 (0.138078) | 0.817885 / 4.805227 (-3.987343) | 0.151481 / 6.500664 (-6.349183) | 0.066764 / 0.075469 (-0.008705) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.318875 / 1.841788 (-0.522913) | 14.220380 / 8.074308 (6.146072) | 13.922773 / 10.191392 (3.731381) | 0.154608 / 0.680424 (-0.525816) | 0.016343 / 0.534201 (-0.517858) | 0.380758 / 0.579283 (-0.198525) | 0.392595 / 0.434364 (-0.041769) | 0.468844 / 0.540337 (-0.071493) | 0.561047 / 1.386936 (-0.825889) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#d57fdcf2c8110b4b599289695fa065d1fc4936d4 \"CML watermark\")\n" ]
2023-02-28T14:05:08Z
2023-02-28T17:28:57Z
2023-02-28T17:21:58Z
COLLABORATOR
null
null
null
Fixes the `key` range in the `query_table` call in `sort` to account for an indices mapping. Fix #5586
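A minimal sketch (not from the PR) of the situation this addresses, assuming `filter` as one way to attach an indices mapping to a `Dataset` before calling `sort`:

```python
from datasets import Dataset

ds = Dataset.from_dict({"x": [3, 1, 2]})
# filter keeps every row here, but attaches an indices mapping to the dataset
ds = ds.filter(lambda example: True)
# sort calls query_table under the hood; the fix makes the key range
# account for that indices mapping instead of the raw table length
ds = ds.sort("x")
print(ds["x"])  # [1, 2, 3]
```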
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5587/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5587/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5587.diff", "html_url": "https://github.com/huggingface/datasets/pull/5587", "merged_at": "2023-02-28T17:21:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/5587.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5587" }
https://api.github.com/repos/huggingface/datasets/issues/7245
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7245/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7245/comments
https://api.github.com/repos/huggingface/datasets/issues/7245/events
https://github.com/huggingface/datasets/pull/7245
2,605,701,235
PR_kwDODunzps5_eaiE
7,245
Release: 3.0.2
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_7245). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update." ]
2024-10-22T14:53:34Z
2024-10-22T15:01:50Z
2024-10-22T15:01:47Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7245/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7245/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7245.diff", "html_url": "https://github.com/huggingface/datasets/pull/7245", "merged_at": "2024-10-22T15:01:47Z", "patch_url": "https://github.com/huggingface/datasets/pull/7245.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7245" }
https://api.github.com/repos/huggingface/datasets/issues/6827
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6827/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6827/comments
https://api.github.com/repos/huggingface/datasets/issues/6827/events
https://github.com/huggingface/datasets/issues/6827
2,254,011,833
I_kwDODunzps6GWX25
6,827
Loading a remote dataset fails in the last release (v2.19.0)
{ "avatar_url": "https://avatars.githubusercontent.com/u/35369637?v=4", "events_url": "https://api.github.com/users/zrthxn/events{/privacy}", "followers_url": "https://api.github.com/users/zrthxn/followers", "following_url": "https://api.github.com/users/zrthxn/following{/other_user}", "gists_url": "https://api.github.com/users/zrthxn/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/zrthxn", "id": 35369637, "login": "zrthxn", "node_id": "MDQ6VXNlcjM1MzY5NjM3", "organizations_url": "https://api.github.com/users/zrthxn/orgs", "received_events_url": "https://api.github.com/users/zrthxn/received_events", "repos_url": "https://api.github.com/users/zrthxn/repos", "site_admin": false, "starred_url": "https://api.github.com/users/zrthxn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zrthxn/subscriptions", "type": "User", "url": "https://api.github.com/users/zrthxn", "user_view_type": "public" }
[]
open
false
null
[]
null
[]
2024-04-19T21:11:58Z
2024-04-19T21:13:42Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
While loading a dataset with multiple splits, I get an error saying `Couldn't find file at <URL>`. I am loading the dataset like so, nothing out of the ordinary. This dataset needs a token to access it. ``` token="hf_myhftoken-sdhbdsjgkhbd" load_dataset("speechcolab/gigaspeech", "test", cache_dir=f"gigaspeech/test", token=token) ``` I get the following error: ![Screenshot 2024-04-19 at 11 03 07 PM](https://github.com/huggingface/datasets/assets/35369637/8dce757f-08ff-45dd-85b5-890fced7c5bc) You can see that the URL it is trying to reach has the JSON object of the dataset split appended to the base URL. I think this may be a newly introduced regression: I did not have this issue with the previous version of `datasets`. Everything was fine for me yesterday, and after the release 12 hours ago this seems to have broken. Also, the dataset in question runs custom code, and I checked that there have been no commits to the dataset on Hugging Face in 6 months. ### Steps to reproduce the bug Since this happened with one particular dataset for me, I am listing the steps for that dataset. 1. Open https://huggingface.co/datasets/speechcolab/gigaspeech and fill in the form to get access. 2. Create a token on your Hugging Face account with read access. 3. Run the following line, substituting `<your_token_here>` with your token. ``` load_dataset("speechcolab/gigaspeech", "test", cache_dir=f"gigaspeech/test", token="<your_token_here>") ``` ### Expected behavior Be able to load the dataset in question. ### Environment info datasets == 2.19.0 python == 3.10 kernel == Linux 6.1.58+
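A hedged workaround sketch, not part of the report: since the reporter says loading worked before the 2.19.0 release, pinning `datasets` to the previous release should restore it while the regression is investigated (the exact pin `2.18.0` is an assumption):

```python
# Run once in a shell first (assumes 2.18.0 is the release preceding 2.19.0):
#   python -m pip install "datasets==2.18.0"
from datasets import load_dataset

ds = load_dataset(
    "speechcolab/gigaspeech",
    "test",
    cache_dir="gigaspeech/test",
    token="<your_token_here>",  # placeholder token, as in the report
)
```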
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6827/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6827/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/6003
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6003/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6003/comments
https://api.github.com/repos/huggingface/datasets/issues/6003/events
https://github.com/huggingface/datasets/issues/6003
1,786,554,110
I_kwDODunzps5qfKb-
6,003
interleave_datasets & DataCollatorForLanguageModeling having a conflict ?
{ "avatar_url": "https://avatars.githubusercontent.com/u/1929830?v=4", "events_url": "https://api.github.com/users/PonteIneptique/events{/privacy}", "followers_url": "https://api.github.com/users/PonteIneptique/followers", "following_url": "https://api.github.com/users/PonteIneptique/following{/other_user}", "gists_url": "https://api.github.com/users/PonteIneptique/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/PonteIneptique", "id": 1929830, "login": "PonteIneptique", "node_id": "MDQ6VXNlcjE5Mjk4MzA=", "organizations_url": "https://api.github.com/users/PonteIneptique/orgs", "received_events_url": "https://api.github.com/users/PonteIneptique/received_events", "repos_url": "https://api.github.com/users/PonteIneptique/repos", "site_admin": false, "starred_url": "https://api.github.com/users/PonteIneptique/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PonteIneptique/subscriptions", "type": "User", "url": "https://api.github.com/users/PonteIneptique", "user_view_type": "public" }
[]
open
false
null
[]
null
[]
2023-07-03T17:15:31Z
2023-07-03T17:15:31Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Hi everyone :) I have two local & custom datasets (1 "sentence" per line) which I split 95/5 for pre-training a BERT model. I use a modified version of `run_mlm.py` in order to be able to make use of `interleave_datasets`: - `tokenize()` runs fine - `group_texts()` runs fine Every time, on step 19, I get ```pytb File "env/lib/python3.9/site-packages/transformers/data/data_collator.py", line 779, in torch_mask_tokens inputs[indices_random] = random_words[indices_random] RuntimeError: Index put requires the source and destination dtypes match, got Float for the destination and Long for the source. ``` I tried: - training without interleave on dataset 1, it runs - training without interleave on dataset 2, it runs - training without `.to_iterable_dataset()`, it hangs then crashes - training without `group_texts()` and padding to max_length, which seemed to fix the issue, but it's hard to tell whether the error would simply have surfaced many steps later. I might have coded something wrong, but I can't see what. ### Steps to reproduce the bug I have this function: ```py def build_dataset(path: str, percent: str): dataset = load_dataset( "text", data_files={"train": [path]}, split=f"train[{percent}]" ) dataset = dataset.map( lambda examples: tokenize(examples["text"]), batched=True, num_proc=num_proc, ) dataset = dataset.map( group_texts, batched=True, num_proc=num_proc, desc=f"Grouping texts in chunks of {tokenizer.max_seq_length}", remove_columns=["text"] ) print(len(dataset)) return dataset.to_iterable_dataset() ``` I hardcoded `group_texts`: ```py def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, and if the total_length < max_seq_length we exclude this batch and return an empty dict. # We could add padding if the model supported it instead of this drop, you can customize this part to your needs. total_length = (total_length // 512) * 512 # Split by chunks of max_len. result = { k: [t[i: i + 512] for i in range(0, total_length, 512)] for k, t in concatenated_examples.items() } # result = {k: [el for el in elements if el] for k, elements in result.items()} return result ``` And then I build the datasets using the following code: ```py train1 = build_dataset("d1.txt", ":95%") train2 = build_dataset("d2.txt", ":95%") dev1 = build_dataset("d1.txt", "95%:") dev2 = build_dataset("d2.txt", "95%:") ``` and finally I run ```py train_dataset = interleave_datasets( [train1, train2], probabilities=[0.8, 0.2], seed=42 ) eval_dataset = interleave_datasets( [dev1, dev2], probabilities=[0.8, 0.2], seed=42 ) ``` Then I run the training part, which remains mostly untouched: > CUDA_VISIBLE_DEVICES=1 python custom_dataset.py --model_type bert --per_device_train_batch_size 32 --do_train --output_dir /var/mlm/training-bert/model --max_seq_length 512 --save_steps 10000 --save_total_limit 3 --auto_find_batch_size --logging_dir ./logs-bert --learning_rate 0.0001 --do_train --num_train_epochs 25 --warmup_steps 10000 --max_step 45000 --fp16 ### Expected behavior The model should then train normally, but it fails every time at the same step (19). Printing the variables at `inputs[indices_random] = random_words[indices_random]` shows a magnificent empty tensor (, 32) [if I remember correctly] ### Environment info transformers[torch] 4.30.2 Ubuntu A100 0 CUDA 12 Driver Version: 525.116.04
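The empty tensor at the failing step suggests a zero-length batch is slipping through. A hedged mitigation sketch, not from the report, that drops zero-length examples before interleaving; the `input_ids` column name is an assumption about what `tokenize()` returns:

```python
# IterableDataset.filter drops examples lazily; applying it after
# to_iterable_dataset() removes the zero-length chunks that group_texts
# can emit when a batch's total length is shorter than 512.
train1 = train1.filter(lambda example: len(example["input_ids"]) > 0)
train2 = train2.filter(lambda example: len(example["input_ids"]) > 0)
```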
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6003/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6003/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/7369
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7369/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7369/comments
https://api.github.com/repos/huggingface/datasets/issues/7369/events
https://github.com/huggingface/datasets/issues/7369
2,787,193,238
I_kwDODunzps6mITGW
7,369
Importing dataset gives unhelpful error message when filenames in metadata.csv are not found in the directory
{ "avatar_url": "https://avatars.githubusercontent.com/u/38278139?v=4", "events_url": "https://api.github.com/users/svencornetsdegroot/events{/privacy}", "followers_url": "https://api.github.com/users/svencornetsdegroot/followers", "following_url": "https://api.github.com/users/svencornetsdegroot/following{/other_user}", "gists_url": "https://api.github.com/users/svencornetsdegroot/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/svencornetsdegroot", "id": 38278139, "login": "svencornetsdegroot", "node_id": "MDQ6VXNlcjM4Mjc4MTM5", "organizations_url": "https://api.github.com/users/svencornetsdegroot/orgs", "received_events_url": "https://api.github.com/users/svencornetsdegroot/received_events", "repos_url": "https://api.github.com/users/svencornetsdegroot/repos", "site_admin": false, "starred_url": "https://api.github.com/users/svencornetsdegroot/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/svencornetsdegroot/subscriptions", "type": "User", "url": "https://api.github.com/users/svencornetsdegroot", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "I'd prefer even more verbose errors; like `\"file123.mp3\" is referenced in metadata.csv, but not found in the data directory '/path/to/audiofolder' ! (and 100+ more missing files)` Or something along those lines." ]
2025-01-14T13:53:21Z
2025-01-14T15:05:51Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug When importing an audiofolder dataset where the names of the audio files don't correspond to the file names in metadata.csv, we get an unclear error message that is not helpful for debugging, i.e. ``` ValueError: Instruction "train" corresponds to no data! ``` ### Steps to reproduce the bug Assume an audiofolder with audio files filename1.mp3, filename2.mp3, etc., and a file metadata.csv that contains the columns file_name and sentence, with file_name values formatted like filename1.mp3, filename2.mp3, etc. Load the dataset: ``` from datasets import load_dataset load_dataset("audiofolder", data_dir='/path/to/audiofolder') ``` When the file_name values in the CSV are not in sync with the file names in the audiofolder, we get this error: ``` File /opt/conda/lib/python3.12/site-packages/datasets/arrow_reader.py:251, in BaseReader.read(self, name, instructions, split_infos, in_memory) 249 if not files: 250 msg = f'Instruction "{instructions}" corresponds to no data!' --> 251 raise ValueError(msg) 252 return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory) ValueError: Instruction "train" corresponds to no data! ``` (`load_dataset` has `'train'` as the default value for the `split` argument, which is where the "train" in the message comes from.) ### Expected behavior It would be better to get an error report along the lines of: ``` The metadata.csv file has different filenames than the files in the data directory. ``` It would have saved me 4 hours of debugging. ### Environment info - `datasets` version: 3.2.0 - Platform: Linux-5.14.0-427.40.1.el9_4.x86_64-x86_64-with-glibc2.39 - Python version: 3.12.8 - `huggingface_hub` version: 0.27.0 - PyArrow version: 18.1.0 - Pandas version: 2.2.3 - `fsspec` version: 2024.9.0
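A hedged pre-flight check sketch (not part of the report) that surfaces the mismatch before `load_dataset` is even called; the `*.mp3` glob matches the report's example and is an assumption about the folder layout:

```python
import csv
from pathlib import Path

data_dir = Path("/path/to/audiofolder")  # placeholder path from the report
with open(data_dir / "metadata.csv", newline="") as f:
    listed = {row["file_name"] for row in csv.DictReader(f)}
on_disk = {p.name for p in data_dir.glob("*.mp3")}

missing = sorted(listed - on_disk)
if missing:
    raise ValueError(
        f"{len(missing)} file(s) in metadata.csv not found in {data_dir}, "
        f"e.g. {missing[:3]}"
    )
```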
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/7369/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7369/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/6509
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6509/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6509/comments
https://api.github.com/repos/huggingface/datasets/issues/6509/events
https://github.com/huggingface/datasets/pull/6509
2,046,720,869
PR_kwDODunzps5iREyE
6,509
Better cast error when generating dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6509). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "I created `DatatasetGenerationCastError` in `exceptions.py` that inherits from `DatasetGenerationError` (for backward compatibility) that inherits from `DatasetsError`.\r\n\r\nI also added a help message at the end of the error:\r\n\r\n```\r\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)\r\n```", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004991 / 0.011353 (-0.006361) | 0.003362 / 0.011008 (-0.007646) | 0.062093 / 0.038508 (0.023585) | 0.051533 / 0.023109 (0.028424) | 0.247508 / 0.275898 (-0.028390) | 0.275593 / 0.323480 (-0.047886) | 0.003828 / 0.007986 (-0.004158) | 0.002573 / 0.004328 (-0.001755) | 0.047727 / 0.004250 (0.043477) | 0.037029 / 0.037052 (-0.000023) | 0.250359 / 0.258489 (-0.008130) | 0.282640 / 0.293841 (-0.011201) | 0.027853 / 0.128546 (-0.100693) | 0.010247 / 0.075646 (-0.065400) | 0.206826 / 0.419271 (-0.212445) | 0.035837 / 0.043533 (-0.007695) | 0.251795 / 0.255139 (-0.003344) | 0.275654 / 0.283200 (-0.007545) | 0.017722 / 0.141683 (-0.123960) | 1.120287 / 1.452155 (-0.331868) | 1.203087 / 1.492716 (-0.289630) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092320 / 0.018006 (0.074314) | 0.300079 / 0.000490 (0.299589) | 0.000211 / 0.000200 (0.000011) | 0.000044 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018193 / 0.037411 (-0.019218) | 0.061310 / 0.014526 (0.046784) | 0.072433 / 0.176557 (-0.104124) | 0.119092 / 0.737135 (-0.618043) | 0.074044 / 0.296338 (-0.222294) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | 
read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.297184 / 0.215209 (0.081975) | 2.805197 / 2.077655 (0.727543) | 1.521326 / 1.504120 (0.017206) | 1.374321 / 1.541195 (-0.166874) | 1.388767 / 1.468490 (-0.079723) | 0.571865 / 4.584777 (-4.012912) | 2.385213 / 3.745712 (-1.360499) | 2.726840 / 5.269862 (-2.543021) | 1.725352 / 4.565676 (-2.840325) | 0.063012 / 0.424275 (-0.361263) | 0.004911 / 0.007607 (-0.002697) | 0.336430 / 0.226044 (0.110385) | 3.390616 / 2.268929 (1.121688) | 1.846398 / 55.444624 (-53.598227) | 1.576797 / 6.876477 (-5.299680) | 1.579445 / 2.142072 (-0.562627) | 0.652515 / 4.805227 (-4.152712) | 0.118393 / 6.500664 (-6.382271) | 0.042155 / 0.075469 (-0.033314) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.942269 / 1.841788 (-0.899518) | 11.318258 / 8.074308 (3.243950) | 10.299948 / 10.191392 (0.108556) | 0.136088 / 0.680424 (-0.544336) | 0.013682 / 0.534201 (-0.520519) | 0.287549 / 0.579283 (-0.291734) | 0.258346 / 0.434364 (-0.176018) | 0.337146 / 0.540337 (-0.203191) | 0.443922 / 1.386936 (-0.943014) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005302 / 0.011353 (-0.006051) | 0.003234 / 0.011008 (-0.007774) | 0.049159 / 0.038508 (0.010651) | 0.050459 / 0.023109 (0.027350) | 0.273718 / 0.275898 (-0.002180) | 0.296997 / 0.323480 (-0.026483) | 0.003948 / 0.007986 (-0.004038) | 0.002590 / 0.004328 (-0.001739) | 0.048129 / 0.004250 (0.043879) | 0.039369 / 0.037052 (0.002317) | 0.276469 / 0.258489 (0.017980) | 0.306359 / 0.293841 (0.012519) | 0.028864 / 0.128546 (-0.099682) | 0.010253 / 0.075646 
(-0.065394) | 0.058264 / 0.419271 (-0.361008) | 0.032451 / 0.043533 (-0.011082) | 0.277336 / 0.255139 (0.022197) | 0.296137 / 0.283200 (0.012937) | 0.018094 / 0.141683 (-0.123589) | 1.119539 / 1.452155 (-0.332615) | 1.163116 / 1.492716 (-0.329600) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092578 / 0.018006 (0.074572) | 0.300756 / 0.000490 (0.300267) | 0.000222 / 0.000200 (0.000022) | 0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022333 / 0.037411 (-0.015078) | 0.076632 / 0.014526 (0.062107) | 0.087829 / 0.176557 (-0.088727) | 0.127686 / 0.737135 (-0.609449) | 0.091314 / 0.296338 (-0.205024) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.297499 / 0.215209 (0.082290) | 2.889775 / 2.077655 (0.812120) | 1.598976 / 1.504120 (0.094856) | 1.478805 / 1.541195 (-0.062389) | 1.481818 / 1.468490 (0.013328) | 0.557972 / 4.584777 (-4.026804) | 2.453248 / 3.745712 (-1.292464) | 2.771823 / 5.269862 (-2.498039) | 1.721527 / 4.565676 (-2.844150) | 0.062786 / 0.424275 (-0.361489) | 0.005298 / 0.007607 (-0.002309) | 0.346660 / 0.226044 (0.120615) | 3.412262 / 2.268929 (1.143334) | 1.940240 / 55.444624 (-53.504384) | 1.654015 / 6.876477 (-5.222461) | 1.652039 / 2.142072 (-0.490034) | 0.636870 / 4.805227 (-4.168357) | 0.116213 / 6.500664 (-6.384451) | 0.040937 / 0.075469 (-0.034532) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.001605 / 1.841788 (-0.840183) | 11.986592 / 8.074308 (3.912284) | 10.231288 / 10.191392 (0.039896) | 0.130242 / 0.680424 (-0.550182) | 0.015764 / 0.534201 (-0.518437) | 0.289257 / 0.579283 (-0.290026) | 0.275996 / 0.434364 (-0.158368) | 0.323089 / 0.540337 (-0.217248) | 0.556383 / 1.386936 (-0.830553) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#773324159ad4afd7931588a710839b76670ddf87 \"CML watermark\")\n" ]
2023-12-18T13:57:24Z
2023-12-19T09:37:12Z
2023-12-19T09:31:03Z
MEMBER
null
null
null
I want to improve the error message for datasets like https://huggingface.co/datasets/m-a-p/COIG-CQIA Cc @albertvillanova @severo is this new error ok ? Or should I use a dedicated error class ? New: ```python Traceback (most recent call last): File "/Users/quentinlhoest/hf/datasets/src/datasets/builder.py", line 1920, in _prepare_split_single writer.write_table(table) File "/Users/quentinlhoest/hf/datasets/src/datasets/arrow_writer.py", line 574, in write_table pa_table = table_cast(pa_table, self._schema) File "/Users/quentinlhoest/hf/datasets/src/datasets/table.py", line 2322, in table_cast return cast_table_to_schema(table, schema) File "/Users/quentinlhoest/hf/datasets/src/datasets/table.py", line 2276, in cast_table_to_schema raise CastError( datasets.table.CastError: Couldn't cast instruction: string other: string index: string domain: list<item: string> child 0, item: string output: string task_type: struct<major: list<item: string>, minor: list<item: string>> child 0, major: list<item: string> child 0, item: string child 1, minor: list<item: string> child 0, item: string task_name_in_eng: string input: string to {'answer_from': Value(dtype='string', id=None), 'instruction': Value(dtype='string', id=None), 'human_verified': Value(dtype='bool', id=None), 'domain': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'output': Value(dtype='string', id=None), 'task_type': {'major': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'minor': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)}, 'copyright': Value(dtype='string', id=None), 'input': Value(dtype='string', id=None)} because column names don't match During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/Users/quentinlhoest/hf/datasets/playground/ttest.py", line 74, in <module> load_dataset("m-a-p/COIG-CQIA") File "/Users/quentinlhoest/hf/datasets/src/datasets/load.py", line 2529, in load_dataset builder_instance.download_and_prepare( File "/Users/quentinlhoest/hf/datasets/src/datasets/builder.py", line 936, in download_and_prepare self._download_and_prepare( File "/Users/quentinlhoest/hf/datasets/src/datasets/builder.py", line 1031, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/Users/quentinlhoest/hf/datasets/src/datasets/builder.py", line 1791, in _prepare_split for job_id, done, content in self._prepare_split_single( File "/Users/quentinlhoest/hf/datasets/src/datasets/builder.py", line 1922, in _prepare_split_single raise DatasetGenerationCastError.from_cast_error( datasets.exceptions.DatasetGenerationCastError: An error occurred while generating the dataset All the data files must have the same columns, but at some point there are 3 new columns (other, index, task_name_in_eng) and 3 missing columns (answer_from, copyright, human_verified). 
This happened while the json dataset builder was generating data using hf://datasets/m-a-p/COIG-CQIA/coig_pc/coig_pc_core_sample.json (at revision b7b7ecf290f6515036c7c04bd8537228ac2eb474) Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations) ``` Previously: ```python Traceback (most recent call last): File "/Users/quentinlhoest/hf/datasets/src/datasets/builder.py", line 1931, in _prepare_split_single writer.write_table(table) File "/Users/quentinlhoest/hf/datasets/src/datasets/arrow_writer.py", line 574, in write_table pa_table = table_cast(pa_table, self._schema) File "/Users/quentinlhoest/hf/datasets/src/datasets/table.py", line 2295, in table_cast return cast_table_to_schema(table, schema) File "/Users/quentinlhoest/hf/datasets/src/datasets/table.py", line 2253, in cast_table_to_schema raise ValueError(f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match") ValueError: Couldn't cast task_type: struct<major: list<item: string>, minor: list<item: string>> child 0, major: list<item: string> child 0, item: string child 1, minor: list<item: string> child 0, item: string other: string instruction: string task_name_in_eng: string domain: list<item: string> child 0, item: string index: string output: string input: string to {'human_verified': Value(dtype='bool', id=None), 'task_type': {'major': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'minor': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)}, 'answer_from': Value(dtype='string', id=None), 'copyright': Value(dtype='string', id=None), 'instruction': Value(dtype='string', id=None), 'domain': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'output': Value(dtype='string', id=None), 'input': Value(dtype='string', id=None)} because column names don't match The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/quentinlhoest/hf/datasets/playground/ttest.py", line 74, in <module> load_dataset("m-a-p/COIG-CQIA") File "/Users/quentinlhoest/hf/datasets/src/datasets/load.py", line 2529, in load_dataset builder_instance.download_and_prepare( File "/Users/quentinlhoest/hf/datasets/src/datasets/builder.py", line 949, in download_and_prepare self._download_and_prepare( File "/Users/quentinlhoest/hf/datasets/src/datasets/builder.py", line 1044, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/Users/quentinlhoest/hf/datasets/src/datasets/builder.py", line 1804, in _prepare_split for job_id, done, content in self._prepare_split_single( File "/Users/quentinlhoest/hf/datasets/src/datasets/builder.py", line 1949, in _prepare_split_single raise DatasetGenerationError("An error occurred while generating the dataset") from e datasets.builder.DatasetGenerationError: An error occurred while generating the dataset ```
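A minimal sketch of the exception hierarchy settled on in the review comments (class bodies here are placeholders; the real definitions live in the library's `exceptions.py`):

```python
class DatasetsError(Exception):
    """Base error for the `datasets` library."""

class DatasetGenerationError(DatasetsError):
    """Raised when a dataset fails to generate."""

class DatasetGenerationCastError(DatasetGenerationError):
    """Raised when generation fails on a schema cast; subclassing
    DatasetGenerationError keeps existing `except` clauses working."""
```

Catching `DatasetGenerationError` therefore still catches the new cast error, which is the backward-compatibility point made in the discussion.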
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6509/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6509/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6509.diff", "html_url": "https://github.com/huggingface/datasets/pull/6509", "merged_at": "2023-12-19T09:31:03Z", "patch_url": "https://github.com/huggingface/datasets/pull/6509.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6509" }
https://api.github.com/repos/huggingface/datasets/issues/6553
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6553/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6553/comments
https://api.github.com/repos/huggingface/datasets/issues/6553/events
https://github.com/huggingface/datasets/issues/6553
2,063,474,183
I_kwDODunzps56_h4H
6,553
Cannot import name 'load_dataset' from .... module ‘datasets’
{ "avatar_url": "https://avatars.githubusercontent.com/u/83450192?v=4", "events_url": "https://api.github.com/users/ciaoyizhen/events{/privacy}", "followers_url": "https://api.github.com/users/ciaoyizhen/followers", "following_url": "https://api.github.com/users/ciaoyizhen/following{/other_user}", "gists_url": "https://api.github.com/users/ciaoyizhen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ciaoyizhen", "id": 83450192, "login": "ciaoyizhen", "node_id": "MDQ6VXNlcjgzNDUwMTky", "organizations_url": "https://api.github.com/users/ciaoyizhen/orgs", "received_events_url": "https://api.github.com/users/ciaoyizhen/received_events", "repos_url": "https://api.github.com/users/ciaoyizhen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ciaoyizhen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ciaoyizhen/subscriptions", "type": "User", "url": "https://api.github.com/users/ciaoyizhen", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "I don't know My conpany conputer cannot work. but in my computer, it work?", "Do you have a folder in your working directory called datasets?" ]
2024-01-03T08:18:21Z
2024-02-21T00:38:24Z
2024-02-21T00:38:24Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Installed with `python -m pip install datasets`, but the import fails with "cannot import name 'load_dataset' from 'datasets'". ### Steps to reproduce the bug `from datasets import load_dataset` ### Expected behavior The import should work, but it doesn't. ### Environment info datasets version == 2.15.0 python == 3.10.12 Linux version: unknown
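A hedged diagnostic sketch, following the suggestion in the comments: a local folder or file named `datasets` in the working directory shadows the installed library and produces exactly this import error.

```python
import datasets

# If this prints a path inside your working directory (or None) rather than
# a site-packages path, a local `datasets` folder/file is shadowing the
# installed library; rename or remove it.
print(datasets.__file__)
print(getattr(datasets, "__version__", "n/a"))  # "n/a" also suggests shadowing
```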
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6553/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6553/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7414
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7414/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7414/comments
https://api.github.com/repos/huggingface/datasets/issues/7414/events
https://github.com/huggingface/datasets/pull/7414
2,863,798,756
PR_kwDODunzps6LxjsH
7,414
Gracefully cancel async tasks
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_7414). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update." ]
2025-02-19T16:10:58Z
2025-02-20T14:12:26Z
2025-02-20T14:12:23Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7414/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7414/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7414.diff", "html_url": "https://github.com/huggingface/datasets/pull/7414", "merged_at": "2025-02-20T14:12:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/7414.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7414" }
https://api.github.com/repos/huggingface/datasets/issues/6701
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6701/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6701/comments
https://api.github.com/repos/huggingface/datasets/issues/6701/events
https://github.com/huggingface/datasets/pull/6701
2,161,448,017
PR_kwDODunzps5oTfO_
6,701
Base parquet batch_size on parquet row group size
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6701). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005490 / 0.011353 (-0.005863) | 0.003709 / 0.011008 (-0.007299) | 0.064192 / 0.038508 (0.025684) | 0.029581 / 0.023109 (0.006472) | 0.251086 / 0.275898 (-0.024812) | 0.267306 / 0.323480 (-0.056174) | 0.003074 / 0.007986 (-0.004912) | 0.003340 / 0.004328 (-0.000988) | 0.048820 / 0.004250 (0.044569) | 0.045370 / 0.037052 (0.008318) | 0.260384 / 0.258489 (0.001895) | 0.284558 / 0.293841 (-0.009283) | 0.027732 / 0.128546 (-0.100814) | 0.010661 / 0.075646 (-0.064986) | 0.213403 / 0.419271 (-0.205868) | 0.036283 / 0.043533 (-0.007250) | 0.250107 / 0.255139 (-0.005032) | 0.265220 / 0.283200 (-0.017980) | 0.021021 / 0.141683 (-0.120661) | 1.112058 / 1.452155 (-0.340096) | 1.169039 / 1.492716 (-0.323678) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095008 / 0.018006 (0.077002) | 0.303509 / 0.000490 (0.303019) | 0.000233 / 0.000200 (0.000033) | 0.000052 / 0.000054 (-0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018224 / 0.037411 (-0.019187) | 0.061366 / 0.014526 (0.046841) | 0.073584 / 0.176557 (-0.102972) | 0.119869 / 0.737135 (-0.617266) | 0.074228 / 0.296338 (-0.222111) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.288147 / 0.215209 (0.072938) | 2.824419 / 2.077655 (0.746764) | 1.478530 / 1.504120 (-0.025590) | 1.350127 / 1.541195 (-0.191067) | 1.349622 / 1.468490 (-0.118868) | 0.568058 / 4.584777 (-4.016719) | 2.377494 / 3.745712 (-1.368218) | 2.720767 / 5.269862 (-2.549094) | 1.710763 / 4.565676 (-2.854914) | 0.061498 / 0.424275 (-0.362778) | 0.004893 / 0.007607 (-0.002715) | 0.335633 / 0.226044 (0.109588) | 3.380646 / 2.268929 (1.111717) | 1.802436 / 55.444624 (-53.642188) | 1.562737 / 6.876477 (-5.313739) | 1.566267 / 2.142072 (-0.575806) | 0.629058 / 4.805227 (-4.176169) | 0.116307 / 6.500664 (-6.384357) | 0.042174 / 0.075469 (-0.033295) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.950945 / 1.841788 (-0.890842) | 11.279009 / 8.074308 (3.204701) | 9.433251 / 10.191392 (-0.758141) | 0.138964 / 0.680424 (-0.541460) | 0.014155 / 0.534201 (-0.520046) | 0.284065 / 0.579283 (-0.295218) | 0.263301 / 0.434364 (-0.171063) | 0.331932 / 0.540337 (-0.208406) | 0.441656 / 1.386936 (-0.945280) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005132 / 0.011353 (-0.006221) | 0.003484 / 0.011008 (-0.007524) | 0.049040 / 0.038508 (0.010532) | 0.030254 / 0.023109 (0.007145) | 0.277141 / 0.275898 (0.001243) | 0.295242 / 0.323480 (-0.028238) | 0.004295 / 0.007986 (-0.003690) | 0.002632 / 0.004328 (-0.001696) | 0.048540 / 0.004250 (0.044290) | 0.044787 / 0.037052 (0.007734) | 0.287736 / 0.258489 (0.029247) | 0.313146 / 0.293841 (0.019305) | 0.029340 / 0.128546 (-0.099206) | 0.010204 / 0.075646 (-0.065442) | 0.059058 / 0.419271 (-0.360214) | 0.051033 / 0.043533 (0.007500) | 0.274086 / 0.255139 (0.018947) | 0.293048 / 0.283200 (0.009848) | 0.019573 / 0.141683 (-0.122110) | 1.174032 / 1.452155 (-0.278123) | 1.227107 / 1.492716 (-0.265609) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.094896 / 0.018006 (0.076890) | 0.303519 / 0.000490 (0.303029) | 0.000223 / 0.000200 (0.000023) | 0.000049 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021495 / 0.037411 (-0.015917) | 0.074234 / 0.014526 (0.059708) | 0.086212 / 0.176557 (-0.090345) | 0.125052 / 0.737135 (-0.612084) | 0.087464 / 0.296338 (-0.208874) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.297098 / 0.215209 (0.081889) | 2.970944 / 2.077655 (0.893289) | 1.650101 / 1.504120 (0.145981) | 1.532694 / 1.541195 (-0.008501) | 1.513652 / 1.468490 (0.045162) | 0.559614 / 4.584777 (-4.025163) | 2.404848 / 3.745712 (-1.340865) | 2.627851 / 5.269862 (-2.642011) | 1.707550 / 4.565676 (-2.858127) | 0.061821 / 0.424275 (-0.362454) | 0.005012 / 0.007607 (-0.002595) | 0.342462 / 0.226044 (0.116417) | 3.401703 / 2.268929 (1.132774) | 1.991632 / 55.444624 (-53.452993) | 1.737706 / 6.876477 (-5.138771) | 1.837457 / 2.142072 (-0.304616) | 0.638845 / 4.805227 (-4.166383) | 0.114773 / 6.500664 (-6.385891) | 0.040175 / 0.075469 (-0.035294) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.038286 / 1.841788 (-0.803501) | 11.885757 / 8.074308 (3.811448) | 10.061530 / 10.191392 (-0.129862) | 0.140824 / 0.680424 (-0.539600) | 0.015080 / 0.534201 (-0.519121) | 0.287992 / 0.579283 (-0.291291) | 0.273498 / 0.434364 (-0.160866) | 0.326478 / 0.540337 (-0.213860) | 0.426900 / 1.386936 (-0.960036) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#b02be21047087c5ffc11cf1c072a5aceab517eba \"CML watermark\")\n" ]
2024-02-29T14:53:01Z
2024-02-29T15:15:18Z
2024-02-29T15:08:55Z
MEMBER
null
null
null
This allows streaming datasets like [Major-TOM/Core-S2L2A](https://huggingface.co/datasets/Major-TOM/Core-S2L2A) which have row groups with few rows (one row is ~10MB). Previously the cold start would take a lot of time and OOM, because many row groups were downloaded before yielding the first example. I tried it on OpenOrca and imagenet-hard and it doesn't affect overall throughput. Even though the overall throughput doesn't change for datasets like imagenet-hard with big rows, note that it does create shorter and more frequent pauses to download the next row group. I find this fine, because previously the pauses were less frequent but very long (multiple row groups were downloaded at a time).
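A minimal sketch (not the PR's code) of the underlying idea, using `pyarrow` to derive the read batch size from the first row group so that streaming can yield examples after a single row group is downloaded; the file name and the `1024` fallback are placeholders:

```python
import pyarrow.parquet as pq

pf = pq.ParquetFile("data.parquet")  # placeholder file
# Use the first row group's row count as the batch size, so at most one
# row group needs to be buffered before the first examples are yielded.
if pf.metadata.num_row_groups:
    batch_size = pf.metadata.row_group(0).num_rows
else:
    batch_size = 1024
for record_batch in pf.iter_batches(batch_size=batch_size):
    rows = record_batch.to_pylist()  # first examples, one row group in
    break
```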
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6701/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6701/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6701.diff", "html_url": "https://github.com/huggingface/datasets/pull/6701", "merged_at": "2024-02-29T15:08:55Z", "patch_url": "https://github.com/huggingface/datasets/pull/6701.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6701" }
https://api.github.com/repos/huggingface/datasets/issues/4827
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4827/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4827/comments
https://api.github.com/repos/huggingface/datasets/issues/4827/events
https://github.com/huggingface/datasets/pull/4827
1,335,994,312
PR_kwDODunzps49B1zi
4,827
Add license metadata to pg19
{ "avatar_url": "https://avatars.githubusercontent.com/u/326577?v=4", "events_url": "https://api.github.com/users/julien-c/events{/privacy}", "followers_url": "https://api.github.com/users/julien-c/followers", "following_url": "https://api.github.com/users/julien-c/following{/other_user}", "gists_url": "https://api.github.com/users/julien-c/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/julien-c", "id": 326577, "login": "julien-c", "node_id": "MDQ6VXNlcjMyNjU3Nw==", "organizations_url": "https://api.github.com/users/julien-c/orgs", "received_events_url": "https://api.github.com/users/julien-c/received_events", "repos_url": "https://api.github.com/users/julien-c/repos", "site_admin": false, "starred_url": "https://api.github.com/users/julien-c/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/julien-c/subscriptions", "type": "User", "url": "https://api.github.com/users/julien-c", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-08-11T13:52:20Z
2022-08-11T15:01:03Z
2022-08-11T14:46:38Z
MEMBER
null
null
null
As reported over email by Roy Rijkers
{ "avatar_url": "https://avatars.githubusercontent.com/u/326577?v=4", "events_url": "https://api.github.com/users/julien-c/events{/privacy}", "followers_url": "https://api.github.com/users/julien-c/followers", "following_url": "https://api.github.com/users/julien-c/following{/other_user}", "gists_url": "https://api.github.com/users/julien-c/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/julien-c", "id": 326577, "login": "julien-c", "node_id": "MDQ6VXNlcjMyNjU3Nw==", "organizations_url": "https://api.github.com/users/julien-c/orgs", "received_events_url": "https://api.github.com/users/julien-c/received_events", "repos_url": "https://api.github.com/users/julien-c/repos", "site_admin": false, "starred_url": "https://api.github.com/users/julien-c/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/julien-c/subscriptions", "type": "User", "url": "https://api.github.com/users/julien-c", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4827/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4827/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4827.diff", "html_url": "https://github.com/huggingface/datasets/pull/4827", "merged_at": "2022-08-11T14:46:38Z", "patch_url": "https://github.com/huggingface/datasets/pull/4827.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4827" }
https://api.github.com/repos/huggingface/datasets/issues/5869
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5869/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5869/comments
https://api.github.com/repos/huggingface/datasets/issues/5869/events
https://github.com/huggingface/datasets/issues/5869
1,711,990,003
I_kwDODunzps5mCuTz
5,869
Image Encoding Issue when submitting a Parquet Dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/47530815?v=4", "events_url": "https://api.github.com/users/PhilippeMoussalli/events{/privacy}", "followers_url": "https://api.github.com/users/PhilippeMoussalli/followers", "following_url": "https://api.github.com/users/PhilippeMoussalli/following{/other_user}", "gists_url": "https://api.github.com/users/PhilippeMoussalli/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/PhilippeMoussalli", "id": 47530815, "login": "PhilippeMoussalli", "node_id": "MDQ6VXNlcjQ3NTMwODE1", "organizations_url": "https://api.github.com/users/PhilippeMoussalli/orgs", "received_events_url": "https://api.github.com/users/PhilippeMoussalli/received_events", "repos_url": "https://api.github.com/users/PhilippeMoussalli/repos", "site_admin": false, "starred_url": "https://api.github.com/users/PhilippeMoussalli/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PhilippeMoussalli/subscriptions", "type": "User", "url": "https://api.github.com/users/PhilippeMoussalli", "user_view_type": "public" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[ "Hi @PhilippeMoussalli thanks for opening a detailed issue. It seems the issue is more related to the `datasets` library so I'll ping @lhoestq @mariosasko on this one :) \n\n(edit: also can one of you move the issue to the datasets repo? Thanks in advance 🙏)", "Hi ! The `Image()` info is stored in the **schema metadata**. More precisely there should be a \"huggingface\" field in the schema metadata that contains the `datasets` feature type of each column.\r\n\r\nTo fix your issue, you can use the same schema as the original Parquet files to write the new ones. You can also get the schema with metadata from a `Features` object, e.g.\r\n\r\n```python\r\nfrom datasets import Features, Image, Value\r\n\r\nfeatures = Features({\"image\": Image(), \"text\": Value(\"string\")})\r\nschema = features.arrow_schema\r\nprint(schema.metadata)\r\n# {b'huggingface': b'{\"info\": {\"features\": {\"image\": {\"_type\": \"Image\"}, \"text\": {\"dtype\": \"string\", \"_type\": \"Value\"}}}}'}\r\n```", "It appears that the parquet files at `hf://datasets/lambdalabs/pokemon-blip-captions` don't have this metadata, and it is defined in the dataset_infos.json instead (legacy).\r\n\r\nYou can get the right schema with the HF metadata this way:\r\n\r\n```python\r\nfrom datasets import load_dataset_builder\r\n\r\nfeatures = load_dataset_builder(\"lambdalabs/pokemon-blip-captions\").info.features\r\nschema = features.arrow_schema\r\n```", "Btw in the future we might add support for an dedicated Image extension type in Arrow so that you won't need to add the schema metadata anymore ;)", "Thanks @Wauplin @lhoestq for the quick reply :)! \r\n\r\nI tried your approach by passing the huggingface schema to the dask writer \r\n\r\n```\r\nfrom datasets import Features, Image, Value\r\ndf = dd.read_parquet(f\"hf://datasets/lambdalabs/pokemon-blip-captions\",index=False)\r\nfeatures = Features({\"image\": Image(), \"text\": Value(\"string\")})\r\nschema = features.arrow_schema\r\ndd.to_parquet(df, path = \"hf://datasets/philippemo/dummy_dataset/data\", schema=schema)\r\n```\r\nAt first it didn't work as I was not able to visualize the images, so then I manually added the `dataset_infos.json` from the example dataset and it worked :)\r\n\r\nHowever, It's not very ideal since there are some metadata in that file that need to be computed in order to load the data properly such as `num_of_bytes` and `num_examples` which might be unknown in my use case. \r\n\r\n![Screenshot from 2023-05-16 16-54-55](https://github.com/huggingface/datasets/assets/47530815/b2b448d2-d3d8-43a7-9682-9c0187a5192b)\r\n\r\nDo you have any pointers there? you mentioned that `datasets_info.json` will be deprecated/legacy. Could you point me to some example image datasets on the hub that are stored as parquet and don't have the `datasets_info.json`?\r\n\r\n", "You don't need the dataset_infos.json file as long as you have the schema with HF metadata ;)\r\nI could also check that it works fine myself on the git revision without the dataset_infos.json file.\r\n\r\nWhat made you think it didn't work ?", "> You don't need the dataset_infos.json file as long as you have the schema with HF metadata ;) I could also check that it works fine myself on the git revision without the dataset_infos.json file.\r\n> \r\n> What made you think it didn't work ?\r\n\r\nThose are two identical dataset repos where both were pushed with dask with the specified schema you mentioned above. 
I then uploaded the `dataset_infos.json` manually taken from the original example dataset into one of them. \r\n\r\n* **With schema**: https://huggingface.co/datasets/philippemo/dummy_dataset_with_schema\r\n* **Without schema**: https://huggingface.co/datasets/philippemo/dummy_dataset_without_schema\r\n\r\nYou can see that in the examples without schema the images fail to render properly. When loaded with `datasets` they return a dict and not a Pillow Image ", "I see ! I think it's a bug on our side - it should work without the metadata - let me investigate", "Alright, it's fixed: https://huggingface.co/datasets/philippemo/dummy_dataset_without_schema\r\n\r\nIt shows the image correctly now - even without the extra metadata :)", "Thanks @lhoestq! \r\nI tested pushing a dataset again without the metadata and it works perfectly! \r\nI appreciate the help", "Hi @lhoestq, \r\n\r\nI've tried pushing another dataset and I think the issue has reappeared: \r\n\r\n```\r\ndf = dd.read_parquet(f\"hf://datasets/lambdalabs/pokemon-blip-captions\")\r\nfeatures = datasets.Features({\"image\": datasets.Image(), \"text\": datasets.Value(\"string\")})\r\nschema = features.arrow_schema\r\ndd.to_parquet(df, path = \"hf://datasets/philippemo/dummy_dataset_without_schema_12_06/data\", schema=schema)\r\n```\r\n\r\nHere is the dataset: \r\n https://huggingface.co/datasets/philippemo/dummy_dataset_without_schema_12_06\r\nThe one that was working 2 weeks ago still seems to be intact; it might be that it rendered properly when it was initially submitted and something was reverted on your side afterwards:\r\nhttps://huggingface.co/datasets/philippemo/dummy_dataset_without_schema\r\n\r\nIt's weird because nothing really changed in the implementation, so it might be another issue in the hub backend. Do you have any pointers on how to resolve this? ", "We're doing some changes in the way we're handling image parquet datasets right now. We'll include the fix from https://github.com/huggingface/datasets/pull/5921 in the new datasets-server version in the coming days", "Alright, thanks for the update :). Would that be part of the new release of datasets or is it something separate? If so, where can I track it? ", "Once the new version of `datasets` is released (tomorrow probably) we'll open an issue on https://github.com/huggingface/datasets-server to update to this version :)", "Alright we did the update :) This is fixed for good now", "Yes thanks 🎉🎉🎉" ]
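For readers hitting the same problem, a hedged round-trip check (the local file path is an assumption) to confirm that the HF feature metadata discussed above actually landed in the written Parquet file:

```python
import pyarrow.parquet as pq
from datasets import Features, Image, Value

# Build the schema with HF metadata, as suggested in the thread.
features = Features({"image": Image(), "text": Value("string")})
schema = features.arrow_schema

# After writing (e.g. with dd.to_parquet(..., schema=schema)), read the file's
# schema back and verify the "huggingface" metadata key survived the round trip.
written = pq.read_schema("data/part.0.parquet")  # hypothetical local copy of the output
assert b"huggingface" in (written.metadata or {})
```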
2023-05-16T09:42:58Z
2023-06-16T12:48:38Z
2023-06-16T09:30:48Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Hello, I'd like to report an issue related to pushing a dataset represented as a Parquet file to a dataset repository using Dask. Here are the details: We attempted to load an example dataset in Parquet format from the Hugging Face (HF) filesystem using Dask with the following code snippet: ``` import dask.dataframe as dd df = dd.read_parquet("hf://datasets/lambdalabs/pokemon-blip-captions",index=False) ``` In this dataset, the "image" column is represented as a dictionary/struct with the format: ``` df = df.compute() df["image"].iloc[0].keys() -> dict_keys(['bytes', 'path']) ``` I think this is the format encoded by the [`Image`](https://huggingface.co/docs/datasets/v2.0.0/en/package_reference/main_classes#datasets.Image) feature extractor from datasets to format suitable for Arrow. The next step was to push the dataset to a repository that I created: ``` dd.to_parquet(dask_df, path = "hf://datasets/philippemo/dummy_dataset/data") ``` However, after pushing the dataset using Dask, the "image" column is now represented as the encoded dictionary `(['bytes', 'path'])`, and the images are not properly visualized. You can find the dataset here: [Link to the problematic dataset](https://huggingface.co/datasets/philippemo/dummy_dataset). It's worth noting that both the original dataset and the one submitted with Dask have the same schema with minor alterations related to metadata: **[ Schema of original dummy example.](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions/blob/main/data/train-00000-of-00001-566cc9b19d7203f8.parquet)** ``` image: struct<bytes: binary, path: null> child 0, bytes: binary child 1, path: null text: string ``` **[ Schema of pushed dataset with dask](https://huggingface.co/datasets/philippemo/dummy_dataset/blob/main/data/part.0.parquet)** ``` image: struct<bytes: binary, path: null> child 0, bytes: binary child 1, path: null text: string ``` This issue seems to be related to an encoding type that occurs when pushing a model to the hub. Normally, models should be represented as an HF dataset before pushing, but we are working with an example where we need to push large datasets using Dask. Could you please provide clarification on how to resolve this issue? Thank you! ### Reproduction To get the schema I downloaded the parquet files and used pyarrow.parquet to read the schema ``` import pyarrow.parquet pyarrow.parquet.read_schema(<path_to_parquet>, memory_map=True) ``` ### Logs _No response_ ### System info ```shell - huggingface_hub version: 0.14.1 - Platform: Linux-5.19.0-41-generic-x86_64-with-glibc2.35 - Python version: 3.10.6 - Running in iPython ?: No - Running in notebook ?: No - Running in Google Colab ?: No - Token path ?: /home/philippe/.cache/huggingface/token - Has saved token ?: True - Who am I ?: philippemo - Configured git credential helpers: cache - FastAI: N/A - Tensorflow: N/A - Torch: N/A - Jinja2: 3.1.2 - Graphviz: N/A - Pydot: N/A - Pillow: 9.4.0 - hf_transfer: N/A - gradio: N/A - ENDPOINT: https://huggingface.co - HUGGINGFACE_HUB_CACHE: /home/philippe/.cache/huggingface/hub - HUGGINGFACE_ASSETS_CACHE: /home/philippe/.cache/huggingface/assets - HF_TOKEN_PATH: /home/philippe/.cache/huggingface/token - HF_HUB_OFFLINE: False - HF_HUB_DISABLE_TELEMETRY: False - HF_HUB_DISABLE_PROGRESS_BARS: None - HF_HUB_DISABLE_SYMLINKS_WARNING: False - HF_HUB_DISABLE_EXPERIMENTAL_WARNING: False - HF_HUB_DISABLE_IMPLICIT_TOKEN: False - HF_HUB_ENABLE_HF_TRANSFER: False ```
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5869/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5869/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/5781
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5781/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5781/comments
https://api.github.com/repos/huggingface/datasets/issues/5781/events
https://github.com/huggingface/datasets/issues/5781
1,679,580,460
I_kwDODunzps5kHF0s
5,781
Error using `load_datasets`
{ "avatar_url": "https://avatars.githubusercontent.com/u/61463108?v=4", "events_url": "https://api.github.com/users/gjyoungjr/events{/privacy}", "followers_url": "https://api.github.com/users/gjyoungjr/followers", "following_url": "https://api.github.com/users/gjyoungjr/following{/other_user}", "gists_url": "https://api.github.com/users/gjyoungjr/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gjyoungjr", "id": 61463108, "login": "gjyoungjr", "node_id": "MDQ6VXNlcjYxNDYzMTA4", "organizations_url": "https://api.github.com/users/gjyoungjr/orgs", "received_events_url": "https://api.github.com/users/gjyoungjr/received_events", "repos_url": "https://api.github.com/users/gjyoungjr/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gjyoungjr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gjyoungjr/subscriptions", "type": "User", "url": "https://api.github.com/users/gjyoungjr", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "It looks like an issue with your installation of scipy, can you try reinstalling it ?", "Sorry for the late reply, but that worked @lhoestq . Thanks for the assist." ]
2023-04-22T15:10:44Z
2023-05-02T23:41:25Z
2023-05-02T23:41:25Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I tried to load a dataset using the `datasets` library in a conda jupyter notebook and got the below error. ``` ImportError: dlopen(/Users/gilbertyoung/miniforge3/envs/review_sense/lib/python3.8/site-packages/scipy/sparse/linalg/_isolve/_iterative.cpython-38-darwin.so, 0x0002): Library not loaded: @rpath/liblapack.3.dylib Referenced from: <65B094A2-59D7-31AC-A966-4DB9E11D2A15> /Users/gilbertyoung/miniforge3/envs/review_sense/lib/python3.8/site-packages/scipy/sparse/linalg/_isolve/_iterative.cpython-38-darwin.so Reason: tried: '/Users/gilbertyoung/miniforge3/envs/review_sense/lib/python3.8/site-packages/scipy/sparse/linalg/_isolve/liblapack.3.dylib' (no such file), '/Users/gilbertyoung/miniforge3/envs/review_sense/lib/python3.8/site-packages/scipy/sparse/linalg/_isolve/../../../../../../liblapack.3.dylib' (no such file), '/Users/gilbertyoung/miniforge3/envs/review_sense/lib/python3.8/site-packages/scipy/sparse/linalg/_isolve/liblapack.3.dylib' (no such file), '/Users/gilbertyoung/miniforge3/envs/review_sense/lib/python3.8/site-packages/scipy/sparse/linalg/_isolve/../../../../../../liblapack.3.dylib' (no such file), '/Users/gilbertyoung/miniforge3/envs/review_sense/bin/../lib/liblapack.3.dylib' (no such file), '/Users/gilbertyoung/miniforge3/envs/review_sense/bin/../lib/liblapack.3.dylib' (no such file), '/usr/local/lib/liblapack.3.dylib' (no such file), '/usr/lib/liblapack.3.dylib' (no such file, not in dyld cache) ``` ### Steps to reproduce the bug Run the `load_datasets` function ### Expected behavior I expected the dataset to be loaded into my notebook. ### Environment info name: review_sense channels: - apple - conda-forge dependencies: - python=3.8 - pip>=19.0 - jupyter - tensorflow-deps #- scikit-learn #- scipy - pandas - pandas-datareader - matplotlib - pillow - tqdm - requests - h5py - pyyaml - flask - boto3 - ipykernel - seaborn - pip: - tensorflow-macos==2.9 - tensorflow-metal==0.5.0 - bayesian-optimization - gym - kaggle - huggingface_hub - datasets - numpy - huggingface
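A minimal sketch (not from the report) for verifying that the reinstall suggested in the comments actually resolved the broken LAPACK linkage:

```python
# Importing the compiled extension named in the traceback reproduces the
# dlopen error if liblapack is still unresolved; a clean import means the
# reinstall worked and load_dataset should no longer trip over scipy.
import scipy
import scipy.sparse.linalg  # the module whose .so failed to load

print(scipy.__version__)
```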
{ "avatar_url": "https://avatars.githubusercontent.com/u/61463108?v=4", "events_url": "https://api.github.com/users/gjyoungjr/events{/privacy}", "followers_url": "https://api.github.com/users/gjyoungjr/followers", "following_url": "https://api.github.com/users/gjyoungjr/following{/other_user}", "gists_url": "https://api.github.com/users/gjyoungjr/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gjyoungjr", "id": 61463108, "login": "gjyoungjr", "node_id": "MDQ6VXNlcjYxNDYzMTA4", "organizations_url": "https://api.github.com/users/gjyoungjr/orgs", "received_events_url": "https://api.github.com/users/gjyoungjr/received_events", "repos_url": "https://api.github.com/users/gjyoungjr/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gjyoungjr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gjyoungjr/subscriptions", "type": "User", "url": "https://api.github.com/users/gjyoungjr", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5781/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5781/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/5373
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5373/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5373/comments
https://api.github.com/repos/huggingface/datasets/issues/5373/events
https://github.com/huggingface/datasets/pull/5373
1,501,484,197
PR_kwDODunzps5FtRU4
5,373
Simplify skipping
{ "avatar_url": "https://avatars.githubusercontent.com/u/62820084?v=4", "events_url": "https://api.github.com/users/Muennighoff/events{/privacy}", "followers_url": "https://api.github.com/users/Muennighoff/followers", "following_url": "https://api.github.com/users/Muennighoff/following{/other_user}", "gists_url": "https://api.github.com/users/Muennighoff/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Muennighoff", "id": 62820084, "login": "Muennighoff", "node_id": "MDQ6VXNlcjYyODIwMDg0", "organizations_url": "https://api.github.com/users/Muennighoff/orgs", "received_events_url": "https://api.github.com/users/Muennighoff/received_events", "repos_url": "https://api.github.com/users/Muennighoff/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Muennighoff/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Muennighoff/subscriptions", "type": "User", "url": "https://api.github.com/users/Muennighoff", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-12-17T17:23:52Z
2022-12-18T21:43:31Z
2022-12-18T21:40:21Z
CONTRIBUTOR
null
null
null
I was hoping to find a way to speed up the skipping, as I'm running into bottlenecks skipping 100M examples on C4 (it takes 12 hours to skip), but I didn't find anything better than this small change :( Maybe there's a way to directly skip whole shards to speed it up? 🧐
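For context, a sketch of the streaming pattern this PR touches (the `c4` config name and split are assumptions); `skip` still consumes examples one by one, which is why shard-level skipping would help:

```python
from datasets import load_dataset

ds = load_dataset("c4", "en", split="train", streaming=True)
# skip() iterates over (and discards) the first n examples under the hood,
# so skipping 100M examples is O(n) — the bottleneck described above.
ds = ds.skip(100_000_000)
```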
{ "avatar_url": "https://avatars.githubusercontent.com/u/62820084?v=4", "events_url": "https://api.github.com/users/Muennighoff/events{/privacy}", "followers_url": "https://api.github.com/users/Muennighoff/followers", "following_url": "https://api.github.com/users/Muennighoff/following{/other_user}", "gists_url": "https://api.github.com/users/Muennighoff/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Muennighoff", "id": 62820084, "login": "Muennighoff", "node_id": "MDQ6VXNlcjYyODIwMDg0", "organizations_url": "https://api.github.com/users/Muennighoff/orgs", "received_events_url": "https://api.github.com/users/Muennighoff/received_events", "repos_url": "https://api.github.com/users/Muennighoff/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Muennighoff/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Muennighoff/subscriptions", "type": "User", "url": "https://api.github.com/users/Muennighoff", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5373/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5373/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5373.diff", "html_url": "https://github.com/huggingface/datasets/pull/5373", "merged_at": "2022-12-18T21:40:21Z", "patch_url": "https://github.com/huggingface/datasets/pull/5373.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5373" }
https://api.github.com/repos/huggingface/datasets/issues/6886
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6886/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6886/comments
https://api.github.com/repos/huggingface/datasets/issues/6886/events
https://github.com/huggingface/datasets/issues/6886
2,286,328,984
I_kwDODunzps6IRpyY
6,886
load_dataset with data_dir and cache_dir set fail with not supported
{ "avatar_url": "https://avatars.githubusercontent.com/u/322496?v=4", "events_url": "https://api.github.com/users/fah/events{/privacy}", "followers_url": "https://api.github.com/users/fah/followers", "following_url": "https://api.github.com/users/fah/following{/other_user}", "gists_url": "https://api.github.com/users/fah/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/fah", "id": 322496, "login": "fah", "node_id": "MDQ6VXNlcjMyMjQ5Ng==", "organizations_url": "https://api.github.com/users/fah/orgs", "received_events_url": "https://api.github.com/users/fah/received_events", "repos_url": "https://api.github.com/users/fah/repos", "site_admin": false, "starred_url": "https://api.github.com/users/fah/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fah/subscriptions", "type": "User", "url": "https://api.github.com/users/fah", "user_view_type": "public" }
[]
open
false
null
[]
null
[]
2024-05-08T19:52:35Z
2024-05-08T19:58:11Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug With Python 3.11 I execute: ```py from transformers import Wav2Vec2Processor, Data2VecAudioModel import torch from torch import nn from datasets import load_dataset, concatenate_datasets # load demo audio and set processor dataset_clean = load_dataset("librispeech_asr", "clean", split="validation", data_dir="data", cache_dir="cache") ``` This fails in the last line with ```log Found cached dataset librispeech_asr (file:///Users/as/Documents/Project/git/audio2vec/cache/librispeech_asr/clean-data_dir=data/2.1.0/cff5df6e7955c80a67f80e27e7e655de71c689e2d2364bece785b972acb37fe7) Traceback (most recent call last): File "/Users/as/Documents/Project/git/audio2vec/src/music2vec-v1.py", line 7, in <module> dataset_clean = load_dataset("librispeech_asr", "clean", split="validation", data_dir="data", cache_dir="cache") ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/as/anaconda3/lib/python3.11/site-packages/datasets/load.py", line 1810, in load_dataset ds = builder_instance.as_dataset(split=split, verification_mode=verification_mode, in_memory=keep_in_memory) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/as/anaconda3/lib/python3.11/site-packages/datasets/builder.py", line 1113, in as_dataset raise NotImplementedError(f"Loading a dataset cached in a {type(self._fs).__name__} is not supported.") NotImplementedError: Loading a dataset cached in a LocalFileSystem is not supported. ``` ### Steps to reproduce the bug I set up a venv with this requirements.txt: ```txt transformers==4.40.2 torch==2.2.2 datasets==2.16.0 fsspec==2023.9.2 ``` pip freeze is: ``` aiohttp==3.9.5 aiosignal==1.3.1 attrs==23.2.0 certifi==2024.2.2 charset-normalizer==3.3.2 datasets==2.16.0 dill==0.3.7 filelock==3.14.0 frozenlist==1.4.1 fsspec==2023.9.2 huggingface-hub==0.23.0 idna==3.7 Jinja2==3.1.4 MarkupSafe==2.1.5 mpmath==1.3.0 multidict==6.0.5 multiprocess==0.70.15 networkx==3.3 numpy==1.26.4 packaging==24.0 pandas==2.2.2 pyarrow==16.0.0 pyarrow-hotfix==0.6 python-dateutil==2.9.0.post0 pytz==2024.1 PyYAML==6.0.1 regex==2024.4.28 requests==2.31.0 safetensors==0.4.3 six==1.16.0 sympy==1.12 tokenizers==0.19.1 torch==2.2.2 tqdm==4.66.4 transformers==4.40.2 typing_extensions==4.11.0 tzdata==2024.1 urllib3==2.2.1 xxhash==3.4.1 yarl==1.9.4 ``` I execute this on an M1 Mac. ### Expected behavior I don't understand the error message. Why is "local" caching not supported? Would it be possible to add a hint to the error message on how to solve this issue? ### Environment info source .... python -u example.py
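A first diagnostic step (an assumption, not a confirmed fix): confirm which `datasets` and `fsspec` installs the interpreter actually resolves, since the traceback points at an anaconda `site-packages` path rather than the venv, and a stale or mismatched pair is a common cause of this `NotImplementedError`:

```python
# Print the versions and install locations that are actually imported; if the
# paths point at an old anaconda site-packages instead of the venv, the pins
# in requirements.txt are not the code that runs.
import datasets
import fsspec

print(datasets.__version__, datasets.__file__)
print(fsspec.__version__, fsspec.__file__)
```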
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6886/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6886/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/6995
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6995/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6995/comments
https://api.github.com/repos/huggingface/datasets/issues/6995/events
https://github.com/huggingface/datasets/issues/6995
2,370,713,475
I_kwDODunzps6NTjeD
6,995
ImportError when importing datasets.load_dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/124846947?v=4", "events_url": "https://api.github.com/users/Leo-Lsc/events{/privacy}", "followers_url": "https://api.github.com/users/Leo-Lsc/followers", "following_url": "https://api.github.com/users/Leo-Lsc/following{/other_user}", "gists_url": "https://api.github.com/users/Leo-Lsc/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Leo-Lsc", "id": 124846947, "login": "Leo-Lsc", "node_id": "U_kgDOB3EDYw", "organizations_url": "https://api.github.com/users/Leo-Lsc/orgs", "received_events_url": "https://api.github.com/users/Leo-Lsc/received_events", "repos_url": "https://api.github.com/users/Leo-Lsc/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Leo-Lsc/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Leo-Lsc/subscriptions", "type": "User", "url": "https://api.github.com/users/Leo-Lsc", "user_view_type": "public" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
null
[ "What is the version of your installed `huggingface-hub`:\r\n```python\r\nimport huggingface_hub\r\nprint(huggingface_hub.__version__)\r\n```\r\n\r\nIt seems you have a very old version of `huggingface-hub`, where `CommitInfo` was not still implemented. You need to update it:\r\n```\r\npip install -U huggingface-hub\r\n```\r\n\r\nNote that `CommitInfo` was implemented in huggingface-hub 0.10.0 and datasets requires \"huggingface-hub>=0.21.2\"", "The version of my huggingface-hub is 0.23.4.", "The error message says there is no CommitInfo in your installed huggingface-hub library:\r\n```\r\nImportError: cannot import name 'CommitInfo' from 'huggingface_hub' (D:\\Anaconda3\\envs\\CS224S\\Lib\\site-packages\\huggingface_hub_init_.py)\r\n```\r\n\r\nAnd this is implemented since version 0.10.0:\r\n- https://github.com/huggingface/huggingface_hub/pull/1066", "I am getting the exact same issue when I `import datasets`. The version of my huggingface-hub is also 0.23.4. I dont see a solution in the comments. Not sure why is this issue closed?", "I closed the issue because the problem is not related to the `datasets` library.\r\n\r\nThe problem is with your local Python environment: it seems corrupted. You could try to remove it and regenerate it again.", "I have recreated my conda environment but still run into the same issue. Here is my environment:\r\n```\r\nconda create --name esm python=3.10\r\n conda activate esm\r\n conda install pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia\r\n pip3 install -r requirements.txt\r\n```\r\nRequirements.txt\r\n```\r\naccelerate\r\ndatasets==2.20.0\r\npyfastx\r\ntransformers\r\nboto3\r\nhuggingface_hub==0.23.4\r\n```\r\n\r\nAnd then I get:\r\n```\r\n>>> import datasets\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/fsx/ubuntu/miniconda3/envs/esm2/lib/python3.10/site-packages/datasets/__init__.py\", line 17, in <module>\r\n from .arrow_dataset import Dataset\r\n File \"/fsx/ubuntu/miniconda3/envs/esm2/lib/python3.10/site-packages/datasets/arrow_dataset.py\", line 63, in <module>\r\n from huggingface_hub import (\r\nImportError: cannot import name 'CommitInfo' from 'huggingface_hub' (/fsx/ubuntu/miniconda3/envs/esm2/lib/python3.10/site-packages/huggingface_hub/__init__.py)\r\n>>>\r\n```\r\n\r\n", "You can check:\r\n```\r\n>>> import huggingface_hub\r\n>>> print(huggingface_hub.__version__)\r\n```", "This is what I see:\r\n```\r\n>>> import huggingface_hub\r\n>>> print(huggingface_hub.__version__)\r\n0.23.4\r\n```", "Installing `chardet` makes it work for some reason" ]
2024-06-24T17:07:22Z
2024-11-14T01:42:09Z
2024-06-25T06:11:37Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I encountered an ImportError while trying to import `load_dataset` from the `datasets` module in Hugging Face. The error message indicates a problem with importing 'CommitInfo' from 'huggingface_hub'. ### Steps to reproduce the bug 1. pip install git+https://github.com/huggingface/datasets 2. from datasets import load_dataset ### Expected behavior ImportError Traceback (most recent call last) Cell In[7], [line 1](vscode-notebook-cell:?execution_count=7&line=1) ----> [1](vscode-notebook-cell:?execution_count=7&line=1) from datasets import load_dataset [3](vscode-notebook-cell:?execution_count=7&line=3) train_set = load_dataset("mispeech/speechocean762", split="train") [4](vscode-notebook-cell:?execution_count=7&line=4) test_set = load_dataset("mispeech/speechocean762", split="test") File d:\Anaconda3\envs\CS224S\Lib\site-packages\datasets\__init__.py:[1](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/__init__.py:1)7 1 # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. [2](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/__init__.py:2) # [3](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/__init__.py:3) # Licensed under the Apache License, Version 2.0 (the "License"); (...) [12](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/__init__.py:12) # See the License for the specific language governing permissions and [13](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/__init__.py:13) # limitations under the License. [15](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/__init__.py:15) __version__ = "2.20.1.dev0" ---> [17](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/__init__.py:17) from .arrow_dataset import Dataset [18](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/__init__.py:18) from .arrow_reader import ReadInstruction [19](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/__init__.py:19) from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder File d:\Anaconda3\envs\CS224S\Lib\site-packages\datasets\arrow_dataset.py:63 [61](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/arrow_dataset.py:61) import pyarrow.compute as pc [62](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/arrow_dataset.py:62) from fsspec.core import url_to_fs ---> [63](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/arrow_dataset.py:63) from huggingface_hub import ( [64](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/arrow_dataset.py:64) CommitInfo, [65](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/arrow_dataset.py:65) CommitOperationAdd, ... [70](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/arrow_dataset.py:70) ) [71](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/arrow_dataset.py:71) from huggingface_hub.hf_api import RepoFile [72](file:///D:/Anaconda3/envs/CS224S/Lib/site-packages/datasets/arrow_dataset.py:72) from multiprocess import Pool ImportError: cannot import name 'CommitInfo' from 'huggingface_hub' (d:\Anaconda3\envs\CS224S\Lib\site-packages\huggingface_hub\__init__.py)
### Environment info Leo@DESKTOP-9NHUAMI MSYS /d/Anaconda3/envs/CS224S/Lib/site-packages/huggingface_hub $ datasets-cli env Traceback (most recent call last): File "<frozen runpy>", line 198, in _run_module_as_main File "<frozen runpy>", line 88, in _run_code File "D:\Anaconda3\envs\CS224S\Scripts\datasets-cli.exe\__main__.py", line 4, in <module> File "D:\Anaconda3\envs\CS224S\Lib\site-packages\datasets\__init__.py", line 17, in <module> from .arrow_dataset import Dataset File "D:\Anaconda3\envs\CS224S\Lib\site-packages\datasets\arrow_dataset.py", line 63, in <module> from huggingface_hub import ( ImportError: cannot import name 'CommitInfo' from 'huggingface_hub' (D:\Anaconda3\envs\CS224S\Lib\site-packages\huggingface_hub\__init__.py) (CS224S)
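A hedged sanity check along the lines of the comments above: confirm which `huggingface_hub` installation the interpreter resolves before retrying the failing import.

```python
import huggingface_hub

# If __file__ points somewhere unexpected (e.g. a stale copy shadowing the
# conda env), the version pip reports is irrelevant to what actually imports.
print(huggingface_hub.__version__, huggingface_hub.__file__)

from huggingface_hub import CommitInfo  # the import that fails in the report
```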
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6995/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6995/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7008
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7008/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7008/comments
https://api.github.com/repos/huggingface/datasets/issues/7008/events
https://github.com/huggingface/datasets/issues/7008
2,379,591,141
I_kwDODunzps6N1a3l
7,008
Support ruff 0.5.0 in CI
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "color": "d4c5f9", "default": false, "description": "Maintenance tasks", "id": 4296013012, "name": "maintenance", "node_id": "LA_kwDODunzps8AAAABAA_01A", "url": "https://api.github.com/repos/huggingface/datasets/labels/maintenance" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
null
[]
2024-06-28T05:11:26Z
2024-06-28T07:11:18Z
2024-06-28T07:11:18Z
MEMBER
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
Support ruff 0.5.0 in CI. Also revert: - #7007
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7008/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7008/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/6505
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6505/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6505/comments
https://api.github.com/repos/huggingface/datasets/issues/6505/events
https://github.com/huggingface/datasets/issues/6505
2,044,721,288
I_kwDODunzps553_iI
6,505
Got stuck when I trying to load a dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/18232551?v=4", "events_url": "https://api.github.com/users/yirenpingsheng/events{/privacy}", "followers_url": "https://api.github.com/users/yirenpingsheng/followers", "following_url": "https://api.github.com/users/yirenpingsheng/following{/other_user}", "gists_url": "https://api.github.com/users/yirenpingsheng/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yirenpingsheng", "id": 18232551, "login": "yirenpingsheng", "node_id": "MDQ6VXNlcjE4MjMyNTUx", "organizations_url": "https://api.github.com/users/yirenpingsheng/orgs", "received_events_url": "https://api.github.com/users/yirenpingsheng/received_events", "repos_url": "https://api.github.com/users/yirenpingsheng/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yirenpingsheng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yirenpingsheng/subscriptions", "type": "User", "url": "https://api.github.com/users/yirenpingsheng", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "I ran into the same problem when I used a server cluster (Slurm system managed) that couldn't load any of the huggingface datasets or models, but it worked on my laptop. I suspected some system configuration-related problem, but I had no idea. \r\nMy problems are consistent with [issue #2618](https://github.com/huggingface/datasets/issues/2618). All the huggingface-related libraries I use are the latest versions.\r\n\r\n", "> I ran into the same problem when I used a server cluster (Slurm system managed) that couldn't load any of the huggingface datasets or models, but it worked on my laptop. I suspected some system configuration-related problem, but I had no idea. My problems are consistent with [issue #2618](https://github.com/huggingface/datasets/issues/2618). All the huggingface-related libraries I use are the latest versions.\r\n\r\nhave you solved this issue yet? i met the same problem on server but everything works on laptop. I think maybe the filelock repo is contradictory with file system.", "I am having the same issue on a computing cluster but this works on my laptop as well. I instead have this error:\r\n`/home/.conda/envs/py10/lib/python3.10/site-packages/filelock/_unix.py\", line 43, in _acquire\r\n fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)\r\nOSError: [Errno 5] Input/output error`\r\n\r\nthe load_dataset command does not work on server for local or hosted hugging-face datasets, and I have tried for several files", "Same here. Is there any solution?", "In my case, `.cahce` was in a shared folder. Moving it into the user's home folder fixed the problem. #2618 for more details", "> In my case, `.cahce` was in a shared folder. Moving it into the user's home folder fixed the problem. #2618 for more details在我的情况下, `.cahce` 在一个共享文件夹中。将其移动到用户的主文件夹中解决了问题。 #2618 获取更多详细信息。\r\n\r\nCan you be more specific? thank.", "https://research.google.com/colaboratory/faq.html#drive-timeout\r\n\r\nIf it is in colab this could be the reason" ]
2023-12-16T11:51:07Z
2024-12-24T16:45:52Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug Hello, everyone. I met a problem when I am trying to load a data file using load_dataset method on a Debian 10 system. The data file is not very large, only 1.63MB with 600 records. Here is my code: from datasets import load_dataset dataset = load_dataset('json', data_files='mypath/oaast_rm_zh.json') I waited it for 20 minutes. It still no response. I cannot using Ctrl+C to cancel the command. I have to use Ctrl+Z to kill it. I also try it with a txt file, it still no response in a long time. I can load the same file successfully using my laptop (windows 10, python 3.8.5, datasets==2.14.5). I can also make it on another computer (Ubuntu 20.04.5 LTS, python 3.10.13, datasets 2.14.7). It only takes me 1-2 miniutes. Could you give me some suggestions? Thank you. ### Steps to reproduce the bug from datasets import load_dataset dataset = load_dataset('json', data_files='mypath/oaast_rm_zh.json') ### Expected behavior I hope it can load the file successfully. ### Environment info OS: Debian GNU/Linux 10 Python: Python 3.10.13 Pip list: Package Version ------------------------- ------------ accelerate 0.25.0 addict 2.4.0 aiofiles 23.2.1 aiohttp 3.9.1 aiosignal 1.3.1 aliyun-python-sdk-core 2.14.0 aliyun-python-sdk-kms 2.16.2 altair 5.2.0 annotated-types 0.6.0 anyio 3.7.1 async-timeout 4.0.3 attrs 23.1.0 certifi 2023.11.17 cffi 1.16.0 charset-normalizer 3.3.2 click 8.1.7 contourpy 1.2.0 crcmod 1.7 cryptography 41.0.7 cycler 0.12.1 datasets 2.14.7 dill 0.3.7 docstring-parser 0.15 einops 0.7.0 exceptiongroup 1.2.0 fastapi 0.105.0 ffmpy 0.3.1 filelock 3.13.1 fonttools 4.46.0 frozenlist 1.4.1 fsspec 2023.10.0 gast 0.5.4 gradio 3.50.2 gradio_client 0.6.1 h11 0.14.0 httpcore 1.0.2 httpx 0.25.2 huggingface-hub 0.19.4 idna 3.6 importlib-metadata 7.0.0 importlib-resources 6.1.1 jieba 0.42.1 Jinja2 3.1.2 jmespath 0.10.0 joblib 1.3.2 jsonschema 4.20.0 jsonschema-specifications 2023.11.2 kiwisolver 1.4.5 markdown-it-py 3.0.0 MarkupSafe 2.1.3 matplotlib 3.8.2 mdurl 0.1.2 modelscope 1.10.0 mpmath 1.3.0 multidict 6.0.4 multiprocess 0.70.15 networkx 3.2.1 nltk 3.8.1 numpy 1.26.2 nvidia-cublas-cu12 12.1.3.1 nvidia-cuda-cupti-cu12 12.1.105 nvidia-cuda-nvrtc-cu12 12.1.105 nvidia-cuda-runtime-cu12 12.1.105 nvidia-cudnn-cu12 8.9.2.26 nvidia-cufft-cu12 11.0.2.54 nvidia-curand-cu12 10.3.2.106 nvidia-cusolver-cu12 11.4.5.107 nvidia-cusparse-cu12 12.1.0.106 nvidia-nccl-cu12 2.18.1 nvidia-nvjitlink-cu12 12.3.101 nvidia-nvtx-cu12 12.1.105 orjson 3.9.10 oss2 2.18.3 packaging 23.2 pandas 2.1.4 peft 0.7.1 Pillow 10.1.0 pip 23.3.1 platformdirs 4.1.0 protobuf 4.25.1 psutil 5.9.6 pyarrow 14.0.1 pyarrow-hotfix 0.6 pycparser 2.21 pycryptodome 3.19.0 pydantic 2.5.2 pydantic_core 2.14.5 pydub 0.25.1 Pygments 2.17.2 pyparsing 3.1.1 python-dateutil 2.8.2 python-multipart 0.0.6 pytz 2023.3.post1 PyYAML 6.0.1 referencing 0.32.0 regex 2023.10.3 requests 2.31.0 rich 13.7.0 rouge-chinese 1.0.3 rpds-py 0.13.2 safetensors 0.4.1 scipy 1.11.4 semantic-version 2.10.0 sentencepiece 0.1.99 setuptools 68.2.2 shtab 1.6.5 simplejson 3.19.2 six 1.16.0 sniffio 1.3.0 sortedcontainers 2.4.0 sse-starlette 1.8.2 starlette 0.27.0 sympy 1.12 tiktoken 0.5.2 tokenizers 0.15.0 tomli 2.0.1 toolz 0.12.0 torch 2.1.2 tqdm 4.66.1 transformers 4.36.1 triton 2.1.0 trl 0.7.4 typing_extensions 4.9.0 tyro 0.6.0 tzdata 2023.3 urllib3 2.1.0 uvicorn 0.24.0.post1 websockets 11.0.3 wheel 0.41.2 xxhash 3.4.1 yapf 0.40.2 yarl 1.9.4 zipp 3.17.0
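A hedged debugging sketch for a hang like this (the cache path is a placeholder): turn on debug logging and point the cache at a non-shared local directory, since the comments above point at file locks on shared `.cache` folders as a cause of silent hangs.

```python
import datasets
from datasets import load_dataset

# Surface what load_dataset is doing while it appears to hang.
datasets.logging.set_verbosity_debug()

# Use a cache directory on a local, non-shared filesystem; file locks on
# shared/network home folders are a suspected cause of the hang above.
dataset = load_dataset(
    "json",
    data_files="mypath/oaast_rm_zh.json",  # path from the report
    cache_dir="/tmp/hf_datasets_cache",  # placeholder local path
)
```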
null
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/6505/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6505/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/5343
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5343/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5343/comments
https://api.github.com/repos/huggingface/datasets/issues/5343/events
https://github.com/huggingface/datasets/issues/5343
1,485,297,823
I_kwDODunzps5Yh9if
5,343
T5 for Q&A produces truncated sentence
{ "avatar_url": "https://avatars.githubusercontent.com/u/13484072?v=4", "events_url": "https://api.github.com/users/junyongyou/events{/privacy}", "followers_url": "https://api.github.com/users/junyongyou/followers", "following_url": "https://api.github.com/users/junyongyou/following{/other_user}", "gists_url": "https://api.github.com/users/junyongyou/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/junyongyou", "id": 13484072, "login": "junyongyou", "node_id": "MDQ6VXNlcjEzNDg0MDcy", "organizations_url": "https://api.github.com/users/junyongyou/orgs", "received_events_url": "https://api.github.com/users/junyongyou/received_events", "repos_url": "https://api.github.com/users/junyongyou/repos", "site_admin": false, "starred_url": "https://api.github.com/users/junyongyou/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/junyongyou/subscriptions", "type": "User", "url": "https://api.github.com/users/junyongyou", "user_view_type": "public" }
[]
closed
false
null
[]
null
[]
2022-12-08T19:48:46Z
2022-12-08T19:57:17Z
2022-12-08T19:57:17Z
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
Dear all, I am fine-tuning T5 for a Q&A task using the MedQuAD ([GitHub - abachaa/MedQuAD: Medical Question Answering Dataset of 47,457 QA pairs created from 12 NIH websites](https://github.com/abachaa/MedQuAD)) dataset. In the dataset, there are many long answers with thousands of words. I have used pytorch_lightning to train the T5-large model. I have two questions. For example, I set the max_length, max_input_length, and max_output_length all to 128. How should I deal with those long answers? I just left them as is, assuming the T5Tokenizer would handle them automatically. I would assume the tokenizer just truncates an answer at the position of the 128th word (or 127th). Would it make sense to manually split an answer into parts of 128 words each, with all these sub-answers serving as separate answers to the same question? Another question is that I get incomplete (truncated) answers when using the fine-tuned model in inference, even though the predicted answer is shorter than 128 words. I found a message posted 2 years ago saying that one should add `</s>` at the end of texts when fine-tuning T5. I followed that but then got a warning message that duplicated `</s>` tokens were found. I am assuming that this is because the tokenizer truncates an answer text, so `</s>` is missing from the truncated answer, and the end token is then not produced in the predicted answer. However, I am not sure. Can anybody point out how to address this issue? Any suggestions are highly appreciated. Below is some code snippet. ` import pytorch_lightning as pl from torch.utils.data import DataLoader import torch import numpy as np import time from pathlib import Path from transformers import ( Adafactor, T5ForConditionalGeneration, T5Tokenizer, get_linear_schedule_with_warmup ) from torch.utils.data import RandomSampler from question_answering.utils import * class T5FineTuner(pl.LightningModule): def __init__(self, hyparams): super(T5FineTuner, self).__init__() self.hyparams = hyparams self.model = T5ForConditionalGeneration.from_pretrained(hyparams.model_name_or_path) self.tokenizer = T5Tokenizer.from_pretrained(hyparams.tokenizer_name_or_path) if self.hyparams.freeze_embeds: self.freeze_embeds() if self.hyparams.freeze_encoder: self.freeze_params(self.model.get_encoder()) # assert_all_frozen() self.step_count = 0 self.output_dir = Path(self.hyparams.output_dir) n_observations_per_split = { 'train': self.hyparams.n_train, 'validation': self.hyparams.n_val, 'test': self.hyparams.n_test } self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} self.em_score_list = [] self.subset_score_list = [] data_folder = r'C:\Datasets\MedQuAD-master' self.train_data, self.val_data, self.test_data = load_medqa_data(data_folder) def freeze_params(self, model): for param in model.parameters(): param.requires_grad = False def freeze_embeds(self): try: self.freeze_params(self.model.model.shared) for d in [self.model.model.encoder, self.model.model.decoder]: self.freeze_params(d.embed_positions) self.freeze_params(d.embed_tokens) except AttributeError: self.freeze_params(self.model.shared) for d in [self.model.encoder, self.model.decoder]: self.freeze_params(d.embed_tokens) def lmap(self, f, x): return list(map(f, x)) def is_logger(self): return self.trainer.proc_rank <= 0 def forward(self, input_ids, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, labels=None): return self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask,
labels=labels ) def _step(self, batch): labels = batch['target_ids'] labels[labels[:, :] == self.tokenizer.pad_token_id] = -100 outputs = self( input_ids = batch['source_ids'], attention_mask=batch['source_mask'], labels=labels, decoder_attention_mask=batch['target_mask'] ) loss = outputs[0] return loss def ids_to_clean_text(self, generated_ids): gen_text = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True) return self.lmap(str.strip, gen_text) def _generative_step(self, batch): t0 = time.time() generated_ids = self.model.generate( batch["source_ids"], attention_mask=batch["source_mask"], use_cache=True, decoder_attention_mask=batch['target_mask'], max_length=128, num_beams=2, early_stopping=True ) preds = self.ids_to_clean_text(generated_ids) targets = self.ids_to_clean_text(batch["target_ids"]) gen_time = (time.time() - t0) / batch["source_ids"].shape[0] loss = self._step(batch) base_metrics = {'val_loss': loss} summ_len = np.mean(self.lmap(len, generated_ids)) base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=targets) em_score, subset_match_score = calculate_scores(preds, targets) self.em_score_list.append(em_score) self.subset_score_list.append(subset_match_score) em_score = torch.tensor(em_score, dtype=torch.float32) subset_match_score = torch.tensor(subset_match_score, dtype=torch.float32) base_metrics.update(em_score=em_score, subset_match_score=subset_match_score) # rouge_results = self.rouge_metric.compute() # rouge_dict = self.parse_score(rouge_results) return base_metrics def training_step(self, batch, batch_idx): loss = self._step(batch) tensorboard_logs = {'train_loss': loss} return {'loss': loss, 'log': tensorboard_logs} def training_epoch_end(self, outputs): avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean() tensorboard_logs = {'avg_train_loss': avg_train_loss} # return {'avg_train_loss': avg_train_loss, 'log': tensorboard_logs, 'progress_bar': tensorboard_logs} def validation_step(self, batch, batch_idx): return self._generative_step(batch) def validation_epoch_end(self, outputs): avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean() tensorboard_logs = {'val_loss': avg_loss} if len(self.em_score_list) <= 2: average_em_score = sum(self.em_score_list) / len(self.em_score_list) average_subset_match_score = sum(self.subset_score_list) / len(self.subset_score_list) else: latest_em_score = self.em_score_list[:-2] latest_subset_score = self.subset_score_list[:-2] average_em_score = sum(latest_em_score) / len(latest_em_score) average_subset_match_score = sum(latest_subset_score) / len(latest_subset_score) average_em_score = torch.tensor(average_em_score, dtype=torch.float32) average_subset_match_score = torch.tensor(average_subset_match_score, dtype=torch.float32) tensorboard_logs.update(em_score=average_em_score, subset_match_score=average_subset_match_score) self.target_gen = [] self.prediction_gen = [] return { 'avg_val_loss': avg_loss, 'em_score': average_em_score, 'subset_match_socre': average_subset_match_score, 'log': tensorboard_logs, 'progress_bar': tensorboard_logs } def configure_optimizers(self): model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.hyparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = 
Adafactor(optimizer_grouped_parameters, lr=self.hyparams.learning_rate, scale_parameter=False, relative_step=False) self.opt = optimizer return [optimizer] def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure=None, on_tpu=False, using_native_amp=False, using_lbfgs=False): optimizer.step(closure=optimizer_closure) optimizer.zero_grad() self.lr_scheduler.step() def get_tqdm_dict(self): tqdm_dict = {"loss": "{:.3f}".format(self.trainer.avg_loss), "lr": self.lr_scheduler.get_last_lr()[-1]} return tqdm_dict def train_dataloader(self): n_samples = self.n_obs['train'] train_dataset = get_dataset(tokenizer=self.tokenizer, data=self.train_data, num_samples=n_samples, args=self.hyparams) sampler = RandomSampler(train_dataset) dataloader = DataLoader(train_dataset, sampler=sampler, batch_size=self.hyparams.train_batch_size, drop_last=True, num_workers=4) # t_total = ( # (len(dataloader.dataset) // (self.hyparams.train_batch_size * max(1, self.hyparams.n_gpu))) # // self.hyparams.gradient_accumulation_steps # * float(self.hyparams.num_train_epochs) # ) t_total = 100000 scheduler = get_linear_schedule_with_warmup( self.opt, num_warmup_steps=self.hyparams.warmup_steps, num_training_steps=t_total ) self.lr_scheduler = scheduler return dataloader def val_dataloader(self): n_samples = self.n_obs['validation'] validation_dataset = get_dataset(tokenizer=self.tokenizer, data=self.val_data, num_samples=n_samples, args=self.hyparams) sampler = RandomSampler(validation_dataset) return DataLoader(validation_dataset, shuffle=False, batch_size=self.hyparams.eval_batch_size, sampler=sampler, num_workers=4) def test_dataloader(self): n_samples = self.n_obs['test'] test_dataset = get_dataset(tokenizer=self.tokenizer, data=self.test_data, num_samples=n_samples, args=self.hyparams) return DataLoader(test_dataset, batch_size=self.hyparams.eval_batch_size, num_workers=4) def on_save_checkpoint(self, checkpoint): save_path = self.output_dir.joinpath("best_tfmr") self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) import os import argparse import pytorch_lightning as pl from question_answering.t5_closed_book import T5FineTuner if __name__ == '__main__': args_dict = dict( output_dir="", # path to save the checkpoints model_name_or_path='t5-large', tokenizer_name_or_path='t5-large', max_input_length=128, max_output_length=128, freeze_encoder=False, freeze_embeds=False, learning_rate=1e-5, weight_decay=0.0, adam_epsilon=1e-8, warmup_steps=0, train_batch_size=4, eval_batch_size=4, num_train_epochs=2, gradient_accumulation_steps=10, n_gpu=1, resume_from_checkpoint=None, val_check_interval=0.5, n_val=4000, n_train=-1, n_test=-1, early_stop_callback=False, fp_16=False, opt_level='O1', max_grad_norm=1.0, seed=101, ) args_dict.update({'output_dir': 't5_large_MedQuAD_256', 'num_train_epochs': 100, 'train_batch_size': 16, 'eval_batch_size': 16, 'learning_rate': 1e-3}) args = argparse.Namespace(**args_dict) checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath=args.output_dir, monitor="em_score", mode="max", save_top_k=1) ## If resuming from checkpoint, add an arg resume_from_checkpoint train_params = dict( accumulate_grad_batches=args.gradient_accumulation_steps, gpus=args.n_gpu, max_epochs=args.num_train_epochs, # early_stop_callback=False, precision=16 if args.fp_16 else 32, # amp_level=args.opt_level, # resume_from_checkpoint=args.resume_from_checkpoint, gradient_clip_val=args.max_grad_norm, 
checkpoint_callback=checkpoint_callback, val_check_interval=args.val_check_interval, # accelerator='dp' # logger=wandb_logger, # callbacks=[LoggingCallback()], ) model = T5FineTuner(args) trainer = pl.Trainer(**train_params) trainer.fit(model) `
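For reference, a minimal sketch of the two ideas raised above, assuming the stock `t5-large` tokenizer; the helper names `encode_target` and `chunk_target` are illustrative and not part of the snippet above:

```python
# Hedged sketch, not the snippet's actual preprocessing:
# (1) reserve the last position for EOS so `</s>` survives truncation, and
# (2) split a long answer into fixed-size chunks that each become a target.
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-large")
MAX_LEN = 128

def encode_target(answer):
    # Tokenize without special tokens, truncate to MAX_LEN - 1, then
    # append the EOS id ourselves so it is never cut off by truncation.
    ids = tokenizer(answer, add_special_tokens=False,
                    truncation=True, max_length=MAX_LEN - 1)["input_ids"]
    return ids + [tokenizer.eos_token_id]

def chunk_target(answer, stride=MAX_LEN - 1):
    # Split a long answer into consecutive (MAX_LEN - 1)-token windows;
    # each window gets its own EOS and is paired with the same question.
    ids = tokenizer(answer, add_special_tokens=False)["input_ids"]
    return [ids[i:i + stride] + [tokenizer.eos_token_id]
            for i in range(0, len(ids), stride)]
```

Note that recent versions of `T5Tokenizer` already append `</s>` when `add_special_tokens=True`, which is presumably why adding it to the raw text triggers the duplicated-EOS warning; appending the id after truncation, as above, sidesteps both problems. Whether per-chunk targets actually help is an open modeling question, since the chunks are no longer self-contained answers.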
{ "avatar_url": "https://avatars.githubusercontent.com/u/13484072?v=4", "events_url": "https://api.github.com/users/junyongyou/events{/privacy}", "followers_url": "https://api.github.com/users/junyongyou/followers", "following_url": "https://api.github.com/users/junyongyou/following{/other_user}", "gists_url": "https://api.github.com/users/junyongyou/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/junyongyou", "id": 13484072, "login": "junyongyou", "node_id": "MDQ6VXNlcjEzNDg0MDcy", "organizations_url": "https://api.github.com/users/junyongyou/orgs", "received_events_url": "https://api.github.com/users/junyongyou/received_events", "repos_url": "https://api.github.com/users/junyongyou/repos", "site_admin": false, "starred_url": "https://api.github.com/users/junyongyou/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/junyongyou/subscriptions", "type": "User", "url": "https://api.github.com/users/junyongyou", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5343/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5343/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/7176
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7176/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7176/comments
https://api.github.com/repos/huggingface/datasets/issues/7176/events
https://github.com/huggingface/datasets/pull/7176
2,551,025,564
PR_kwDODunzps580hTn
7,176
fix grammar in fingerprint.py
{ "avatar_url": "https://avatars.githubusercontent.com/u/13238952?v=4", "events_url": "https://api.github.com/users/jxmorris12/events{/privacy}", "followers_url": "https://api.github.com/users/jxmorris12/followers", "following_url": "https://api.github.com/users/jxmorris12/following{/other_user}", "gists_url": "https://api.github.com/users/jxmorris12/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jxmorris12", "id": 13238952, "login": "jxmorris12", "node_id": "MDQ6VXNlcjEzMjM4OTUy", "organizations_url": "https://api.github.com/users/jxmorris12/orgs", "received_events_url": "https://api.github.com/users/jxmorris12/received_events", "repos_url": "https://api.github.com/users/jxmorris12/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jxmorris12/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jxmorris12/subscriptions", "type": "User", "url": "https://api.github.com/users/jxmorris12", "user_view_type": "public" }
[]
open
false
null
[]
null
[]
2024-09-26T16:13:42Z
2024-09-26T16:13:42Z
null
CONTRIBUTOR
null
null
null
I see this error all the time and it was starting to get to me.
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7176/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7176/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7176.diff", "html_url": "https://github.com/huggingface/datasets/pull/7176", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/7176.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7176" }
https://api.github.com/repos/huggingface/datasets/issues/5668
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5668/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5668/comments
https://api.github.com/repos/huggingface/datasets/issues/5668/events
https://github.com/huggingface/datasets/pull/5668
1,638,018,598
PR_kwDODunzps5MwuIp
5,668
Support for downloading only provided split
{ "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/polinaeterna", "id": 16348744, "login": "polinaeterna", "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "repos_url": "https://api.github.com/users/polinaeterna/repos", "site_admin": false, "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "type": "User", "url": "https://api.github.com/users/polinaeterna", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_5668). All of your documentation changes will be reflected on that endpoint.", "My previous comment didn't create the retro-link in the PR. I write it here again.\r\n\r\nYou can check the context and the discussions we had about this feature enhancement in this PR:\r\n- #2249" ]
2023-03-23T17:53:39Z
2023-03-24T06:43:14Z
null
CONTRIBUTOR
null
null
null
We can pass the requested split to `_split_generators()`. But I'm not sure if it's possible to solve the cache issues, mainly with `dataset_info.json`.
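As a rough illustration of the idea (not this PR's actual implementation; the `splits` parameter and the URLs below are hypothetical), a builder that received the requested splits could restrict what `_split_generators()` downloads:

```python
# Hypothetical: suppose the library forwarded the user-requested splits
# to _split_generators(); a builder could then download only those files.
import json

import datasets

_URLS = {  # hypothetical per-split data files
    "train": "https://example.com/train.jsonl",
    "validation": "https://example.com/validation.jsonl",
}

class MyDataset(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"text": datasets.Value("string")})
        )

    def _split_generators(self, dl_manager, splits=None):
        # `splits=None` keeps today's behavior (download everything);
        # otherwise only the requested subset is fetched.
        wanted = splits or list(_URLS)
        files = dl_manager.download({s: _URLS[s] for s in wanted})
        return [
            datasets.SplitGenerator(name=s, gen_kwargs={"filepath": files[s]})
            for s in wanted
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, {"text": json.loads(line)["text"]}
```

Even with something like this, the caching concern above remains: `dataset_info.json` records info for all splits, so a partial download would still need a story for invalidating or completing the cached info.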
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5668/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5668/timeline
null
null
1
{ "diff_url": "https://github.com/huggingface/datasets/pull/5668.diff", "html_url": "https://github.com/huggingface/datasets/pull/5668", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/5668.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5668" }
https://api.github.com/repos/huggingface/datasets/issues/5032
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5032/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5032/comments
https://api.github.com/repos/huggingface/datasets/issues/5032/events
https://github.com/huggingface/datasets/issues/5032
1,388,270,935
I_kwDODunzps5Sv1VX
5,032
new dataset type: single-label and multi-label video classification
{ "avatar_url": "https://avatars.githubusercontent.com/u/34196005?v=4", "events_url": "https://api.github.com/users/fcakyon/events{/privacy}", "followers_url": "https://api.github.com/users/fcakyon/followers", "following_url": "https://api.github.com/users/fcakyon/following{/other_user}", "gists_url": "https://api.github.com/users/fcakyon/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/fcakyon", "id": 34196005, "login": "fcakyon", "node_id": "MDQ6VXNlcjM0MTk2MDA1", "organizations_url": "https://api.github.com/users/fcakyon/orgs", "received_events_url": "https://api.github.com/users/fcakyon/received_events", "repos_url": "https://api.github.com/users/fcakyon/repos", "site_admin": false, "starred_url": "https://api.github.com/users/fcakyon/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fcakyon/subscriptions", "type": "User", "url": "https://api.github.com/users/fcakyon", "user_view_type": "public" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
[ "Hi ! You can in the `features` folder how we implemented the audio and image feature types.\r\n\r\nWe can have something similar to videos. What we need to decide:\r\n- the video loading library to use\r\n- the output format when a user accesses a video type object\r\n- what parameters a `Video()` feature type needs\r\n\r\nalso cc @nateraw who also took a look at what we can do for video", "@lhoestq @nateraw is there any progress on adding video classification datasets? ", "Hi ! I think we just missing which lib we're going to use to decode the videos + which parameters must go in the `Video` type", "Hmm. `decord` could be nice but it's no longer maintained [it seems](https://github.com/dmlc/decord/issues/214). ", "pytorchvideo uses [pyav](https://github.com/PyAV-Org/PyAV) as the default decoder: https://github.com/facebookresearch/pytorchvideo/blob/c8d23d8b7e597586a9e2d18f6ed31ad8aa379a7a/pytorchvideo/data/labeled_video_dataset.py#L37\r\n\r\nAlso it would be great if `optionally` audio can also be decoded from the video as in pytorchvideo: https://github.com/facebookresearch/pytorchvideo/blob/c8d23d8b7e597586a9e2d18f6ed31ad8aa379a7a/pytorchvideo/data/labeled_video_dataset.py#L35\r\n\r\nHere are the other decoders supported in pytorchvideo: https://github.com/facebookresearch/pytorchvideo/blob/c8d23d8b7e597586a9e2d18f6ed31ad8aa379a7a/pytorchvideo/data/encoded_video.py#L17\r\n", "@sayakpaul I did do quite a bit of work on [this PR](https://github.com/huggingface/datasets/pull/4532) a while back to add a video feature. It's outdated, but uses my `encoded_video` [package](https://github.com/nateraw/encoded-video) under the hood, which is basically a wrapper around PyAV stolen from [pytorchvideo](https://github.com/facebookresearch/pytorchvideo/) that gets rid of the `torch` dependency. \r\n\r\nwould be really great to get something like this in...it's just a really tricky and time consuming feature to add. " ]
2022-09-27T19:40:11Z
2022-11-02T19:10:13Z
null
NONE
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
**Is your feature request related to a problem? Please describe.** In my research, I am dealing with multi-modal (audio + text + frame sequence) video classification. It would be great if the datasets library supported generating multi-modal batches from a video dataset. **Describe the solution you'd like** Assume I have video files with single or multiple labels, and I want to train a single/multi-label video classification model. I would like datasets to support generating multi-modal batches (audio + frame sequence) from the video files. The audio waveform and frame sequence can be extracted from each video clip; then I can use any audio, image or video model from the transformers library to extract features, which will be fed into my model. **Describe alternatives you've considered** Currently, I am using the https://github.com/facebookresearch/pytorchvideo dataloaders. There seem to be few alternatives. **Additional context** I am willing to open a PR but don't know where to start.
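As a hedged sketch of the multi-modal loading described above, using PyAV (one of the decoders mentioned in this issue's discussion; the function name and return format are illustrative, not an existing API):

```python
# Hypothetical multi-modal clip loader: returns the frame sequence, the
# audio waveform, and the audio sample rate from a single video file.
import av
import numpy as np

def load_clip(path: str):
    # Frames: decode the first video stream into RGB numpy arrays.
    with av.open(path) as container:
        frames = [f.to_ndarray(format="rgb24")
                  for f in container.decode(video=0)]
    # Waveform: decode the first audio track (assumed present).
    with av.open(path) as container:
        rate = container.streams.audio[0].rate
        audio = [f.to_ndarray() for f in container.decode(audio=0)]
    # Audio frames come out as (channels, samples)-shaped arrays
    # (format-dependent); concatenate along the sample axis.
    waveform = np.concatenate(audio, axis=-1)
    return np.stack(frames), waveform, rate
```

A `Video` feature built along these lines could then sit next to the existing `Audio` and image features, so that `datasets` yields such multi-modal examples directly.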
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/5032/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5032/timeline
null
null
null
null
https://api.github.com/repos/huggingface/datasets/issues/5015
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5015/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5015/comments
https://api.github.com/repos/huggingface/datasets/issues/5015/events
https://github.com/huggingface/datasets/issues/5015
1,383,485,558
I_kwDODunzps5SdlB2
5,015
Transfer dataset scripts to Hub
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" } ]
null
[ "Sounds good ! Can I help with anything ?" ]
2022-09-23T08:48:10Z
2022-10-05T07:15:57Z
2022-10-05T07:15:57Z
MEMBER
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
Before merging:
- #4974

TODO:
- [x] Create label: ["dataset contribution"](https://github.com/huggingface/datasets/pulls?q=label%3A%22dataset+contribution%22)
- [x] Create project: [Datasets: Transfer datasets to Hub](https://github.com/orgs/huggingface/projects/22/)
- [x] PRs:
  - [x] Add dataset: we should recommend transferring all additions of datasets to the Hub, under the appropriate namespace; no more additions of datasets on GitHub
  - [x] Update dataset: in general, we should merge bug fixes; enhancements should be considered on a case-by-case basis, depending on whether there is a more suitable namespace on the Hub
- [ ] Issues

Finally:
- [x] #4974

Let me know what you think! :hugs:
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 1, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/5015/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5015/timeline
null
completed
null
null
https://api.github.com/repos/huggingface/datasets/issues/6342
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6342/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6342/comments
https://api.github.com/repos/huggingface/datasets/issues/6342/events
https://github.com/huggingface/datasets/pull/6342
1,957,344,445
PR_kwDODunzps5dijxt
6,342
Release: 2.14.6
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007051 / 0.011353 (-0.004302) | 0.004291 / 0.011008 (-0.006717) | 0.085557 / 0.038508 (0.047048) | 0.087919 / 0.023109 (0.064810) | 0.356912 / 0.275898 (0.081014) | 0.394835 / 0.323480 (0.071355) | 0.004464 / 0.007986 (-0.003522) | 0.003688 / 0.004328 (-0.000640) | 0.065437 / 0.004250 (0.061186) | 0.060156 / 0.037052 (0.023103) | 0.361807 / 0.258489 (0.103318) | 0.420917 / 0.293841 (0.127076) | 0.031704 / 0.128546 (-0.096842) | 0.008921 / 0.075646 (-0.066726) | 0.287828 / 0.419271 (-0.131443) | 0.053600 / 0.043533 (0.010067) | 0.361833 / 0.255139 (0.106694) | 0.396732 / 0.283200 (0.113532) | 0.025874 / 0.141683 (-0.115809) | 1.474926 / 1.452155 (0.022771) | 1.563186 / 1.492716 (0.070469) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.316823 / 0.018006 (0.298817) | 0.604085 / 0.000490 (0.603595) | 0.020828 / 0.000200 (0.020628) | 0.000351 / 0.000054 (0.000297) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.030468 / 0.037411 (-0.006943) | 0.083904 / 0.014526 (0.069378) | 0.103019 / 0.176557 (-0.073537) | 0.159018 / 0.737135 (-0.578117) | 0.102737 / 0.296338 (-0.193602) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.405311 / 0.215209 (0.190102) | 4.029060 / 2.077655 (1.951406) | 2.046590 / 1.504120 (0.542470) | 1.919335 / 1.541195 (0.378140) | 2.030371 / 1.468490 
(0.561881) | 0.484209 / 4.584777 (-4.100568) | 3.486888 / 3.745712 (-0.258824) | 3.390777 / 5.269862 (-1.879084) | 2.110744 / 4.565676 (-2.454933) | 0.056587 / 0.424275 (-0.367688) | 0.007766 / 0.007607 (0.000159) | 0.488217 / 0.226044 (0.262173) | 4.853904 / 2.268929 (2.584976) | 2.595122 / 55.444624 (-52.849502) | 2.217712 / 6.876477 (-4.658765) | 2.500368 / 2.142072 (0.358296) | 0.580843 / 4.805227 (-4.224384) | 0.132719 / 6.500664 (-6.367945) | 0.060202 / 0.075469 (-0.015267) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.260748 / 1.841788 (-0.581040) | 20.148848 / 8.074308 (12.074540) | 14.738779 / 10.191392 (4.547387) | 0.167562 / 0.680424 (-0.512862) | 0.018944 / 0.534201 (-0.515257) | 0.394314 / 0.579283 (-0.184969) | 0.409345 / 0.434364 (-0.025019) | 0.458743 / 0.540337 (-0.081594) | 0.638175 / 1.386936 (-0.748761) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007097 / 0.011353 (-0.004256) | 0.004304 / 0.011008 (-0.006705) | 0.065539 / 0.038508 (0.027030) | 0.094078 / 0.023109 (0.070969) | 0.412411 / 0.275898 (0.136513) | 0.441900 / 0.323480 (0.118420) | 0.006038 / 0.007986 (-0.001948) | 0.003647 / 0.004328 (-0.000682) | 0.065298 / 0.004250 (0.061048) | 0.062571 / 0.037052 (0.025518) | 0.405156 / 0.258489 (0.146667) | 0.443779 / 0.293841 (0.149938) | 0.034470 / 0.128546 (-0.094077) | 0.008858 / 0.075646 (-0.066789) | 0.071840 / 0.419271 (-0.347431) | 0.050468 / 0.043533 (0.006935) | 0.404198 / 0.255139 (0.149059) | 0.430196 / 0.283200 (0.146997) | 0.025710 / 0.141683 (-0.115973) | 1.525374 / 1.452155 (0.073219) | 1.591830 / 1.492716 (0.099114) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.294330 / 0.018006 (0.276324) | 0.516943 / 0.000490 (0.516453) | 0.004807 / 0.000200 (0.004607) | 0.000103 / 0.000054 (0.000048) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split 
|\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.034505 / 0.037411 (-0.002907) | 0.096645 / 0.014526 (0.082119) | 0.111926 / 0.176557 (-0.064630) | 0.165241 / 0.737135 (-0.571894) | 0.111834 / 0.296338 (-0.184504) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.436370 / 0.215209 (0.221161) | 4.357568 / 2.077655 (2.279913) | 2.360529 / 1.504120 (0.856409) | 2.196375 / 1.541195 (0.655180) | 2.307481 / 1.468490 (0.838991) | 0.494072 / 4.584777 (-4.090705) | 3.565078 / 3.745712 (-0.180634) | 3.405174 / 5.269862 (-1.864688) | 2.203307 / 4.565676 (-2.362369) | 0.058582 / 0.424275 (-0.365693) | 0.007410 / 0.007607 (-0.000197) | 0.514323 / 0.226044 (0.288279) | 5.139834 / 2.268929 (2.870905) | 2.884111 / 55.444624 (-52.560513) | 2.589021 / 6.876477 (-4.287456) | 2.787577 / 2.142072 (0.645504) | 0.590765 / 4.805227 (-4.214462) | 0.135237 / 6.500664 (-6.365427) | 0.061078 / 0.075469 (-0.014391) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.346938 / 1.841788 (-0.494850) | 21.009948 / 8.074308 (12.935640) | 15.203281 / 10.191392 (5.011889) | 0.166208 / 0.680424 (-0.514216) | 0.020634 / 0.534201 (-0.513567) | 0.413825 / 0.579283 (-0.165458) | 0.416477 / 0.434364 (-0.017887) | 0.485888 / 0.540337 (-0.054449) | 0.664941 / 1.386936 (-0.721995) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#395b30ee2c0f6088e28fe78a3e61b591e40a4668 \"CML watermark\")\n", "_The documentation is not available anymore as the PR was closed or merged._", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | 
write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005927 / 0.011353 (-0.005425) | 0.003622 / 0.011008 (-0.007386) | 0.081414 / 0.038508 (0.042906) | 0.061031 / 0.023109 (0.037922) | 0.358323 / 0.275898 (0.082425) | 0.394192 / 0.323480 (0.070712) | 0.003471 / 0.007986 (-0.004515) | 0.002930 / 0.004328 (-0.001399) | 0.064215 / 0.004250 (0.059964) | 0.048678 / 0.037052 (0.011625) | 0.367966 / 0.258489 (0.109477) | 0.412618 / 0.293841 (0.118777) | 0.027192 / 0.128546 (-0.101355) | 0.007921 / 0.075646 (-0.067725) | 0.262213 / 0.419271 (-0.157059) | 0.044750 / 0.043533 (0.001217) | 0.351573 / 0.255139 (0.096434) | 0.389000 / 0.283200 (0.105800) | 0.020842 / 0.141683 (-0.120840) | 1.448925 / 1.452155 (-0.003229) | 1.530478 / 1.492716 (0.037761) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.227787 / 0.018006 (0.209780) | 0.423161 / 0.000490 (0.422671) | 0.007557 / 0.000200 (0.007357) | 0.000205 / 0.000054 (0.000150) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.024703 / 0.037411 (-0.012709) | 0.074044 / 0.014526 (0.059518) | 0.085520 / 0.176557 (-0.091037) | 0.146132 / 0.737135 (-0.591003) | 0.085637 / 0.296338 (-0.210701) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.393177 / 0.215209 (0.177968) | 3.926740 / 2.077655 (1.849085) | 1.892420 / 1.504120 (0.388300) | 1.716844 / 1.541195 (0.175650) | 1.784040 / 1.468490 (0.315550) | 0.499570 / 4.584777 (-4.085207) | 3.057764 / 3.745712 (-0.687948) | 2.885463 / 5.269862 (-2.384399) | 1.905206 / 4.565676 (-2.660471) | 0.058216 / 0.424275 (-0.366059) | 0.006805 / 0.007607 (-0.000802) | 0.465406 / 0.226044 (0.239361) | 4.658569 / 2.268929 (2.389641) | 2.461737 / 55.444624 (-52.982887) | 2.170620 / 6.876477 (-4.705856) | 2.373715 / 2.142072 (0.231643) | 0.592818 / 4.805227 (-4.212409) | 0.127960 / 6.500664 (-6.372704) | 0.061696 / 0.075469 (-0.013773) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.229073 / 1.841788 (-0.612715) | 17.832087 / 8.074308 (9.757778) | 13.889485 / 10.191392 (3.698093) | 0.142237 / 0.680424 (-0.538187) | 0.016752 / 0.534201 (-0.517449) | 0.338342 / 0.579283 (-0.240941) | 0.383933 / 0.434364 (-0.050431) | 0.393017 / 0.540337 (-0.147320) | 0.557621 / 
1.386936 (-0.829315) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006218 / 0.011353 (-0.005135) | 0.003679 / 0.011008 (-0.007329) | 0.062934 / 0.038508 (0.024426) | 0.066764 / 0.023109 (0.043655) | 0.482737 / 0.275898 (0.206839) | 0.483241 / 0.323480 (0.159761) | 0.004828 / 0.007986 (-0.003158) | 0.002880 / 0.004328 (-0.001448) | 0.063111 / 0.004250 (0.058861) | 0.049500 / 0.037052 (0.012448) | 0.453155 / 0.258489 (0.194666) | 0.488776 / 0.293841 (0.194935) | 0.028568 / 0.128546 (-0.099978) | 0.008490 / 0.075646 (-0.067157) | 0.068202 / 0.419271 (-0.351069) | 0.040695 / 0.043533 (-0.002838) | 0.457473 / 0.255139 (0.202334) | 0.471968 / 0.283200 (0.188768) | 0.021261 / 0.141683 (-0.120422) | 1.476304 / 1.452155 (0.024150) | 1.503433 / 1.492716 (0.010716) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.227108 / 0.018006 (0.209102) | 0.428330 / 0.000490 (0.427840) | 0.004637 / 0.000200 (0.004437) | 0.000074 / 0.000054 (0.000020) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.027253 / 0.037411 (-0.010158) | 0.081990 / 0.014526 (0.067464) | 0.092763 / 0.176557 (-0.083794) | 0.146155 / 0.737135 (-0.590981) | 0.093175 / 0.296338 (-0.203164) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.464585 / 0.215209 (0.249376) | 4.630704 / 2.077655 (2.553050) | 2.583272 / 1.504120 (1.079152) | 2.393810 / 1.541195 (0.852615) | 2.463255 / 1.468490 (0.994765) | 
0.507045 / 4.584777 (-4.077732) | 3.181972 / 3.745712 (-0.563740) | 2.902321 / 5.269862 (-2.367541) | 1.905431 / 4.565676 (-2.660246) | 0.059427 / 0.424275 (-0.364848) | 0.006387 / 0.007607 (-0.001220) | 0.542247 / 0.226044 (0.316203) | 5.426868 / 2.268929 (3.157939) | 3.073489 / 55.444624 (-52.371136) | 2.719620 / 6.876477 (-4.156857) | 2.861865 / 2.142072 (0.719793) | 0.593757 / 4.805227 (-4.211471) | 0.125439 / 6.500664 (-6.375225) | 0.060901 / 0.075469 (-0.014568) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.359938 / 1.841788 (-0.481850) | 18.484867 / 8.074308 (10.410559) | 14.685645 / 10.191392 (4.494253) | 0.164098 / 0.680424 (-0.516325) | 0.018090 / 0.534201 (-0.516111) | 0.339760 / 0.579283 (-0.239523) | 0.376668 / 0.434364 (-0.057696) | 0.396963 / 0.540337 (-0.143374) | 0.549305 / 1.386936 (-0.837631) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#0c896f4195ec8a91e09f8bb9a57950bcec8b8450 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006052 / 0.011353 (-0.005301) | 0.003715 / 0.011008 (-0.007293) | 0.079646 / 0.038508 (0.041138) | 0.059053 / 0.023109 (0.035944) | 0.393016 / 0.275898 (0.117118) | 0.424758 / 0.323480 (0.101278) | 0.005407 / 0.007986 (-0.002578) | 0.002920 / 0.004328 (-0.001408) | 0.062145 / 0.004250 (0.057894) | 0.047289 / 0.037052 (0.010237) | 0.399848 / 0.258489 (0.141359) | 0.434239 / 0.293841 (0.140398) | 0.027388 / 0.128546 (-0.101158) | 0.007967 / 0.075646 (-0.067680) | 0.262546 / 0.419271 (-0.156725) | 0.045014 / 0.043533 (0.001482) | 0.398086 / 0.255139 (0.142947) | 0.414615 / 0.283200 (0.131415) | 0.020410 / 0.141683 (-0.121272) | 1.447276 / 1.452155 (-0.004879) | 1.512390 / 1.492716 (0.019673) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.224854 / 0.018006 (0.206847) | 0.434173 / 0.000490 (0.433683) | 0.010091 / 0.000200 (0.009891) | 
0.000259 / 0.000054 (0.000205) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.025316 / 0.037411 (-0.012095) | 0.073284 / 0.014526 (0.058758) | 0.085177 / 0.176557 (-0.091379) | 0.148905 / 0.737135 (-0.588230) | 0.084696 / 0.296338 (-0.211642) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.438259 / 0.215209 (0.223050) | 4.380679 / 2.077655 (2.303025) | 2.310329 / 1.504120 (0.806209) | 2.144002 / 1.541195 (0.602807) | 2.203761 / 1.468490 (0.735270) | 0.500559 / 4.584777 (-4.084218) | 3.031172 / 3.745712 (-0.714540) | 2.839425 / 5.269862 (-2.430436) | 1.878391 / 4.565676 (-2.687285) | 0.057325 / 0.424275 (-0.366950) | 0.006719 / 0.007607 (-0.000888) | 0.510122 / 0.226044 (0.284078) | 5.108632 / 2.268929 (2.839704) | 2.805716 / 55.444624 (-52.638909) | 2.422183 / 6.876477 (-4.454293) | 2.635280 / 2.142072 (0.493207) | 0.589351 / 4.805227 (-4.215876) | 0.125416 / 6.500664 (-6.375248) | 0.061142 / 0.075469 (-0.014327) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.234997 / 1.841788 (-0.606791) | 17.731828 / 8.074308 (9.657520) | 13.858081 / 10.191392 (3.666689) | 0.145975 / 0.680424 (-0.534449) | 0.016827 / 0.534201 (-0.517374) | 0.335701 / 0.579283 (-0.243582) | 0.361867 / 0.434364 (-0.072497) | 0.394620 / 0.540337 (-0.145718) | 0.532146 / 1.386936 (-0.854790) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006091 / 0.011353 (-0.005262) | 0.003663 / 0.011008 (-0.007345) | 0.062596 / 0.038508 (0.024088) | 0.061649 / 0.023109 (0.038539) | 0.440647 / 0.275898 (0.164749) | 0.472974 / 0.323480 (0.149494) | 0.005009 / 0.007986 (-0.002976) | 0.002879 / 0.004328 (-0.001449) | 0.062815 / 0.004250 (0.058565) | 0.049000 / 0.037052 (0.011947) | 0.442990 / 0.258489 (0.184501) | 0.477622 / 0.293841 (0.183781) | 0.028512 / 0.128546 (-0.100034) | 0.008031 / 0.075646 (-0.067615) | 0.067853 / 0.419271 (-0.351418) | 0.040823 / 0.043533 (-0.002710) | 0.437811 / 0.255139 (0.182672) | 0.464615 / 0.283200 (0.181416) | 0.021348 / 0.141683 (-0.120334) | 1.479230 / 1.452155 (0.027075) | 1.544053 / 1.492716 (0.051337) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.210697 / 0.018006 (0.192691) | 0.436450 / 0.000490 (0.435960) | 0.003413 / 0.000200 (0.003213) | 0.000089 / 0.000054 (0.000035) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.027190 / 0.037411 (-0.010222) | 0.083254 / 0.014526 (0.068728) | 0.092936 / 0.176557 (-0.083620) | 0.147261 / 0.737135 (-0.589874) | 0.092910 / 0.296338 (-0.203429) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.454195 / 0.215209 (0.238986) | 4.569122 / 2.077655 (2.491468) | 2.497198 / 1.504120 (0.993079) | 2.314337 / 1.541195 (0.773142) | 2.378471 / 1.468490 (0.909981) | 0.515402 / 4.584777 (-4.069375) | 3.199374 / 3.745712 (-0.546338) | 2.899300 / 5.269862 (-2.370562) | 1.873314 / 4.565676 (-2.692362) | 0.058820 / 0.424275 (-0.365455) | 0.006651 / 0.007607 (-0.000957) | 0.526681 / 0.226044 (0.300636) | 5.275232 / 2.268929 (3.006303) | 2.969107 / 55.444624 (-52.475517) | 2.600959 / 6.876477 (-4.275518) | 2.762930 / 2.142072 (0.620858) | 0.605726 / 4.805227 (-4.199501) | 0.127618 / 6.500664 (-6.373046) | 0.062840 / 0.075469 (-0.012629) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.367276 / 1.841788 (-0.474512) | 18.069385 / 8.074308 (9.995077) | 14.691945 / 10.191392 (4.500553) | 0.147203 / 0.680424 (-0.533221) | 0.018484 / 0.534201 (-0.515717) | 0.333759 / 0.579283 (-0.245524) | 0.395503 / 0.434364 (-0.038861) | 0.387031 / 0.540337 (-0.153306) | 0.550428 / 1.386936 (-0.836508) 
|\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#4c8f7eb79dff66dd03211321dcb55f7a7a05ef38 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007675 / 0.011353 (-0.003678) | 0.004532 / 0.011008 (-0.006476) | 0.088176 / 0.038508 (0.049668) | 0.103257 / 0.023109 (0.080148) | 0.314785 / 0.275898 (0.038887) | 0.354280 / 0.323480 (0.030800) | 0.004638 / 0.007986 (-0.003348) | 0.003736 / 0.004328 (-0.000592) | 0.066744 / 0.004250 (0.062493) | 0.064647 / 0.037052 (0.027595) | 0.320227 / 0.258489 (0.061738) | 0.369581 / 0.293841 (0.075740) | 0.032347 / 0.128546 (-0.096199) | 0.009226 / 0.075646 (-0.066421) | 0.292966 / 0.419271 (-0.126306) | 0.055738 / 0.043533 (0.012206) | 0.316537 / 0.255139 (0.061398) | 0.334699 / 0.283200 (0.051499) | 0.027401 / 0.141683 (-0.114282) | 1.482390 / 1.452155 (0.030236) | 1.594771 / 1.492716 (0.102055) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.322181 / 0.018006 (0.304175) | 0.577701 / 0.000490 (0.577212) | 0.014565 / 0.000200 (0.014365) | 0.000393 / 0.000054 (0.000338) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.033255 / 0.037411 (-0.004156) | 0.094271 / 0.014526 (0.079745) | 0.105360 / 0.176557 (-0.071197) | 0.163699 / 0.737135 (-0.573436) | 0.105620 / 0.296338 (-0.190719) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.383449 / 0.215209 
(0.168240) | 3.824292 / 2.077655 (1.746637) | 1.861809 / 1.504120 (0.357689) | 1.698153 / 1.541195 (0.156958) | 1.819460 / 1.468490 (0.350970) | 0.488277 / 4.584777 (-4.096500) | 3.622772 / 3.745712 (-0.122940) | 3.486041 / 5.269862 (-1.783821) | 2.211679 / 4.565676 (-2.353998) | 0.057637 / 0.424275 (-0.366638) | 0.008028 / 0.007607 (0.000421) | 0.461917 / 0.226044 (0.235873) | 4.626493 / 2.268929 (2.357565) | 2.374846 / 55.444624 (-53.069779) | 1.976003 / 6.876477 (-4.900473) | 2.325342 / 2.142072 (0.183269) | 0.582538 / 4.805227 (-4.222689) | 0.133575 / 6.500664 (-6.367089) | 0.061696 / 0.075469 (-0.013773) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.271846 / 1.841788 (-0.569941) | 20.944702 / 8.074308 (12.870394) | 15.438119 / 10.191392 (5.246727) | 0.167334 / 0.680424 (-0.513090) | 0.019538 / 0.534201 (-0.514663) | 0.401467 / 0.579283 (-0.177816) | 0.428222 / 0.434364 (-0.006142) | 0.466108 / 0.540337 (-0.074229) | 0.645326 / 1.386936 (-0.741610) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007096 / 0.011353 (-0.004257) | 0.004398 / 0.011008 (-0.006610) | 0.066253 / 0.038508 (0.027745) | 0.089415 / 0.023109 (0.066306) | 0.395760 / 0.275898 (0.119862) | 0.436058 / 0.323480 (0.112579) | 0.005944 / 0.007986 (-0.002042) | 0.003821 / 0.004328 (-0.000507) | 0.065286 / 0.004250 (0.061036) | 0.060990 / 0.037052 (0.023937) | 0.394674 / 0.258489 (0.136185) | 0.437672 / 0.293841 (0.143831) | 0.032370 / 0.128546 (-0.096177) | 0.009025 / 0.075646 (-0.066622) | 0.071365 / 0.419271 (-0.347906) | 0.048232 / 0.043533 (0.004699) | 0.395677 / 0.255139 (0.140538) | 0.415869 / 0.283200 (0.132669) | 0.024632 / 0.141683 (-0.117051) | 1.511386 / 1.452155 (0.059231) | 1.604475 / 1.492716 (0.111759) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.312864 / 0.018006 (0.294858) | 0.535432 / 0.000490 (0.534943) | 0.005195 / 0.000200 (0.004995) | 0.000101 / 0.000054 
(0.000047) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.035827 / 0.037411 (-0.001584) | 0.099353 / 0.014526 (0.084827) | 0.110796 / 0.176557 (-0.065761) | 0.165224 / 0.737135 (-0.571911) | 0.112111 / 0.296338 (-0.184228) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.428873 / 0.215209 (0.213664) | 4.284264 / 2.077655 (2.206609) | 2.303966 / 1.504120 (0.799847) | 2.153868 / 1.541195 (0.612674) | 2.275669 / 1.468490 (0.807179) | 0.495452 / 4.584777 (-4.089325) | 3.706773 / 3.745712 (-0.038939) | 3.471988 / 5.269862 (-1.797874) | 2.194851 / 4.565676 (-2.370825) | 0.058998 / 0.424275 (-0.365277) | 0.007522 / 0.007607 (-0.000085) | 0.511222 / 0.226044 (0.285177) | 5.097058 / 2.268929 (2.828130) | 2.856793 / 55.444624 (-52.587832) | 2.521907 / 6.876477 (-4.354569) | 2.783133 / 2.142072 (0.641060) | 0.600511 / 4.805227 (-4.204717) | 0.134130 / 6.500664 (-6.366534) | 0.061726 / 0.075469 (-0.013743) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.385272 / 1.841788 (-0.456516) | 21.149260 / 8.074308 (13.074952) | 15.548746 / 10.191392 (5.357354) | 0.167506 / 0.680424 (-0.512918) | 0.020494 / 0.534201 (-0.513707) | 0.400697 / 0.579283 (-0.178586) | 0.427386 / 0.434364 (-0.006978) | 0.478514 / 0.540337 (-0.061824) | 0.655753 / 1.386936 (-0.731183) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#4c8f7eb79dff66dd03211321dcb55f7a7a05ef38 \"CML watermark\")\n" ]
2023-10-23T14:43:26Z
2023-10-23T15:21:54Z
2023-10-23T15:07:25Z
MEMBER
null
null
null
null
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6342/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6342/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6342.diff", "html_url": "https://github.com/huggingface/datasets/pull/6342", "merged_at": "2023-10-23T15:07:25Z", "patch_url": "https://github.com/huggingface/datasets/pull/6342.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6342" }
https://api.github.com/repos/huggingface/datasets/issues/5228
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5228/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5228/comments
https://api.github.com/repos/huggingface/datasets/issues/5228/events
https://github.com/huggingface/datasets/issues/5228
1,444,763,105
I_kwDODunzps5WHVXh
5,228
Loading a dataset from the hub fails if you happen to have a folder of the same name
{ "avatar_url": "https://avatars.githubusercontent.com/u/43149077?v=4", "events_url": "https://api.github.com/users/dakinggg/events{/privacy}", "followers_url": "https://api.github.com/users/dakinggg/followers", "following_url": "https://api.github.com/users/dakinggg/following{/other_user}", "gists_url": "https://api.github.com/users/dakinggg/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dakinggg", "id": 43149077, "login": "dakinggg", "node_id": "MDQ6VXNlcjQzMTQ5MDc3", "organizations_url": "https://api.github.com/users/dakinggg/orgs", "received_events_url": "https://api.github.com/users/dakinggg/received_events", "repos_url": "https://api.github.com/users/dakinggg/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dakinggg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dakinggg/subscriptions", "type": "User", "url": "https://api.github.com/users/dakinggg", "user_view_type": "public" }
[]
open
false
null
[]
null
[ "`load_dataset` first checks for a local directory before checking for the Hub.\r\n\r\nTo make it explicit that it has to fetch the Hub, we could support the `hffs` syntax:\r\n```python\r\nload_dataset(\"hf://datasets/glue\")\r\n```\r\n\r\nwould that work for you ? Also cc @mariosasko who's leading the `hffs` project", "yeah, that would be a fine solution.", "This still has no proper solution in 2.11\r\n\r\nperhaps have a `download_config=\"force_remote\"` or just backtrack once you reach `EmptyDatasetError` locally and then try to load it from the hub (or a local cache, as that only gets checked if there is no local folder...?)" ]
2022-11-11T00:51:54Z
2023-05-03T23:23:04Z
null
CONTRIBUTOR
null
null
{ "completed": 0, "percent_completed": 0, "total": 0 }
### Describe the bug I'm not 100% sure this should be considered a bug, but it was certainly annoying to figure out the cause of. And perhaps I am just missing a specific argument needed to avoid this conflict. Basically I had a situation where multiple workers were downloading different parts of the glue dataset and then training on them. Additionally, they were writing their checkpoints to a folder called `glue`. This meant that once one worker had created the `glue` folder to write checkpoints to, the next worker to try to load a glue dataset would fail as shown in the minimal repro below. I'm not sure what the solution would be since I'm not super familiar with the `datasets` code, but I would expect `load_dataset` to not crash just because I have a local folder with the same name as a dataset from the hub. ### Steps to reproduce the bug ``` In [1]: import datasets In [2]: rte = datasets.load_dataset('glue', 'rte') Downloading and preparing dataset glue/rte to /Users/danielking/.cache/huggingface/datasets/glue/rte/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad... Downloading data: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 697k/697k [00:00<00:00, 6.08MB/s] Dataset glue downloaded and prepared to /Users/danielking/.cache/huggingface/datasets/glue/rte/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad. Subsequent calls will reuse this data. 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 773.81it/s] In [3]: import os In [4]: os.mkdir('glue') In [5]: rte = datasets.load_dataset('glue', 'rte') --------------------------------------------------------------------------- EmptyDatasetError Traceback (most recent call last) <ipython-input-5-0d6b9ad8bbd0> in <cell line: 1>() ----> 1 rte = datasets.load_dataset('glue', 'rte') ~/miniconda3/envs/composer/lib/python3.9/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, **config_kwargs) 1717 1718 # Create a dataset builder -> 1719 builder_instance = load_dataset_builder( 1720 path=path, 1721 name=name, ~/miniconda3/envs/composer/lib/python3.9/site-packages/datasets/load.py in load_dataset_builder(path, name, data_dir, data_files, cache_dir, features, download_config, download_mode, revision, use_auth_token, **config_kwargs) 1495 download_config = download_config.copy() if download_config else DownloadConfig() 1496 download_config.use_auth_token = use_auth_token -> 1497 dataset_module = dataset_module_factory( 1498 path, 1499 revision=revision, ~/miniconda3/envs/composer/lib/python3.9/site-packages/datasets/load.py in dataset_module_factory(path, revision, download_config, download_mode, dynamic_modules_path, data_dir, data_files, **download_kwargs) 1152 ).get_module() 1153 elif os.path.isdir(path): -> 1154 return LocalDatasetModuleFactoryWithoutScript( 1155 path, data_dir=data_dir, data_files=data_files, download_mode=download_mode 1156 ).get_module() ~/miniconda3/envs/composer/lib/python3.9/site-packages/datasets/load.py in get_module(self) 624 base_path = os.path.join(self.path, self.data_dir) if self.data_dir else self.path 625 patterns = ( --> 626 sanitize_patterns(self.data_files) if self.data_files is not None else get_data_patterns_locally(base_path) 627 ) 628 data_files = DataFilesDict.from_local_or_remote( ~/miniconda3/envs/composer/lib/python3.9/site-packages/datasets/data_files.py in get_data_patterns_locally(base_path) 458 return _get_data_files_patterns(resolver) 459 except FileNotFoundError: --> 460 raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None 461 462 EmptyDatasetError: The directory at glue doesn't contain any data files ``` ### Expected behavior The dataset can still be loaded from the hub even if I have a local folder with the same name. ### Environment info datasets version: 2.6.1
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5228/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5228/timeline
null
null
null
null
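While the `hf://datasets/...` syntax proposed in the comments on issue 5228 was still not implemented as of the last comment (datasets 2.11), a caller can at least fail fast instead of hitting the confusing `EmptyDatasetError` from the traceback. A minimal sketch, using only the public `load_dataset` API; the `load_from_hub` wrapper is a hypothetical helper, not part of `datasets`:

```python
import os

from datasets import load_dataset


def load_from_hub(path, *args, **kwargs):
    # `load_dataset` resolves a local directory before falling back to the Hub,
    # so a stray folder named after the dataset (e.g. a `glue/` checkpoint
    # directory) shadows the Hub dataset and raises EmptyDatasetError.
    if os.path.isdir(path):
        raise RuntimeError(
            f"Local folder {path!r} would shadow the Hub dataset of the same "
            "name; rename it or run from a different working directory."
        )
    return load_dataset(path, *args, **kwargs)


rte = load_from_hub("glue", "rte")  # fails fast with an actionable message
```

This only surfaces the conflict early; actually forcing a Hub fetch would still need something like the proposed `hf://` syntax or a `download_config="force_remote"` option.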
https://api.github.com/repos/huggingface/datasets/issues/6968
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6968/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6968/comments
https://api.github.com/repos/huggingface/datasets/issues/6968/events
https://github.com/huggingface/datasets/pull/6968
2,351,331,417
PR_kwDODunzps5yX7Qr
6,968
Use `HF_HUB_OFFLINE` instead of `HF_DATASETS_OFFLINE`
{ "avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4", "events_url": "https://api.github.com/users/Wauplin/events{/privacy}", "followers_url": "https://api.github.com/users/Wauplin/followers", "following_url": "https://api.github.com/users/Wauplin/following{/other_user}", "gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Wauplin", "id": 11801849, "login": "Wauplin", "node_id": "MDQ6VXNlcjExODAxODQ5", "organizations_url": "https://api.github.com/users/Wauplin/orgs", "received_events_url": "https://api.github.com/users/Wauplin/received_events", "repos_url": "https://api.github.com/users/Wauplin/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions", "type": "User", "url": "https://api.github.com/users/Wauplin", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6968). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "Oops, sorry for the style issue. Fixed in https://github.com/huggingface/datasets/pull/6968/commits/a4e2b28fa647b28190ae2615d7271e6ac63c8499.\r\n\r\nRegarding docs, I can't find mentions of `HF_DATASETS_OFFLINE` anywhere else in `datasets`/`hub-docs`. Once this is merged and released, I'm planning to update some `transformers` docs that briefly mention it.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005173 / 0.011353 (-0.006180) | 0.003485 / 0.011008 (-0.007524) | 0.063867 / 0.038508 (0.025359) | 0.031338 / 0.023109 (0.008229) | 0.242093 / 0.275898 (-0.033805) | 0.266606 / 0.323480 (-0.056874) | 0.003069 / 0.007986 (-0.004916) | 0.003307 / 0.004328 (-0.001022) | 0.051059 / 0.004250 (0.046808) | 0.044396 / 0.037052 (0.007344) | 0.254896 / 0.258489 (-0.003593) | 0.282835 / 0.293841 (-0.011006) | 0.027548 / 0.128546 (-0.100998) | 0.010520 / 0.075646 (-0.065126) | 0.201701 / 0.419271 (-0.217570) | 0.035613 / 0.043533 (-0.007920) | 0.240955 / 0.255139 (-0.014184) | 0.271902 / 0.283200 (-0.011298) | 0.019826 / 0.141683 (-0.121857) | 1.116994 / 1.452155 (-0.335161) | 1.162886 / 1.492716 (-0.329831) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093683 / 0.018006 (0.075677) | 0.297970 / 0.000490 (0.297480) | 0.000211 / 0.000200 (0.000011) | 0.000043 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018952 / 0.037411 (-0.018459) | 0.062710 / 0.014526 (0.048184) | 0.073641 / 0.176557 (-0.102916) | 0.121200 / 0.737135 (-0.615935) | 0.075723 / 0.296338 (-0.220616) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | 
read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.286056 / 0.215209 (0.070847) | 2.811424 / 2.077655 (0.733770) | 1.448045 / 1.504120 (-0.056075) | 1.338309 / 1.541195 (-0.202885) | 1.328371 / 1.468490 (-0.140119) | 0.557282 / 4.584777 (-4.027495) | 2.362235 / 3.745712 (-1.383477) | 2.732108 / 5.269862 (-2.537754) | 1.730911 / 4.565676 (-2.834765) | 0.061689 / 0.424275 (-0.362586) | 0.004947 / 0.007607 (-0.002660) | 0.346700 / 0.226044 (0.120656) | 3.355989 / 2.268929 (1.087060) | 1.828078 / 55.444624 (-53.616546) | 1.511531 / 6.876477 (-5.364946) | 1.535897 / 2.142072 (-0.606175) | 0.630276 / 4.805227 (-4.174951) | 0.115808 / 6.500664 (-6.384857) | 0.042199 / 0.075469 (-0.033270) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.969203 / 1.841788 (-0.872584) | 11.282997 / 8.074308 (3.208689) | 9.538914 / 10.191392 (-0.652478) | 0.140072 / 0.680424 (-0.540352) | 0.014021 / 0.534201 (-0.520180) | 0.283784 / 0.579283 (-0.295499) | 0.255973 / 0.434364 (-0.178391) | 0.320284 / 0.540337 (-0.220053) | 0.412689 / 1.386936 (-0.974247) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005201 / 0.011353 (-0.006152) | 0.003312 / 0.011008 (-0.007697) | 0.050044 / 0.038508 (0.011536) | 0.033610 / 0.023109 (0.010501) | 0.266429 / 0.275898 (-0.009469) | 0.287782 / 0.323480 (-0.035698) | 0.004316 / 0.007986 (-0.003670) | 0.002696 / 0.004328 (-0.001633) | 0.049667 / 0.004250 (0.045417) | 0.040244 / 0.037052 (0.003192) | 0.278870 / 0.258489 (0.020381) | 0.311415 / 0.293841 (0.017574) | 0.029150 / 0.128546 (-0.099396) | 0.010046 / 0.075646 (-0.065600) | 0.058527 / 0.419271 (-0.360744) | 0.032871 / 0.043533 (-0.010662) | 0.266582 / 
0.255139 (0.011443) | 0.286157 / 0.283200 (0.002957) | 0.017197 / 0.141683 (-0.124486) | 1.120944 / 1.452155 (-0.331211) | 1.161111 / 1.492716 (-0.331606) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092679 / 0.018006 (0.074672) | 0.299195 / 0.000490 (0.298705) | 0.000204 / 0.000200 (0.000004) | 0.000048 / 0.000054 (-0.000007) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022212 / 0.037411 (-0.015199) | 0.076734 / 0.014526 (0.062208) | 0.088326 / 0.176557 (-0.088230) | 0.128209 / 0.737135 (-0.608926) | 0.088807 / 0.296338 (-0.207531) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291782 / 0.215209 (0.076573) | 2.882990 / 2.077655 (0.805335) | 1.601638 / 1.504120 (0.097518) | 1.457560 / 1.541195 (-0.083635) | 1.470517 / 1.468490 (0.002027) | 0.565738 / 4.584777 (-4.019039) | 0.949235 / 3.745712 (-2.796478) | 2.661927 / 5.269862 (-2.607934) | 1.722178 / 4.565676 (-2.843498) | 0.063680 / 0.424275 (-0.360595) | 0.005339 / 0.007607 (-0.002268) | 0.344280 / 0.226044 (0.118235) | 3.432998 / 2.268929 (1.164070) | 1.985516 / 55.444624 (-53.459108) | 1.651826 / 6.876477 (-5.224651) | 1.764541 / 2.142072 (-0.377531) | 0.640219 / 4.805227 (-4.165008) | 0.116541 / 6.500664 (-6.384124) | 0.041237 / 0.075469 (-0.034232) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.013927 / 1.841788 (-0.827861) | 11.876661 / 8.074308 (3.802353) | 10.264144 / 10.191392 (0.072752) | 0.131151 / 0.680424 (-0.549273) | 0.015774 / 0.534201 (-0.518427) | 0.284948 / 0.579283 (-0.294335) | 0.125924 / 0.434364 (-0.308439) | 0.319845 / 0.540337 (-0.220493) | 0.431978 / 1.386936 (-0.954958) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#68f67741ffde68c98d0a2f59ac4d8e3a7bc03065 \"CML watermark\")\n" ]
2024-06-13T14:39:40Z
2024-06-13T17:31:37Z
2024-06-13T17:25:37Z
CONTRIBUTOR
null
null
null
To use `datasets` offline, one can use the `HF_DATASETS_OFFLINE` environment variable. This PR makes `HF_HUB_OFFLINE` the recommended environment variable for offline training. The goal is to be more consistent with the rest of the HF ecosystem and to have a single config value to set. The changes are backward-compatible, meaning that: - the `HF_DATASETS_OFFLINE` environment variable is still taken into account, though no longer documented - `datasets.config.HF_DATASETS_OFFLINE` still exists, though it is no longer used (in favor of `datasets.config.HF_HUB_OFFLINE`) **Note:** this might break things in downstream libraries if they were monkeypatching `datasets.config.HF_DATASETS_OFFLINE` in their CI tests (for instance). Not much of a problem IMO.
{ "avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4", "events_url": "https://api.github.com/users/Wauplin/events{/privacy}", "followers_url": "https://api.github.com/users/Wauplin/followers", "following_url": "https://api.github.com/users/Wauplin/following{/other_user}", "gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Wauplin", "id": 11801849, "login": "Wauplin", "node_id": "MDQ6VXNlcjExODAxODQ5", "organizations_url": "https://api.github.com/users/Wauplin/orgs", "received_events_url": "https://api.github.com/users/Wauplin/received_events", "repos_url": "https://api.github.com/users/Wauplin/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions", "type": "User", "url": "https://api.github.com/users/Wauplin", "user_view_type": "public" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6968/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6968/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6968.diff", "html_url": "https://github.com/huggingface/datasets/pull/6968", "merged_at": "2024-06-13T17:25:37Z", "patch_url": "https://github.com/huggingface/datasets/pull/6968.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6968" }
https://api.github.com/repos/huggingface/datasets/issues/7370
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7370/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7370/comments
https://api.github.com/repos/huggingface/datasets/issues/7370/events
https://github.com/huggingface/datasets/pull/7370
2,787,972,786
PR_kwDODunzps6HwAu7
7,370
Support faster processing using pandas or polars functions in `IterableDataset.map()`
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_7370). All of your documentation changes will be reflected on that endpoint. The docs are available until 30 days after the last update.", "merging this and will make some docs and communications around using polars for optimizing data processing :)" ]
2025-01-14T18:14:13Z
2025-01-31T11:08:15Z
2025-01-30T13:30:57Z
MEMBER
null
null
null
Following the polars integration :) This allows super fast processing using pandas or polars functions in `IterableDataset.map()` by adding support for pandas and polars formatting in `IterableDataset`: ```python import polars as pl from datasets import Dataset ds = Dataset.from_dict({"i": range(10)}).to_iterable_dataset() ds = ds.with_format("polars") ds = ds.map(lambda df: df.with_columns(pl.col("i").add(1).alias("i+1")), batched=True) ds = ds.with_format(None) print(next(iter(ds))) # {'i': 0, 'i+1': 1} ``` It leverages Arrow's zero-copy features from/to pandas and polars. Related to https://github.com/huggingface/datasets/issues/3444 and #6762
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq", "user_view_type": "public" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7370/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7370/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7370.diff", "html_url": "https://github.com/huggingface/datasets/pull/7370", "merged_at": "2025-01-30T13:30:57Z", "patch_url": "https://github.com/huggingface/datasets/pull/7370.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7370" }