| Column | Type | Stats |
|---|---|---|
| comments_url | stringlengths | 70 – 70 |
| timeline_url | stringlengths | 70 – 70 |
| closed_at | stringlengths | 20 – 20 |
| performed_via_github_app | null | |
| state_reason | stringclasses | 3 values |
| node_id | stringlengths | 18 – 32 |
| state | stringclasses | 2 values |
| assignees | listlengths | 0 – 4 |
| draft | bool | 2 classes |
| number | int64 | 1.61k – 6.73k |
| user | dict | |
| title | stringlengths | 1 – 290 |
| events_url | stringlengths | 68 – 68 |
| milestone | dict | |
| labels_url | stringlengths | 75 – 75 |
| created_at | stringlengths | 20 – 20 |
| active_lock_reason | null | |
| locked | bool | 1 class |
| assignee | dict | |
| pull_request | dict | |
| id | int64 | 771M – 2.18B |
| labels | listlengths | 0 – 4 |
| url | stringlengths | 61 – 61 |
| comments | sequencelengths | 0 – 30 |
| repository_url | stringclasses | 1 value |
| author_association | stringclasses | 3 values |
| body | stringlengths | 0 – 228k |
| updated_at | stringlengths | 20 – 20 |
| html_url | stringlengths | 49 – 51 |
| reactions | dict | |
| is_pull_request | bool | 2 classes |
https://api.github.com/repos/huggingface/datasets/issues/1809/comments
https://api.github.com/repos/huggingface/datasets/issues/1809/timeline
2021-02-03T16:43:06Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY1NzY4ODQz
closed
[]
false
1,809
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani" }
Add FreebaseQA dataset
https://api.github.com/repos/huggingface/datasets/issues/1809/events
null
https://api.github.com/repos/huggingface/datasets/issues/1809/labels{/name}
2021-02-02T08:35:53Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1809.diff", "html_url": "https://github.com/huggingface/datasets/pull/1809", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1809.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1809" }
799,059,141
[]
https://api.github.com/repos/huggingface/datasets/issues/1809
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Adding FreebaseQA dataset suggested in PR #1435 with minor edits. Also closes that PR. Requesting @lhoestq to review.
2021-02-03T17:15:05Z
https://github.com/huggingface/datasets/pull/1809
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1809/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1808/comments
https://api.github.com/repos/huggingface/datasets/issues/1808/timeline
2022-06-01T15:38:13Z
null
completed
MDU6SXNzdWU3OTg4NzkxODA=
closed
[]
null
1,808
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost" }
writing Datasets in a human readable format
https://api.github.com/repos/huggingface/datasets/issues/1808/events
null
https://api.github.com/repos/huggingface/datasets/issues/1808/labels{/name}
2021-02-02T02:55:40Z
null
false
null
null
798,879,180
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" }, { "color": "d876e3", "default": true, "description": "Further information is requested", "id": 1935892912, "name": "question", "node_id": "MDU6TGFiZWwxOTM1ODkyOTEy", "url": "https://api.github.com/repos/huggingface/datasets/labels/question" } ]
https://api.github.com/repos/huggingface/datasets/issues/1808
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I see there is a `save_to_disk` function to save data, but this is not a human-readable format. Is there a way I could save a `Dataset` object to a file in a human-readable format, like JSON? Thanks @lhoestq
2022-06-01T15:38:13Z
https://github.com/huggingface/datasets/issues/1808
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1808/reactions" }
false
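The question in the record above (issue 1808) comes up often; below is a minimal sketch of one way to export a `Dataset` to a human-readable file, assuming a release of `datasets` that provides `Dataset.to_json` and `Dataset.to_pandas` (this answer is not part of the original thread).

```python
from datasets import load_dataset

# Small example dataset; any Dataset object works the same way.
dataset = load_dataset("glue", "mrpc", split="train")

# JSON Lines: one human-readable JSON object per row.
dataset.to_json("mrpc_train.jsonl")

# Or go through pandas and use any writer pandas supports (CSV here).
dataset.to_pandas().to_csv("mrpc_train.csv", index=False)
```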
https://api.github.com/repos/huggingface/datasets/issues/1807/comments
https://api.github.com/repos/huggingface/datasets/issues/1807/timeline
2021-02-02T18:06:58Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY1NTczNzU5
closed
[]
false
1,807
{ "avatar_url": "https://avatars.githubusercontent.com/u/10469459?v=4", "events_url": "https://api.github.com/users/yjernite/events{/privacy}", "followers_url": "https://api.github.com/users/yjernite/followers", "following_url": "https://api.github.com/users/yjernite/following{/other_user}", "gists_url": "https://api.github.com/users/yjernite/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yjernite", "id": 10469459, "login": "yjernite", "node_id": "MDQ6VXNlcjEwNDY5NDU5", "organizations_url": "https://api.github.com/users/yjernite/orgs", "received_events_url": "https://api.github.com/users/yjernite/received_events", "repos_url": "https://api.github.com/users/yjernite/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yjernite/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yjernite/subscriptions", "type": "User", "url": "https://api.github.com/users/yjernite" }
Adding an aggregated dataset for the GEM benchmark
https://api.github.com/repos/huggingface/datasets/issues/1807/events
null
https://api.github.com/repos/huggingface/datasets/issues/1807/labels{/name}
2021-02-02T00:39:53Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1807.diff", "html_url": "https://github.com/huggingface/datasets/pull/1807", "merged_at": "2021-02-02T18:06:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/1807.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1807" }
798,823,591
[]
https://api.github.com/repos/huggingface/datasets/issues/1807
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
This dataset gathers modified versions of several other conditional text generation datasets which together make up the shared task for the Generation Evaluation and Metrics (GEM) workshop (think GLUE for text generation). The changes from the original datasets are detailed in the Dataset Cards on the GEM website, which are linked to in this dataset card. cc @sebastianGehrmann
2021-02-02T22:48:41Z
https://github.com/huggingface/datasets/pull/1807
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1807/reactions" }
true
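For readers who want to try the aggregated dataset described in the record above (PR 1807), here is a hedged loading sketch; the config name `common_gen` is only an illustrative assumption, not something stated in the PR description.

```python
from datasets import load_dataset

# Each GEM task is exposed as a configuration of the aggregated "gem" dataset.
# "common_gen" is used here purely as an illustrative config name.
common_gen = load_dataset("gem", "common_gen", split="train")
print(common_gen[0])
```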
https://api.github.com/repos/huggingface/datasets/issues/1806/comments
https://api.github.com/repos/huggingface/datasets/issues/1806/timeline
2021-02-01T18:46:21Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY1Mzk0ODIz
closed
[]
false
1,806
{ "avatar_url": "https://avatars.githubusercontent.com/u/15138872?v=4", "events_url": "https://api.github.com/users/padipadou/events{/privacy}", "followers_url": "https://api.github.com/users/padipadou/followers", "following_url": "https://api.github.com/users/padipadou/following{/other_user}", "gists_url": "https://api.github.com/users/padipadou/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/padipadou", "id": 15138872, "login": "padipadou", "node_id": "MDQ6VXNlcjE1MTM4ODcy", "organizations_url": "https://api.github.com/users/padipadou/orgs", "received_events_url": "https://api.github.com/users/padipadou/received_events", "repos_url": "https://api.github.com/users/padipadou/repos", "site_admin": false, "starred_url": "https://api.github.com/users/padipadou/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/padipadou/subscriptions", "type": "User", "url": "https://api.github.com/users/padipadou" }
Update details to MLSUM dataset
https://api.github.com/repos/huggingface/datasets/issues/1806/events
null
https://api.github.com/repos/huggingface/datasets/issues/1806/labels{/name}
2021-02-01T18:35:12Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1806.diff", "html_url": "https://github.com/huggingface/datasets/pull/1806", "merged_at": "2021-02-01T18:46:21Z", "patch_url": "https://github.com/huggingface/datasets/pull/1806.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1806" }
798,607,869
[]
https://api.github.com/repos/huggingface/datasets/issues/1806
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Update details to MLSUM dataset
2021-02-01T18:46:28Z
https://github.com/huggingface/datasets/pull/1806
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1806/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1805/comments
https://api.github.com/repos/huggingface/datasets/issues/1805/timeline
2021-03-06T14:32:46Z
null
completed
MDU6SXNzdWU3OTg0OTgwNTM=
closed
[]
null
1,805
{ "avatar_url": "https://avatars.githubusercontent.com/u/6608232?v=4", "events_url": "https://api.github.com/users/abarbosa94/events{/privacy}", "followers_url": "https://api.github.com/users/abarbosa94/followers", "following_url": "https://api.github.com/users/abarbosa94/following{/other_user}", "gists_url": "https://api.github.com/users/abarbosa94/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/abarbosa94", "id": 6608232, "login": "abarbosa94", "node_id": "MDQ6VXNlcjY2MDgyMzI=", "organizations_url": "https://api.github.com/users/abarbosa94/orgs", "received_events_url": "https://api.github.com/users/abarbosa94/received_events", "repos_url": "https://api.github.com/users/abarbosa94/repos", "site_admin": false, "starred_url": "https://api.github.com/users/abarbosa94/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/abarbosa94/subscriptions", "type": "User", "url": "https://api.github.com/users/abarbosa94" }
can't pickle SwigPyObject objects when calling dataset.get_nearest_examples from FAISS index
https://api.github.com/repos/huggingface/datasets/issues/1805/events
null
https://api.github.com/repos/huggingface/datasets/issues/1805/labels{/name}
2021-02-01T16:14:17Z
null
false
null
null
798,498,053
[]
https://api.github.com/repos/huggingface/datasets/issues/1805
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
So, I have the following instances in my dataset ``` {'question': 'An astronomer observes that a planet rotates faster after a meteorite impact. Which is the most likely effect of this increase in rotation?', 'answer': 'C', 'example_id': 'ARCCH_Mercury_7175875', 'options':[{'option_context': 'One effect of increased amperage in the planetary world (..)', 'option_id': 'A', 'option_text': 'Planetary density will decrease.'}, (...)]} ``` The `options` value is always an list with 4 options, each one is a dict with `option_context`; `option_id` and `option_text`. I would like to overwrite the `option_context` of each instance of my dataset for a dpr result that I am developing. Then, I trained a model already and save it in a FAISS index ``` dpr_dataset = load_dataset( "text", data_files=ARC_CORPUS_TEXT, cache_dir=CACHE_DIR, split="train[:100%]", ) dpr_dataset.load_faiss_index("embeddings", f"{ARC_CORPUS_FAISS}") torch.set_grad_enabled(False) ``` Then, as a processor of my dataset, I created a map function that calls the `dpr_dataset` for each _option_ ``` def generate_context(example): question_text = example['question'] for option in example['options']: question_with_option = question_text + " " + option['option_text'] tokenize_text = question_tokenizer(question_with_option, return_tensors="pt").to(device) question_embed = ( question_encoder(**tokenize_text) )[0][0].cpu().numpy() _, retrieved_examples = dpr_dataset.get_nearest_examples( "embeddings", question_embed, k=10 ) # option["option_context"] = retrieved_examples["text"] # option["option_context"] = " ".join(option["option_context"]).strip() #result_dict = { # 'example_id': example['example_id'], # 'answer': example['answer'], # 'question': question_text, #options': example['options'] # } return example ``` I intentionally commented on this portion of the code. 
But when I call the `map` method, `ds_with_context = dataset.map(generate_context,load_from_cache_file=False)` It calls the following error: ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-55-75a458ce205c> in <module> ----> 1 ds_with_context = dataset.map(generate_context,load_from_cache_file=False) ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/dataset_dict.py in map(self, function, with_indices, input_columns, batched, batch_size, remove_columns, keep_in_memory, load_from_cache_file, cache_file_names, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc) 301 num_proc=num_proc, 302 ) --> 303 for k, dataset in self.items() 304 } 305 ) ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/dataset_dict.py in <dictcomp>(.0) 301 num_proc=num_proc, 302 ) --> 303 for k, dataset in self.items() 304 } 305 ) ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/arrow_dataset.py in map(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint) 1257 fn_kwargs=fn_kwargs, 1258 new_fingerprint=new_fingerprint, -> 1259 update_data=update_data, 1260 ) 1261 else: ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/arrow_dataset.py in wrapper(*args, **kwargs) 155 } 156 # apply actual function --> 157 out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) 158 datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] 159 # re-apply format to the output ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/fingerprint.py in wrapper(*args, **kwargs) 156 kwargs_for_fingerprint["fingerprint_name"] = fingerprint_name 157 kwargs[fingerprint_name] = update_fingerprint( --> 158 self._fingerprint, transform, kwargs_for_fingerprint 159 ) 160 ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/fingerprint.py in update_fingerprint(fingerprint, transform, transform_args) 103 for key in sorted(transform_args): 104 hasher.update(key) --> 105 hasher.update(transform_args[key]) 106 return hasher.hexdigest() 107 ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/fingerprint.py in update(self, value) 55 def update(self, value): 56 self.m.update(f"=={type(value)}==".encode("utf8")) ---> 57 self.m.update(self.hash(value).encode("utf-8")) 58 59 def hexdigest(self): ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/fingerprint.py in hash(cls, value) 51 return cls.dispatch[type(value)](cls, value) 52 else: ---> 53 return cls.hash_default(value) 54 55 def update(self, value): ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/fingerprint.py in hash_default(cls, value) 44 @classmethod 45 def hash_default(cls, value): ---> 46 return cls.hash_bytes(dumps(value)) 47 48 @classmethod ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/utils/py_utils.py in dumps(obj) 387 file = StringIO() 388 with _no_cache_fields(obj): --> 389 dump(obj, file) 390 return file.getvalue() 391 
~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/utils/py_utils.py in dump(obj, file) 359 def dump(obj, file): 360 """pickle an object to a file""" --> 361 Pickler(file, recurse=True).dump(obj) 362 return 363 ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/dill/_dill.py in dump(self, obj) 452 raise PicklingError(msg) 453 else: --> 454 StockPickler.dump(self, obj) 455 stack.clear() # clear record of 'recursion-sensitive' pickled objects 456 return /usr/lib/python3.7/pickle.py in dump(self, obj) 435 if self.proto >= 4: 436 self.framer.start_framing() --> 437 self.save(obj) 438 self.write(STOP) 439 self.framer.end_framing() /usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id) 502 f = self.dispatch.get(t) 503 if f is not None: --> 504 f(self, obj) # Call unbound method with explicit self 505 return 506 ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/datasets/utils/py_utils.py in save_function(pickler, obj) 554 dill._dill._create_function, 555 (obj.__code__, globs, obj.__name__, obj.__defaults__, obj.__closure__, obj.__dict__, fkwdefaults), --> 556 obj=obj, 557 ) 558 else: /usr/lib/python3.7/pickle.py in save_reduce(self, func, args, state, listitems, dictitems, obj) 636 else: 637 save(func) --> 638 save(args) 639 write(REDUCE) 640 /usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id) 502 f = self.dispatch.get(t) 503 if f is not None: --> 504 f(self, obj) # Call unbound method with explicit self 505 return 506 /usr/lib/python3.7/pickle.py in save_tuple(self, obj) 784 write(MARK) 785 for element in obj: --> 786 save(element) 787 788 if id(obj) in memo: /usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id) 502 f = self.dispatch.get(t) 503 if f is not None: --> 504 f(self, obj) # Call unbound method with explicit self 505 return 506 ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/dill/_dill.py in save_module_dict(pickler, obj) 939 # we only care about session the first pass thru 940 pickler._session = False --> 941 StockPickler.save_dict(pickler, obj) 942 log.info("# D2") 943 return /usr/lib/python3.7/pickle.py in save_dict(self, obj) 854 855 self.memoize(obj) --> 856 self._batch_setitems(obj.items()) 857 858 dispatch[dict] = save_dict /usr/lib/python3.7/pickle.py in _batch_setitems(self, items) 880 for k, v in tmp: 881 save(k) --> 882 save(v) 883 write(SETITEMS) 884 elif n: /usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id) 547 548 # Save the reduce() output and finally memoize the object --> 549 self.save_reduce(obj=obj, *rv) 550 551 def persistent_id(self, obj): /usr/lib/python3.7/pickle.py in save_reduce(self, func, args, state, listitems, dictitems, obj) 660 661 if state is not None: --> 662 save(state) 663 write(BUILD) 664 /usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id) 502 f = self.dispatch.get(t) 503 if f is not None: --> 504 f(self, obj) # Call unbound method with explicit self 505 return 506 ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/dill/_dill.py in save_module_dict(pickler, obj) 939 # we only care about session the first pass thru 940 pickler._session = False --> 941 StockPickler.save_dict(pickler, obj) 942 log.info("# D2") 943 return /usr/lib/python3.7/pickle.py in save_dict(self, obj) 854 855 self.memoize(obj) --> 856 self._batch_setitems(obj.items()) 857 858 dispatch[dict] = save_dict /usr/lib/python3.7/pickle.py in 
_batch_setitems(self, items) 880 for k, v in tmp: 881 save(k) --> 882 save(v) 883 write(SETITEMS) 884 elif n: /usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id) 502 f = self.dispatch.get(t) 503 if f is not None: --> 504 f(self, obj) # Call unbound method with explicit self 505 return 506 ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/dill/_dill.py in save_module_dict(pickler, obj) 939 # we only care about session the first pass thru 940 pickler._session = False --> 941 StockPickler.save_dict(pickler, obj) 942 log.info("# D2") 943 return /usr/lib/python3.7/pickle.py in save_dict(self, obj) 854 855 self.memoize(obj) --> 856 self._batch_setitems(obj.items()) 857 858 dispatch[dict] = save_dict /usr/lib/python3.7/pickle.py in _batch_setitems(self, items) 885 k, v = tmp[0] 886 save(k) --> 887 save(v) 888 write(SETITEM) 889 # else tmp is empty, and we're done /usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id) 547 548 # Save the reduce() output and finally memoize the object --> 549 self.save_reduce(obj=obj, *rv) 550 551 def persistent_id(self, obj): /usr/lib/python3.7/pickle.py in save_reduce(self, func, args, state, listitems, dictitems, obj) 660 661 if state is not None: --> 662 save(state) 663 write(BUILD) 664 /usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id) 502 f = self.dispatch.get(t) 503 if f is not None: --> 504 f(self, obj) # Call unbound method with explicit self 505 return 506 ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/dill/_dill.py in save_module_dict(pickler, obj) 939 # we only care about session the first pass thru 940 pickler._session = False --> 941 StockPickler.save_dict(pickler, obj) 942 log.info("# D2") 943 return /usr/lib/python3.7/pickle.py in save_dict(self, obj) 854 855 self.memoize(obj) --> 856 self._batch_setitems(obj.items()) 857 858 dispatch[dict] = save_dict /usr/lib/python3.7/pickle.py in _batch_setitems(self, items) 880 for k, v in tmp: 881 save(k) --> 882 save(v) 883 write(SETITEMS) 884 elif n: /usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id) 547 548 # Save the reduce() output and finally memoize the object --> 549 self.save_reduce(obj=obj, *rv) 550 551 def persistent_id(self, obj): /usr/lib/python3.7/pickle.py in save_reduce(self, func, args, state, listitems, dictitems, obj) 660 661 if state is not None: --> 662 save(state) 663 write(BUILD) 664 /usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id) 502 f = self.dispatch.get(t) 503 if f is not None: --> 504 f(self, obj) # Call unbound method with explicit self 505 return 506 ~/.cache/pypoetry/virtualenvs/masters-utTTC0p8-py3.7/lib/python3.7/site-packages/dill/_dill.py in save_module_dict(pickler, obj) 939 # we only care about session the first pass thru 940 pickler._session = False --> 941 StockPickler.save_dict(pickler, obj) 942 log.info("# D2") 943 return /usr/lib/python3.7/pickle.py in save_dict(self, obj) 854 855 self.memoize(obj) --> 856 self._batch_setitems(obj.items()) 857 858 dispatch[dict] = save_dict /usr/lib/python3.7/pickle.py in _batch_setitems(self, items) 885 k, v = tmp[0] 886 save(k) --> 887 save(v) 888 write(SETITEM) 889 # else tmp is empty, and we're done /usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id) 522 reduce = getattr(obj, "__reduce_ex__", None) 523 if reduce is not None: --> 524 rv = reduce(self.proto) 525 else: 526 reduce = getattr(obj, "__reduce__", None) TypeError: can't pickle SwigPyObject objects ``` Which I have 
no idea how to solve/deal with it
2021-03-06T14:32:46Z
https://github.com/huggingface/datasets/issues/1805
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1805/reactions" }
false
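One way to sidestep the fingerprint-hashing error reported in the record above (issue 1805) is to keep the FAISS-backed dataset out of the function passed to `map`, so nothing non-picklable ever reaches the hasher. The rough sketch below rests on that idea; `question_embed_fn` is a hypothetical stand-in for the tokenizer-plus-encoder call in the report, and it assumes the installed release provides `Dataset.add_column`.

```python
# Sketch: run the FAISS retrieval in a plain Python loop instead of inside map(),
# so the SwigPyObject-backed index never has to be pickled for fingerprinting.
ds = dataset["train"]  # the report uses a DatasetDict; work on one split here
contexts = []
for example in ds:
    question_embed = question_embed_fn(example["question"])  # hypothetical encoder helper
    _, retrieved = dpr_dataset.get_nearest_examples("embeddings", question_embed, k=10)
    contexts.append(" ".join(retrieved["text"]).strip())

# Attach the precomputed results as a new column; only picklable objects remain.
ds = ds.add_column("retrieved_context", contexts)
```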
https://api.github.com/repos/huggingface/datasets/issues/1804/comments
https://api.github.com/repos/huggingface/datasets/issues/1804/timeline
2021-02-05T15:49:25Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY1MjkzMTc3
closed
[]
false
1,804
{ "avatar_url": "https://avatars.githubusercontent.com/u/36051308?v=4", "events_url": "https://api.github.com/users/calpt/events{/privacy}", "followers_url": "https://api.github.com/users/calpt/followers", "following_url": "https://api.github.com/users/calpt/following{/other_user}", "gists_url": "https://api.github.com/users/calpt/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/calpt", "id": 36051308, "login": "calpt", "node_id": "MDQ6VXNlcjM2MDUxMzA4", "organizations_url": "https://api.github.com/users/calpt/orgs", "received_events_url": "https://api.github.com/users/calpt/received_events", "repos_url": "https://api.github.com/users/calpt/repos", "site_admin": false, "starred_url": "https://api.github.com/users/calpt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/calpt/subscriptions", "type": "User", "url": "https://api.github.com/users/calpt" }
Add SICK dataset
https://api.github.com/repos/huggingface/datasets/issues/1804/events
null
https://api.github.com/repos/huggingface/datasets/issues/1804/labels{/name}
2021-02-01T15:57:44Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1804.diff", "html_url": "https://github.com/huggingface/datasets/pull/1804", "merged_at": "2021-02-05T15:49:25Z", "patch_url": "https://github.com/huggingface/datasets/pull/1804.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1804" }
798,483,881
[]
https://api.github.com/repos/huggingface/datasets/issues/1804
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Adds the SICK dataset (http://marcobaroni.org/composes/sick.html). Closes #1772. Edit: also closes #1632, which is the original issue requesting the dataset. The newer one is a duplicate.
2021-02-05T17:46:28Z
https://github.com/huggingface/datasets/pull/1804
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1804/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1803/comments
https://api.github.com/repos/huggingface/datasets/issues/1803/timeline
2021-08-04T18:10:42Z
null
completed
MDU6SXNzdWU3OTgyNDM5MDQ=
closed
[]
null
1,803
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Querying examples from big datasets is slower than small datasets
https://api.github.com/repos/huggingface/datasets/issues/1803/events
null
https://api.github.com/repos/huggingface/datasets/issues/1803/labels{/name}
2021-02-01T11:08:23Z
null
false
null
null
798,243,904
[]
https://api.github.com/repos/huggingface/datasets/issues/1803
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
After some experiments with bookcorpus I noticed that querying examples from big datasets is slower than small datasets. For example ```python from datasets import load_dataset b1 = load_dataset("bookcorpus", split="train[:1%]") b50 = load_dataset("bookcorpus", split="train[:50%]") b100 = load_dataset("bookcorpus", split="train[:100%]") %timeit _ = b1[-1] # 12.2 µs ± 70.4 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) %timeit _ = b50[-1] # 92.5 µs ± 1.24 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each) %timeit _ = b100[-1] # 177 µs ± 3.13 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each) ``` It looks like the time to fetch the example increases with the size of the dataset. This is maybe due to the use of the Arrow streaming format to store the data on disk. I guess pyarrow needs to iterate through the file as a stream to find the queried sample. Maybe switching to the Arrow IPC file format could help fixing this issue. Indeed according to the [documentation](https://arrow.apache.org/docs/format/Columnar.html?highlight=arrow1#ipc-file-format), it's identical to the streaming format except that it contains the memory offsets of each sample, which could fix the issue: > We define a “file format” supporting random access that is build with the stream format. The file starts and ends with a magic string ARROW1 (plus padding). What follows in the file is identical to the stream format. At the end of the file, we write a footer containing a redundant copy of the schema (which is a part of the streaming format) plus memory offsets and sizes for each of the data blocks in the file. This enables random access any record batch in the file. See File.fbs for the precise details of the file footer. cc @gaceladri since it can help speed up your training when this one is fixed.
2021-08-04T18:11:01Z
https://github.com/huggingface/datasets/issues/1803
{ "+1": 3, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/1803/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1802/comments
https://api.github.com/repos/huggingface/datasets/issues/1802/timeline
2021-02-03T10:06:30Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY0ODE4NDIy
closed
[]
false
1,802
{ "avatar_url": "https://avatars.githubusercontent.com/u/53136577?v=4", "events_url": "https://api.github.com/users/thevasudevgupta/events{/privacy}", "followers_url": "https://api.github.com/users/thevasudevgupta/followers", "following_url": "https://api.github.com/users/thevasudevgupta/following{/other_user}", "gists_url": "https://api.github.com/users/thevasudevgupta/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thevasudevgupta", "id": 53136577, "login": "thevasudevgupta", "node_id": "MDQ6VXNlcjUzMTM2NTc3", "organizations_url": "https://api.github.com/users/thevasudevgupta/orgs", "received_events_url": "https://api.github.com/users/thevasudevgupta/received_events", "repos_url": "https://api.github.com/users/thevasudevgupta/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thevasudevgupta/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thevasudevgupta/subscriptions", "type": "User", "url": "https://api.github.com/users/thevasudevgupta" }
add github of contributors
https://api.github.com/repos/huggingface/datasets/issues/1802/events
null
https://api.github.com/repos/huggingface/datasets/issues/1802/labels{/name}
2021-02-01T03:49:19Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1802.diff", "html_url": "https://github.com/huggingface/datasets/pull/1802", "merged_at": "2021-02-03T10:06:30Z", "patch_url": "https://github.com/huggingface/datasets/pull/1802.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1802" }
797,924,468
[]
https://api.github.com/repos/huggingface/datasets/issues/1802
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This PR will add contributors' GitHub IDs at the end of every dataset card.
2021-02-03T10:09:52Z
https://github.com/huggingface/datasets/pull/1802
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1802/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1801/comments
https://api.github.com/repos/huggingface/datasets/issues/1801/timeline
2021-02-02T13:17:28Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY0NzMwODYw
closed
[]
false
1,801
{ "avatar_url": "https://avatars.githubusercontent.com/u/11708999?v=4", "events_url": "https://api.github.com/users/mounicam/events{/privacy}", "followers_url": "https://api.github.com/users/mounicam/followers", "following_url": "https://api.github.com/users/mounicam/following{/other_user}", "gists_url": "https://api.github.com/users/mounicam/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mounicam", "id": 11708999, "login": "mounicam", "node_id": "MDQ6VXNlcjExNzA4OTk5", "organizations_url": "https://api.github.com/users/mounicam/orgs", "received_events_url": "https://api.github.com/users/mounicam/received_events", "repos_url": "https://api.github.com/users/mounicam/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mounicam/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mounicam/subscriptions", "type": "User", "url": "https://api.github.com/users/mounicam" }
[GEM] Updated the source link of the data to update correct tokenized version.
https://api.github.com/repos/huggingface/datasets/issues/1801/events
null
https://api.github.com/repos/huggingface/datasets/issues/1801/labels{/name}
2021-01-31T21:17:19Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1801.diff", "html_url": "https://github.com/huggingface/datasets/pull/1801", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1801.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1801" }
797,814,275
[]
https://api.github.com/repos/huggingface/datasets/issues/1801
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
2021-02-02T13:17:38Z
https://github.com/huggingface/datasets/pull/1801
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1801/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1800/comments
https://api.github.com/repos/huggingface/datasets/issues/1800/timeline
2021-02-02T22:49:26Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY0NzE5MjA3
closed
[]
false
1,800
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani" }
Add DuoRC Dataset
https://api.github.com/repos/huggingface/datasets/issues/1800/events
null
https://api.github.com/repos/huggingface/datasets/issues/1800/labels{/name}
2021-01-31T20:01:59Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1800.diff", "html_url": "https://github.com/huggingface/datasets/pull/1800", "merged_at": "2021-02-02T22:49:26Z", "patch_url": "https://github.com/huggingface/datasets/pull/1800.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1800" }
797,798,689
[]
https://api.github.com/repos/huggingface/datasets/issues/1800
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hi, DuoRC SelfRC is one type of the [DuoRC Dataset](https://duorc.github.io/). DuoRC SelfRC is a crowdsourced Abstractive/Extractive Question-Answering dataset based on Wikipedia movie plots. It contains examples that may have answers in the movie plot, synthesized answers which are not present in the movie plot, or no answers. I have also added ParaphraseRC - the other type of DuoRC dataset where questions are based on Wikipedia movie plots and answers are based on corresponding IMDb movie plots. Paper : [https://arxiv.org/abs/1804.07927](https://arxiv.org/abs/1804.07927) I want to add this to 🤗 datasets to make it more accessible to the community. I have added all the details that I could find. Please let me know if anything else is needed from my end. Thanks, Gunjan
2021-02-03T05:01:45Z
https://github.com/huggingface/datasets/pull/1800
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1800/reactions" }
true
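A short usage sketch for the dataset added in the record above (PR 1800); the config names below assume the two variants are exposed as `SelfRC` and `ParaphraseRC`, matching the names used in the PR description.

```python
from datasets import load_dataset

# Both DuoRC variants described in the PR, assuming they are exposed as configs.
self_rc = load_dataset("duorc", "SelfRC", split="train")
para_rc = load_dataset("duorc", "ParaphraseRC", split="train")
print(self_rc[0])
```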
https://api.github.com/repos/huggingface/datasets/issues/1799/comments
https://api.github.com/repos/huggingface/datasets/issues/1799/timeline
2021-02-09T15:49:58Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY0NzEyMzUy
closed
[]
false
1,799
{ "avatar_url": "https://avatars.githubusercontent.com/u/22454783?v=4", "events_url": "https://api.github.com/users/gmihaila/events{/privacy}", "followers_url": "https://api.github.com/users/gmihaila/followers", "following_url": "https://api.github.com/users/gmihaila/following{/other_user}", "gists_url": "https://api.github.com/users/gmihaila/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gmihaila", "id": 22454783, "login": "gmihaila", "node_id": "MDQ6VXNlcjIyNDU0Nzgz", "organizations_url": "https://api.github.com/users/gmihaila/orgs", "received_events_url": "https://api.github.com/users/gmihaila/received_events", "repos_url": "https://api.github.com/users/gmihaila/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gmihaila/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gmihaila/subscriptions", "type": "User", "url": "https://api.github.com/users/gmihaila" }
Update: SWDA - Fixed code to use all metadata features. Added comments and cleaned c…
https://api.github.com/repos/huggingface/datasets/issues/1799/events
null
https://api.github.com/repos/huggingface/datasets/issues/1799/labels{/name}
2021-01-31T19:18:55Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1799.diff", "html_url": "https://github.com/huggingface/datasets/pull/1799", "merged_at": "2021-02-09T15:49:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/1799.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1799" }
797,789,439
[]
https://api.github.com/repos/huggingface/datasets/issues/1799
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This is a dataset I currently use in my research, and I realized some features are not being returned. The previous code was not using all available metadata and was kind of messy. I fixed the code to use all metadata and made some modifications to be more efficient and better formatted. Please let me know if I need to make any changes.
2021-02-09T22:06:13Z
https://github.com/huggingface/datasets/pull/1799
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1799/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1798/comments
https://api.github.com/repos/huggingface/datasets/issues/1798/timeline
2021-02-03T10:35:54Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY0Njk2NjE1
closed
[]
false
1,798
{ "avatar_url": "https://avatars.githubusercontent.com/u/643918?v=4", "events_url": "https://api.github.com/users/mapmeld/events{/privacy}", "followers_url": "https://api.github.com/users/mapmeld/followers", "following_url": "https://api.github.com/users/mapmeld/following{/other_user}", "gists_url": "https://api.github.com/users/mapmeld/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mapmeld", "id": 643918, "login": "mapmeld", "node_id": "MDQ6VXNlcjY0MzkxOA==", "organizations_url": "https://api.github.com/users/mapmeld/orgs", "received_events_url": "https://api.github.com/users/mapmeld/received_events", "repos_url": "https://api.github.com/users/mapmeld/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mapmeld/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mapmeld/subscriptions", "type": "User", "url": "https://api.github.com/users/mapmeld" }
Add Arabic sarcasm dataset
https://api.github.com/repos/huggingface/datasets/issues/1798/events
null
https://api.github.com/repos/huggingface/datasets/issues/1798/labels{/name}
2021-01-31T17:38:55Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1798.diff", "html_url": "https://github.com/huggingface/datasets/pull/1798", "merged_at": "2021-02-03T10:35:54Z", "patch_url": "https://github.com/huggingface/datasets/pull/1798.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1798" }
797,766,818
[]
https://api.github.com/repos/huggingface/datasets/issues/1798
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This dataset is MIT-licensed: https://github.com/iabufarha/ArSarcasm (via https://sites.google.com/view/ar-sarcasm-sentiment-detection/).
2021-02-10T20:39:13Z
https://github.com/huggingface/datasets/pull/1798
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1798/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1797/comments
https://api.github.com/repos/huggingface/datasets/issues/1797/timeline
2021-08-04T18:09:37Z
null
completed
MDU6SXNzdWU3OTczNTc5MDE=
closed
[]
null
1,797
{ "avatar_url": "https://avatars.githubusercontent.com/u/46243662?v=4", "events_url": "https://api.github.com/users/smile0925/events{/privacy}", "followers_url": "https://api.github.com/users/smile0925/followers", "following_url": "https://api.github.com/users/smile0925/following{/other_user}", "gists_url": "https://api.github.com/users/smile0925/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/smile0925", "id": 46243662, "login": "smile0925", "node_id": "MDQ6VXNlcjQ2MjQzNjYy", "organizations_url": "https://api.github.com/users/smile0925/orgs", "received_events_url": "https://api.github.com/users/smile0925/received_events", "repos_url": "https://api.github.com/users/smile0925/repos", "site_admin": false, "starred_url": "https://api.github.com/users/smile0925/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/smile0925/subscriptions", "type": "User", "url": "https://api.github.com/users/smile0925" }
Connection error
https://api.github.com/repos/huggingface/datasets/issues/1797/events
null
https://api.github.com/repos/huggingface/datasets/issues/1797/labels{/name}
2021-01-30T07:32:45Z
null
false
null
null
797,357,901
[]
https://api.github.com/repos/huggingface/datasets/issues/1797
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I am hitting the error below; please help, and thanks. `train_data = datasets.load_dataset("xsum", split="train")` `ConnectionError: Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/1.0.2/datasets/xsum/xsum.py`
2021-08-04T18:09:37Z
https://github.com/huggingface/datasets/issues/1797
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1797/reactions" }
false
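The error in the record above (issue 1797) points at a script URL pinned to `datasets` 1.0.2, so upgrading the library often resolves it; another option, sketched below, is to download the dataset script manually and point `load_dataset` at the local copy (the local path here is hypothetical).

```python
import datasets

# Hypothetical local path to a manually downloaded copy of xsum.py.
train_data = datasets.load_dataset("./xsum/xsum.py", split="train")
```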
https://api.github.com/repos/huggingface/datasets/issues/1796/comments
https://api.github.com/repos/huggingface/datasets/issues/1796/timeline
null
null
null
MDU6SXNzdWU3OTczMjk5MDU=
open
[]
null
1,796
{ "avatar_url": "https://avatars.githubusercontent.com/u/20911334?v=4", "events_url": "https://api.github.com/users/ayubSubhaniya/events{/privacy}", "followers_url": "https://api.github.com/users/ayubSubhaniya/followers", "following_url": "https://api.github.com/users/ayubSubhaniya/following{/other_user}", "gists_url": "https://api.github.com/users/ayubSubhaniya/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ayubSubhaniya", "id": 20911334, "login": "ayubSubhaniya", "node_id": "MDQ6VXNlcjIwOTExMzM0", "organizations_url": "https://api.github.com/users/ayubSubhaniya/orgs", "received_events_url": "https://api.github.com/users/ayubSubhaniya/received_events", "repos_url": "https://api.github.com/users/ayubSubhaniya/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ayubSubhaniya/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ayubSubhaniya/subscriptions", "type": "User", "url": "https://api.github.com/users/ayubSubhaniya" }
Filter on dataset too much slowww
https://api.github.com/repos/huggingface/datasets/issues/1796/events
null
https://api.github.com/repos/huggingface/datasets/issues/1796/labels{/name}
2021-01-30T04:09:19Z
null
false
null
null
797,329,905
[]
https://api.github.com/repos/huggingface/datasets/issues/1796
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I have a dataset with 50M rows. For pre-processing, I need to tokenize this and filter rows with the large sequence. My tokenization took roughly 12mins. I used `map()` with batch size 1024 and multi-process with 96 processes. When I applied the `filter()` function it is taking too much time. I need to filter sequences based on a boolean column. Below are the variants I tried. 1. filter() with batch size 1024, single process (takes roughly 3 hr) 2. filter() with batch size 1024, 96 processes (takes 5-6 hrs ¯\\\_(ツ)\_/¯) 3. filter() with loading all data in memory, only a single boolean column (never ends). Can someone please help? Below is a sample code for small dataset. ``` from datasets import load_dataset dataset = load_dataset('glue', 'mrpc', split='train') dataset = dataset.map(lambda x: {'flag': random.randint(0,1)==1}) def _amplify(data): return data dataset = dataset.filter(_amplify, batch_size=1024, keep_in_memory=False, input_columns=['flag']) ```
2024-01-19T13:25:21Z
https://github.com/huggingface/datasets/issues/1796
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1796/reactions" }
false
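A workaround often suggested for the slowdown in the record above (issue 1796): since `map` is reported to be fast, compute the boolean column with `map` and then keep the matching rows with `Dataset.select`, avoiding the per-row overhead of `filter`. A minimal sketch, reusing the `flag` column from the example in the report:

```python
# Sketch: select rows by index instead of calling filter() on the boolean column.
flags = dataset["flag"]                         # materialize the boolean column as a list
keep = [i for i, flag in enumerate(flags) if flag]
dataset = dataset.select(keep)                  # fast row selection by indices
```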
https://api.github.com/repos/huggingface/datasets/issues/1795/comments
https://api.github.com/repos/huggingface/datasets/issues/1795/timeline
2021-02-05T09:54:06Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY0MDk5OTUz
closed
[]
false
1,795
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Custom formatting for lazy map + arrow data extraction refactor
https://api.github.com/repos/huggingface/datasets/issues/1795/events
null
https://api.github.com/repos/huggingface/datasets/issues/1795/labels{/name}
2021-01-29T16:35:53Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1795.diff", "html_url": "https://github.com/huggingface/datasets/pull/1795", "merged_at": "2021-02-05T09:54:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/1795.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1795" }
797,021,730
[]
https://api.github.com/repos/huggingface/datasets/issues/1795
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Hi ! This PR refactors the way data are extracted from pyarrow tables to extend it to the use of custom formatting functions. While the internal storage of the dataset is always the Apache Arrow format, by setting a specific format on a dataset, you can cast the output of `datasets.Dataset.__getitem__` in NumPy/pandas/PyTorch/TensorFlow, on-the-fly. A specific format can be activated with `datasets.Dataset.set_format`. For example: `dataset.set_format(type='torch', columns=['label'])`. ### What's new: You can now also define your own formatting function that is applied on-the-fly. To do so you can pass your formatting function in the `transform` parameter of `datasets.Dataset.set_format`, and keep `type` to `None`. A formatting function is a callable that takes a batch (as a dict, formatted as python) as input and returns a batch. Here is an example to tokenize and pad tokens on-the-fly when accessing the samples: ```python from datasets import load_dataset from transformers import BertTokenizer tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") def encode(batch): return tokenizer(batch["sentence1"], padding="longest", truncation=True, max_length=512, return_tensors="pt") dataset = load_dataset("glue", "mrpc", split="train") dataset.set_format(transform=encode) dataset.format # {'type': 'custom', 'format_kwargs': {'transform': <function __main__.encode(batch)>}, 'columns': ['idx', 'label', 'sentence1', 'sentence2'], 'output_all_columns': False} dataset[:2] # {'input_ids': tensor([[ 101, 2572, 3217, ... 102]]), 'token_type_ids': tensor([[0, 0, 0, ... 0]]), 'attention_mask': tensor([[1, 1, 1, ... 1]])} ``` Let me know what you think of this API ! We can still change it if we want to. Especially @sgugger since this may be useful when using `datasets` to train models. EDIT: this was changed to `dataset.set_transform(encode)` ------------------- Note: I had to refactor the way data are extracted and formatted from pyarrow tables and I made it more robust and flexible. In particular I modularized it to be able to unit-test it properly. This was very helpful since I detected some bugs in the previous implementation and was able to fix them. Some bugs I found and fixed: - certain slices/ranges were not supported because negative ids were passed to pyarrow - formatting as numpy/torch/tensorflow a column would make it lose its precision information (for example a column as `Value("float32")`) would be returned as a tensor of float64 (default behavior for numpy) - on windows integers formatted as numpy/torch/tensorflow were not always int64 tensors by default but were int32 The unit tests for those are now really extensive :)
2022-07-30T09:50:11Z
https://github.com/huggingface/datasets/pull/1795
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 3, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/1795/reactions" }
true
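The edit at the end of the record above (PR 1795) notes that the API landed as `dataset.set_transform(encode)`; the snippet below simply restates the PR's own example with that final method name, using the same tokenizer and column names as in the description.

```python
from datasets import load_dataset
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

def encode(batch):
    # Tokenize and pad on the fly, returning PyTorch tensors.
    return tokenizer(batch["sentence1"], padding="longest", truncation=True,
                     max_length=512, return_tensors="pt")

dataset = load_dataset("glue", "mrpc", split="train")
dataset.set_transform(encode)  # applied lazily in __getitem__
print(dataset[:2]["input_ids"].shape)
```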
https://api.github.com/repos/huggingface/datasets/issues/1794/comments
https://api.github.com/repos/huggingface/datasets/issues/1794/timeline
2021-01-29T16:31:38Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY0MDYyMTkw
closed
[]
false
1,794
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Move silicone directory
https://api.github.com/repos/huggingface/datasets/issues/1794/events
null
https://api.github.com/repos/huggingface/datasets/issues/1794/labels{/name}
2021-01-29T15:33:15Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1794.diff", "html_url": "https://github.com/huggingface/datasets/pull/1794", "merged_at": "2021-01-29T16:31:38Z", "patch_url": "https://github.com/huggingface/datasets/pull/1794.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1794" }
796,975,588
[]
https://api.github.com/repos/huggingface/datasets/issues/1794
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
The dataset was added in #1761 but not in the right directory. I'm moving it to /datasets
2021-01-29T16:31:39Z
https://github.com/huggingface/datasets/pull/1794
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1794/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1793/comments
https://api.github.com/repos/huggingface/datasets/issues/1793/timeline
2021-01-29T16:53:32Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY0MDMzMjk0
closed
[]
false
1,793
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
Minor fix the docstring of load_metric
https://api.github.com/repos/huggingface/datasets/issues/1793/events
null
https://api.github.com/repos/huggingface/datasets/issues/1793/labels{/name}
2021-01-29T14:47:35Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1793.diff", "html_url": "https://github.com/huggingface/datasets/pull/1793", "merged_at": "2021-01-29T16:53:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/1793.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1793" }
796,940,299
[]
https://api.github.com/repos/huggingface/datasets/issues/1793
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Minor fix: - duplicated attributes - format fix
2021-01-29T16:53:32Z
https://github.com/huggingface/datasets/pull/1793
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1793/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1792/comments
https://api.github.com/repos/huggingface/datasets/issues/1792/timeline
2021-02-12T14:13:28Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY0MDI4NTk1
closed
[]
false
1,792
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
Allow loading dataset in-memory
https://api.github.com/repos/huggingface/datasets/issues/1792/events
null
https://api.github.com/repos/huggingface/datasets/issues/1792/labels{/name}
2021-01-29T14:39:50Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1792.diff", "html_url": "https://github.com/huggingface/datasets/pull/1792", "merged_at": "2021-02-12T14:13:28Z", "patch_url": "https://github.com/huggingface/datasets/pull/1792.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1792" }
796,934,627
[]
https://api.github.com/repos/huggingface/datasets/issues/1792
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Allow loading datasets either from a memory-mapped file (current implementation) or from a file descriptor, copying data to physical memory. Close #708
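For illustration, a minimal usage sketch of the two loading modes described above (assuming the option is exposed as a `keep_in_memory` flag on `load_dataset`; the dataset name is just a placeholder):

```python
from datasets import load_dataset

# Default: memory-mapped Arrow file, pages are read from disk on access.
ds_mmap = load_dataset("squad", split="train")

# Copy the data into physical memory instead; useful for small datasets
# when disk I/O would otherwise be the bottleneck.
ds_ram = load_dataset("squad", split="train", keep_in_memory=True)
```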
2021-02-12T14:13:28Z
https://github.com/huggingface/datasets/pull/1792
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 2, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/1792/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1791/comments
https://api.github.com/repos/huggingface/datasets/issues/1791/timeline
2021-01-29T17:05:07Z
null
null
MDExOlB1bGxSZXF1ZXN0NTY0MDE5OTk3
closed
[]
false
1,791
{ "avatar_url": "https://avatars.githubusercontent.com/u/7549587?v=4", "events_url": "https://api.github.com/users/TezRomacH/events{/privacy}", "followers_url": "https://api.github.com/users/TezRomacH/followers", "following_url": "https://api.github.com/users/TezRomacH/following{/other_user}", "gists_url": "https://api.github.com/users/TezRomacH/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/TezRomacH", "id": 7549587, "login": "TezRomacH", "node_id": "MDQ6VXNlcjc1NDk1ODc=", "organizations_url": "https://api.github.com/users/TezRomacH/orgs", "received_events_url": "https://api.github.com/users/TezRomacH/received_events", "repos_url": "https://api.github.com/users/TezRomacH/repos", "site_admin": false, "starred_url": "https://api.github.com/users/TezRomacH/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TezRomacH/subscriptions", "type": "User", "url": "https://api.github.com/users/TezRomacH" }
Small fix with corrected logging of train vectors
https://api.github.com/repos/huggingface/datasets/issues/1791/events
null
https://api.github.com/repos/huggingface/datasets/issues/1791/labels{/name}
2021-01-29T14:26:06Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1791.diff", "html_url": "https://github.com/huggingface/datasets/pull/1791", "merged_at": "2021-01-29T17:05:07Z", "patch_url": "https://github.com/huggingface/datasets/pull/1791.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1791" }
796,924,519
[]
https://api.github.com/repos/huggingface/datasets/issues/1791
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Now you can set `train_size` to the whole dataset size via `train_size = -1`, and the logging writes not `Training the index with the first -1 vectors` but (for example) `Training the index with the first 16123 vectors`. The same holds when `train_size` exceeds the dataset length. Logging will be correct.
2021-01-29T18:51:10Z
https://github.com/huggingface/datasets/pull/1791
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1791/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1790/comments
https://api.github.com/repos/huggingface/datasets/issues/1790/timeline
null
null
null
MDU6SXNzdWU3OTY2NzgxNTc=
open
[]
null
1,790
{ "avatar_url": "https://avatars.githubusercontent.com/u/6331508?v=4", "events_url": "https://api.github.com/users/miyamonz/events{/privacy}", "followers_url": "https://api.github.com/users/miyamonz/followers", "following_url": "https://api.github.com/users/miyamonz/following{/other_user}", "gists_url": "https://api.github.com/users/miyamonz/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/miyamonz", "id": 6331508, "login": "miyamonz", "node_id": "MDQ6VXNlcjYzMzE1MDg=", "organizations_url": "https://api.github.com/users/miyamonz/orgs", "received_events_url": "https://api.github.com/users/miyamonz/received_events", "repos_url": "https://api.github.com/users/miyamonz/repos", "site_admin": false, "starred_url": "https://api.github.com/users/miyamonz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/miyamonz/subscriptions", "type": "User", "url": "https://api.github.com/users/miyamonz" }
ModuleNotFoundError: No module named 'apache_beam', when specific languages.
https://api.github.com/repos/huggingface/datasets/issues/1790/events
null
https://api.github.com/repos/huggingface/datasets/issues/1790/labels{/name}
2021-01-29T08:17:24Z
null
false
null
null
796,678,157
[]
https://api.github.com/repos/huggingface/datasets/issues/1790
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
```py import datasets wiki = datasets.load_dataset('wikipedia', '20200501.ja', cache_dir='./datasets') ``` then `ModuleNotFoundError: No module named 'apache_beam'` happened. The error doesn't appear when it's '20200501.en'. I don't know Apache Beam, but according to #498 it isn't necessary when the data is saved locally. Is that correct?
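As a hedged sketch of the usual remedy (an assumption based on the error message, not a confirmed fix from this thread): the non-English Wikipedia configs are generated with Apache Beam, so the package has to be installed first, and a Beam runner can be passed explicitly. Building a large dump locally can be slow and memory-hungry.

```python
# Requires: pip install apache-beam
import datasets

# DirectRunner processes the dump on the local machine.
wiki = datasets.load_dataset(
    "wikipedia", "20200501.ja", beam_runner="DirectRunner", cache_dir="./datasets"
)
```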
2021-03-25T12:10:51Z
https://github.com/huggingface/datasets/issues/1790
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1790/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1789/comments
https://api.github.com/repos/huggingface/datasets/issues/1789/timeline
2021-01-28T18:13:56Z
null
null
MDExOlB1bGxSZXF1ZXN0NTYzNDQyMTc2
closed
[]
false
1,789
{ "avatar_url": "https://avatars.githubusercontent.com/u/10469459?v=4", "events_url": "https://api.github.com/users/yjernite/events{/privacy}", "followers_url": "https://api.github.com/users/yjernite/followers", "following_url": "https://api.github.com/users/yjernite/following{/other_user}", "gists_url": "https://api.github.com/users/yjernite/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yjernite", "id": 10469459, "login": "yjernite", "node_id": "MDQ6VXNlcjEwNDY5NDU5", "organizations_url": "https://api.github.com/users/yjernite/orgs", "received_events_url": "https://api.github.com/users/yjernite/received_events", "repos_url": "https://api.github.com/users/yjernite/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yjernite/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yjernite/subscriptions", "type": "User", "url": "https://api.github.com/users/yjernite" }
[BUG FIX] typo in the import path for metrics
https://api.github.com/repos/huggingface/datasets/issues/1789/events
null
https://api.github.com/repos/huggingface/datasets/issues/1789/labels{/name}
2021-01-28T18:01:37Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1789.diff", "html_url": "https://github.com/huggingface/datasets/pull/1789", "merged_at": "2021-01-28T18:13:55Z", "patch_url": "https://github.com/huggingface/datasets/pull/1789.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1789" }
796,229,721
[]
https://api.github.com/repos/huggingface/datasets/issues/1789
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
This tiny PR fixes a typo introduced in https://github.com/huggingface/datasets/pull/1726 which prevents loading new metrics
2021-01-28T18:13:56Z
https://github.com/huggingface/datasets/pull/1789
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1789/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1788/comments
https://api.github.com/repos/huggingface/datasets/issues/1788/timeline
2021-01-28T18:46:13Z
null
null
MDExOlB1bGxSZXF1ZXN0NTYyODc1NzA2
closed
[]
true
1,788
{ "avatar_url": "https://avatars.githubusercontent.com/u/2062185?v=4", "events_url": "https://api.github.com/users/songfeng/events{/privacy}", "followers_url": "https://api.github.com/users/songfeng/followers", "following_url": "https://api.github.com/users/songfeng/following{/other_user}", "gists_url": "https://api.github.com/users/songfeng/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/songfeng", "id": 2062185, "login": "songfeng", "node_id": "MDQ6VXNlcjIwNjIxODU=", "organizations_url": "https://api.github.com/users/songfeng/orgs", "received_events_url": "https://api.github.com/users/songfeng/received_events", "repos_url": "https://api.github.com/users/songfeng/repos", "site_admin": false, "starred_url": "https://api.github.com/users/songfeng/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/songfeng/subscriptions", "type": "User", "url": "https://api.github.com/users/songfeng" }
Doc2dial rc
https://api.github.com/repos/huggingface/datasets/issues/1788/events
null
https://api.github.com/repos/huggingface/datasets/issues/1788/labels{/name}
2021-01-27T23:51:00Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1788.diff", "html_url": "https://github.com/huggingface/datasets/pull/1788", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1788.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1788" }
795,544,422
[]
https://api.github.com/repos/huggingface/datasets/issues/1788
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
2021-01-28T18:46:13Z
https://github.com/huggingface/datasets/pull/1788
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1788/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1787/comments
https://api.github.com/repos/huggingface/datasets/issues/1787/timeline
2021-01-28T13:56:29Z
null
null
MDExOlB1bGxSZXF1ZXN0NTYyODI1NTI3
closed
[]
false
1,787
{ "avatar_url": "https://avatars.githubusercontent.com/u/10104354?v=4", "events_url": "https://api.github.com/users/yuchenlin/events{/privacy}", "followers_url": "https://api.github.com/users/yuchenlin/followers", "following_url": "https://api.github.com/users/yuchenlin/following{/other_user}", "gists_url": "https://api.github.com/users/yuchenlin/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yuchenlin", "id": 10104354, "login": "yuchenlin", "node_id": "MDQ6VXNlcjEwMTA0MzU0", "organizations_url": "https://api.github.com/users/yuchenlin/orgs", "received_events_url": "https://api.github.com/users/yuchenlin/received_events", "repos_url": "https://api.github.com/users/yuchenlin/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yuchenlin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yuchenlin/subscriptions", "type": "User", "url": "https://api.github.com/users/yuchenlin" }
Update the CommonGen citation information
https://api.github.com/repos/huggingface/datasets/issues/1787/events
null
https://api.github.com/repos/huggingface/datasets/issues/1787/labels{/name}
2021-01-27T22:12:47Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1787.diff", "html_url": "https://github.com/huggingface/datasets/pull/1787", "merged_at": "2021-01-28T13:56:29Z", "patch_url": "https://github.com/huggingface/datasets/pull/1787.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1787" }
795,485,842
[]
https://api.github.com/repos/huggingface/datasets/issues/1787
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
2021-01-28T13:56:29Z
https://github.com/huggingface/datasets/pull/1787
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1787/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1786/comments
https://api.github.com/repos/huggingface/datasets/issues/1786/timeline
2021-04-23T15:17:39Z
null
completed
MDU6SXNzdWU3OTU0NjI4MTY=
closed
[]
null
1,786
{ "avatar_url": "https://avatars.githubusercontent.com/u/78090287?v=4", "events_url": "https://api.github.com/users/kkhan188/events{/privacy}", "followers_url": "https://api.github.com/users/kkhan188/followers", "following_url": "https://api.github.com/users/kkhan188/following{/other_user}", "gists_url": "https://api.github.com/users/kkhan188/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/kkhan188", "id": 78090287, "login": "kkhan188", "node_id": "MDQ6VXNlcjc4MDkwMjg3", "organizations_url": "https://api.github.com/users/kkhan188/orgs", "received_events_url": "https://api.github.com/users/kkhan188/received_events", "repos_url": "https://api.github.com/users/kkhan188/repos", "site_admin": false, "starred_url": "https://api.github.com/users/kkhan188/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kkhan188/subscriptions", "type": "User", "url": "https://api.github.com/users/kkhan188" }
How to use split dataset
https://api.github.com/repos/huggingface/datasets/issues/1786/events
null
https://api.github.com/repos/huggingface/datasets/issues/1786/labels{/name}
2021-01-27T21:37:47Z
null
false
null
null
795,462,816
[ { "color": "d876e3", "default": true, "description": "Further information is requested", "id": 1935892912, "name": "question", "node_id": "MDU6TGFiZWwxOTM1ODkyOTEy", "url": "https://api.github.com/repos/huggingface/datasets/labels/question" } ]
https://api.github.com/repos/huggingface/datasets/issues/1786
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
![Capture1](https://user-images.githubusercontent.com/78090287/106057436-cb6a1f00-6111-11eb-8c9c-3658065b1fdf.PNG) Hey, I want to split the lambada dataset into corpus, test, train and valid txt files (like Penn Treebank), but I am not able to achieve this. What I am doing is executing the lambada.py file in my project, but it's not giving the desired results. Any help will be appreciated!
2021-04-23T15:17:39Z
https://github.com/huggingface/datasets/issues/1786
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1786/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1785/comments
https://api.github.com/repos/huggingface/datasets/issues/1785/timeline
2021-01-30T01:07:56Z
null
completed
MDU6SXNzdWU3OTU0NTg4NTY=
closed
[]
null
1,785
{ "avatar_url": "https://avatars.githubusercontent.com/u/4341867?v=4", "events_url": "https://api.github.com/users/olinguyen/events{/privacy}", "followers_url": "https://api.github.com/users/olinguyen/followers", "following_url": "https://api.github.com/users/olinguyen/following{/other_user}", "gists_url": "https://api.github.com/users/olinguyen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/olinguyen", "id": 4341867, "login": "olinguyen", "node_id": "MDQ6VXNlcjQzNDE4Njc=", "organizations_url": "https://api.github.com/users/olinguyen/orgs", "received_events_url": "https://api.github.com/users/olinguyen/received_events", "repos_url": "https://api.github.com/users/olinguyen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/olinguyen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/olinguyen/subscriptions", "type": "User", "url": "https://api.github.com/users/olinguyen" }
Not enough disk space (Needed: Unknown size) when caching on a cluster
https://api.github.com/repos/huggingface/datasets/issues/1785/events
null
https://api.github.com/repos/huggingface/datasets/issues/1785/labels{/name}
2021-01-27T21:30:59Z
null
false
null
null
795,458,856
[]
https://api.github.com/repos/huggingface/datasets/issues/1785
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
I'm running some experiments where I'm caching datasets on a cluster and accessing it through multiple compute nodes. However, I get an error when loading the cached dataset from the shared disk. The exact error thrown: ```bash >>> load_dataset(dataset, cache_dir="/path/to/cluster/shared/path") OSError: Not enough disk space. Needed: Unknown size (download: Unknown size, generated: Unknown size, post-processed: Unknown size) ``` [`utils.has_sufficient_disk_space`](https://github.com/huggingface/datasets/blob/8a03ab7d123a76ee744304f21ce868c75f411214/src/datasets/utils/py_utils.py#L332) fails on each job because of how the cluster system is designed (`disk_usage(".").free` can't compute on the cluster's shared disk). This is exactly where the error gets thrown: https://github.com/huggingface/datasets/blob/master/src/datasets/builder.py#L502 ```python if not utils.has_sufficient_disk_space(self.info.size_in_bytes or 0, directory=self._cache_dir_root): raise IOError( "Not enough disk space. Needed: {} (download: {}, generated: {}, post-processed: {})".format( utils.size_str(self.info.size_in_bytes or 0), utils.size_str(self.info.download_size or 0), utils.size_str(self.info.dataset_size or 0), utils.size_str(self.info.post_processing_size or 0), ) ) ``` What would be a good way to circumvent this? my current fix is to manually comment out that part, but that is not ideal. Would it be possible to pass a flag to skip this check on disk space?
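One possible stopgap until such a flag exists, sketched here as an assumption rather than a supported API: monkeypatch the helper quoted above so the check always passes (use with care, since it also disables a legitimate safety net; the dataset name and path are placeholders).

```python
import datasets.utils

# The shared cluster filesystem cannot report free space, so pretend the check succeeds.
datasets.utils.has_sufficient_disk_space = lambda needed_bytes, directory=".": True

from datasets import load_dataset

ds = load_dataset("squad", cache_dir="/path/to/cluster/shared/path")
```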
2022-11-07T16:33:03Z
https://github.com/huggingface/datasets/issues/1785
{ "+1": 5, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 5, "url": "https://api.github.com/repos/huggingface/datasets/issues/1785/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1784/comments
https://api.github.com/repos/huggingface/datasets/issues/1784/timeline
2021-01-31T08:47:18Z
null
completed
MDU6SXNzdWU3OTQ2NTkxNzQ=
closed
[]
null
1,784
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani" }
JSONDecodeError on JSON with multiple lines
https://api.github.com/repos/huggingface/datasets/issues/1784/events
null
https://api.github.com/repos/huggingface/datasets/issues/1784/labels{/name}
2021-01-27T00:19:22Z
null
false
null
null
794,659,174
[]
https://api.github.com/repos/huggingface/datasets/issues/1784
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hello :), I have been trying to load data using a JSON file. Based on the [docs](https://huggingface.co/docs/datasets/loading_datasets.html#json-files), the following format is supported: ```json {"key1":11, "key2":12, "key3":13} {"key1":21, "key2":22, "key3":23} ``` But, when I try loading a dataset with the same format, I get a JSONDecodeError : `JSONDecodeError: Extra data: line 2 column 1 (char 7142)`. Now, this is expected when using `json` to load a JSON file. But I was wondering if there are any special arguments to pass when using `load_dataset` as the docs suggest that this format is supported. When I convert the JSON file to a list of dictionaries format, I get AttributeError: `AttributeError: 'list' object has no attribute 'keys'`. So, I can't convert them to list of dictionaries either. Please let me know :) Thanks, Gunjan
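For reference, a minimal sketch of loading a file in that line-delimited (JSON Lines) layout with the `json` loader; the file name is a placeholder, and the point is only to illustrate the expected format of one JSON object per line rather than a single top-level list.

```python
from datasets import load_dataset

# data.jsonl contains one JSON object per line:
# {"key1": 11, "key2": 12, "key3": 13}
# {"key1": 21, "key2": 22, "key3": 23}
ds = load_dataset("json", data_files={"train": "data.jsonl"})
print(ds["train"][0])  # {'key1': 11, 'key2': 12, 'key3': 13}
```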
2021-01-31T08:47:18Z
https://github.com/huggingface/datasets/issues/1784
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1784/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1783/comments
https://api.github.com/repos/huggingface/datasets/issues/1783/timeline
2021-02-01T13:58:44Z
null
completed
MDU6SXNzdWU3OTQ1NDQ0OTU=
closed
[]
null
1,783
{ "avatar_url": "https://avatars.githubusercontent.com/u/30875246?v=4", "events_url": "https://api.github.com/users/ChewKokWah/events{/privacy}", "followers_url": "https://api.github.com/users/ChewKokWah/followers", "following_url": "https://api.github.com/users/ChewKokWah/following{/other_user}", "gists_url": "https://api.github.com/users/ChewKokWah/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ChewKokWah", "id": 30875246, "login": "ChewKokWah", "node_id": "MDQ6VXNlcjMwODc1MjQ2", "organizations_url": "https://api.github.com/users/ChewKokWah/orgs", "received_events_url": "https://api.github.com/users/ChewKokWah/received_events", "repos_url": "https://api.github.com/users/ChewKokWah/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ChewKokWah/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ChewKokWah/subscriptions", "type": "User", "url": "https://api.github.com/users/ChewKokWah" }
Dataset Examples Explorer
https://api.github.com/repos/huggingface/datasets/issues/1783/events
null
https://api.github.com/repos/huggingface/datasets/issues/1783/labels{/name}
2021-01-26T20:39:02Z
null
false
null
null
794,544,495
[]
https://api.github.com/repos/huggingface/datasets/issues/1783
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
In the older version of Datasets, there was a useful Dataset Explorer that allowed users to visualize the examples (training, test and validation) of a particular dataset; it is no longer there in the current version. I hope HuggingFace can re-enable the feature to at least allow viewing the first 20 examples of a particular dataset, or alternatively extract 20 examples for each dataset and make those part of the Dataset Card documentation.
2021-02-01T13:58:44Z
https://github.com/huggingface/datasets/issues/1783
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1783/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1782/comments
https://api.github.com/repos/huggingface/datasets/issues/1782/timeline
2021-01-26T13:50:49Z
null
null
MDExOlB1bGxSZXF1ZXN0NTYxNzI5OTc3
closed
[]
false
1,782
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Update pyarrow import warning
https://api.github.com/repos/huggingface/datasets/issues/1782/events
null
https://api.github.com/repos/huggingface/datasets/issues/1782/labels{/name}
2021-01-26T11:47:11Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1782.diff", "html_url": "https://github.com/huggingface/datasets/pull/1782", "merged_at": "2021-01-26T13:50:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/1782.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1782" }
794,167,920
[]
https://api.github.com/repos/huggingface/datasets/issues/1782
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Update the minimum version to >=0.17.1 in the pyarrow version check and update the message. I also moved the check to the top of the __init__.py.
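A hedged, illustrative sketch of what such an import-time guard typically looks like (names and message are approximations of the described change, not the exact source):

```python
import pyarrow
from packaging import version

# Fail fast at import time if the installed pyarrow is too old.
if version.parse(pyarrow.__version__) < version.parse("0.17.1"):
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=0.17.1` is required, "
        "and the current version of pyarrow doesn't match this condition."
    )
```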
2021-01-26T13:50:50Z
https://github.com/huggingface/datasets/pull/1782
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1782/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1781/comments
https://api.github.com/repos/huggingface/datasets/issues/1781/timeline
2022-10-05T12:37:06Z
null
completed
MDU6SXNzdWU3OTM5MTQ1NTY=
closed
[]
null
1,781
{ "avatar_url": "https://avatars.githubusercontent.com/u/45964869?v=4", "events_url": "https://api.github.com/users/PalaashAgrawal/events{/privacy}", "followers_url": "https://api.github.com/users/PalaashAgrawal/followers", "following_url": "https://api.github.com/users/PalaashAgrawal/following{/other_user}", "gists_url": "https://api.github.com/users/PalaashAgrawal/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/PalaashAgrawal", "id": 45964869, "login": "PalaashAgrawal", "node_id": "MDQ6VXNlcjQ1OTY0ODY5", "organizations_url": "https://api.github.com/users/PalaashAgrawal/orgs", "received_events_url": "https://api.github.com/users/PalaashAgrawal/received_events", "repos_url": "https://api.github.com/users/PalaashAgrawal/repos", "site_admin": false, "starred_url": "https://api.github.com/users/PalaashAgrawal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PalaashAgrawal/subscriptions", "type": "User", "url": "https://api.github.com/users/PalaashAgrawal" }
AttributeError: module 'pyarrow' has no attribute 'PyExtensionType' during import
https://api.github.com/repos/huggingface/datasets/issues/1781/events
null
https://api.github.com/repos/huggingface/datasets/issues/1781/labels{/name}
2021-01-26T04:18:35Z
null
false
null
null
793,914,556
[]
https://api.github.com/repos/huggingface/datasets/issues/1781
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I'm using Colab, and suddenly this morning this error appeared. Have a look below! ![screenshot-colab research google com-2021 01 26-08-15-36](https://user-images.githubusercontent.com/45964869/105799890-fdaf3b80-5fae-11eb-8f06-11b65cdccc30.png)
2022-10-05T12:37:06Z
https://github.com/huggingface/datasets/issues/1781
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1781/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1780/comments
https://api.github.com/repos/huggingface/datasets/issues/1780/timeline
2021-01-28T10:19:45Z
null
null
MDExOlB1bGxSZXF1ZXN0NTYxNDkxNTgy
closed
[]
false
1,780
{ "avatar_url": "https://avatars.githubusercontent.com/u/3091916?v=4", "events_url": "https://api.github.com/users/dwadden/events{/privacy}", "followers_url": "https://api.github.com/users/dwadden/followers", "following_url": "https://api.github.com/users/dwadden/following{/other_user}", "gists_url": "https://api.github.com/users/dwadden/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dwadden", "id": 3091916, "login": "dwadden", "node_id": "MDQ6VXNlcjMwOTE5MTY=", "organizations_url": "https://api.github.com/users/dwadden/orgs", "received_events_url": "https://api.github.com/users/dwadden/received_events", "repos_url": "https://api.github.com/users/dwadden/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dwadden/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dwadden/subscriptions", "type": "User", "url": "https://api.github.com/users/dwadden" }
Update SciFact URL
https://api.github.com/repos/huggingface/datasets/issues/1780/events
null
https://api.github.com/repos/huggingface/datasets/issues/1780/labels{/name}
2021-01-26T02:49:06Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1780.diff", "html_url": "https://github.com/huggingface/datasets/pull/1780", "merged_at": "2021-01-28T10:19:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/1780.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1780" }
793,882,132
[]
https://api.github.com/repos/huggingface/datasets/issues/1780
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hi, I'm following up this [issue](https://github.com/huggingface/datasets/issues/1717). I'm the SciFact dataset creator, and I'm trying to update the SciFact data url in your repo. Thanks again for adding the dataset! Basically, I'd just like to change the `_URL` to `"https://scifact.s3-us-west-2.amazonaws.com/release/latest/data.tar.gz"`. I changed `scifact.py` appropriately and tried running ``` python datasets-cli test datasets/scifact --save_infos --all_configs ``` which I was hoping would update the `dataset_infos.json` for SciFact. But for some reason the code still seems to be looking for the old version of the dataset. Full stack trace below. I've tried to clear all my Huggingface-related caches, and I've `git grep`'d to make sure that the old path to the dataset isn't floating around somewhere. So I'm not sure why this is happening? Can you help me switch the download URL? ``` (datasets) $ python datasets-cli test datasets/scifact --save_infos --all_configs Checking datasets/scifact/scifact.py for additional imports. Found main folder for dataset datasets/scifact/scifact.py at /Users/dwadden/.cache/huggingface/modules/datasets_modules/datasets/scifact Found specific version folder for dataset datasets/scifact/scifact.py at /Users/dwadden/.cache/huggingface/modules/datasets_modules/datasets/scifact/2b43b4e125ce3369da7d6353961d9d315e6593f24cc7bbe9ede5e5c911d11534 Found script file from datasets/scifact/scifact.py to /Users/dwadden/.cache/huggingface/modules/datasets_modules/datasets/scifact/2b43b4e125ce3369da7d6353961d9d315e6593f24cc7bbe9ede5e5c911d11534/scifact.py Found dataset infos file from datasets/scifact/dataset_infos.json to /Users/dwadden/.cache/huggingface/modules/datasets_modules/datasets/scifact/2b43b4e125ce3369da7d6353961d9d315e6593f24cc7bbe9ede5e5c911d11534/dataset_infos.json Found metadata file for dataset datasets/scifact/scifact.py at /Users/dwadden/.cache/huggingface/modules/datasets_modules/datasets/scifact/2b43b4e125ce3369da7d6353961d9d315e6593f24cc7bbe9ede5e5c911d11534/scifact.json Loading Dataset Infos from /Users/dwadden/.cache/huggingface/modules/datasets_modules/datasets/scifact/2b43b4e125ce3369da7d6353961d9d315e6593f24cc7bbe9ede5e5c911d11534 Testing builder 'corpus' (1/2) Generating dataset scifact (/Users/dwadden/.cache/huggingface/datasets/scifact/corpus/1.0.0/2b43b4e125ce3369da7d6353961d9d315e6593f24cc7bbe9ede5e5c911d11534) Downloading and preparing dataset scifact/corpus (download: 2.72 MiB, generated: 7.63 MiB, post-processed: Unknown size, total: 10.35 MiB) to /Users/dwadden/.cache/huggingface/datasets/scifact/corpus/1.0.0/2b43b4e125ce3369da7d6353961d9d315e6593f24cc7bbe9ede5e5c911d11534... Downloading took 0.0 min Checksum Computation took 0.0 min Traceback (most recent call last): File "/Users/dwadden/proj/datasets/datasets-cli", line 36, in <module> service.run() File "/Users/dwadden/proj/datasets/src/datasets/commands/test.py", line 139, in run builder.download_and_prepare( File "/Users/dwadden/proj/datasets/src/datasets/builder.py", line 562, in download_and_prepare self._download_and_prepare( File "/Users/dwadden/proj/datasets/src/datasets/builder.py", line 622, in _download_and_prepare verify_checksums( File "/Users/dwadden/proj/datasets/src/datasets/utils/info_utils.py", line 32, in verify_checksums raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums))) datasets.utils.info_utils.ExpectedMoreDownloadedFiles: {'https://ai2-s2-scifact.s3-us-west-2.amazonaws.com/release/2020-05-01/data.tar.gz'} ```
2021-01-28T18:48:00Z
https://github.com/huggingface/datasets/pull/1780
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1780/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1779/comments
https://api.github.com/repos/huggingface/datasets/issues/1779/timeline
2021-01-26T10:20:19Z
null
null
MDExOlB1bGxSZXF1ZXN0NTYxMjEwNjI5
closed
[]
false
1,779
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Ignore definition line number of functions for caching
https://api.github.com/repos/huggingface/datasets/issues/1779/events
null
https://api.github.com/repos/huggingface/datasets/issues/1779/labels{/name}
2021-01-25T16:42:29Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1779.diff", "html_url": "https://github.com/huggingface/datasets/pull/1779", "merged_at": "2021-01-26T10:20:19Z", "patch_url": "https://github.com/huggingface/datasets/pull/1779.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1779" }
793,539,703
[]
https://api.github.com/repos/huggingface/datasets/issues/1779
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
As noticed in #1718 , when a function used for processing with `map` is moved inside its python file, then the change of line number causes the caching mechanism to consider it as a different function. Therefore in this case, it recomputes everything. This is because we were not ignoring the line number definition for such functions (even though we're doing it for lambda functions). For example this code currently prints False: ```python from datasets.fingerprint import Hasher # define once def foo(x): return x h = Hasher.hash(foo) # define a second time elsewhere def foo(x): return x print(h == Hasher.hash(foo)) ``` I changed this by ignoring the line number for all functions.
2021-01-26T10:20:20Z
https://github.com/huggingface/datasets/pull/1779
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1779/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1778/comments
https://api.github.com/repos/huggingface/datasets/issues/1778/timeline
2021-01-29T09:34:51Z
null
null
MDExOlB1bGxSZXF1ZXN0NTYxMTU2Mzk1
closed
[]
false
1,778
{ "avatar_url": "https://avatars.githubusercontent.com/u/18527321?v=4", "events_url": "https://api.github.com/users/rsanjaykamath/events{/privacy}", "followers_url": "https://api.github.com/users/rsanjaykamath/followers", "following_url": "https://api.github.com/users/rsanjaykamath/following{/other_user}", "gists_url": "https://api.github.com/users/rsanjaykamath/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rsanjaykamath", "id": 18527321, "login": "rsanjaykamath", "node_id": "MDQ6VXNlcjE4NTI3MzIx", "organizations_url": "https://api.github.com/users/rsanjaykamath/orgs", "received_events_url": "https://api.github.com/users/rsanjaykamath/received_events", "repos_url": "https://api.github.com/users/rsanjaykamath/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rsanjaykamath/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rsanjaykamath/subscriptions", "type": "User", "url": "https://api.github.com/users/rsanjaykamath" }
Narrative QA Manual
https://api.github.com/repos/huggingface/datasets/issues/1778/events
null
https://api.github.com/repos/huggingface/datasets/issues/1778/labels{/name}
2021-01-25T15:22:31Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1778.diff", "html_url": "https://github.com/huggingface/datasets/pull/1778", "merged_at": "2021-01-29T09:34:51Z", "patch_url": "https://github.com/huggingface/datasets/pull/1778.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1778" }
793,474,507
[]
https://api.github.com/repos/huggingface/datasets/issues/1778
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Submitting the manual version of the Narrative QA script, which requires a manual download from the original repository.
2021-01-29T09:35:14Z
https://github.com/huggingface/datasets/pull/1778
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1778/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1777/comments
https://api.github.com/repos/huggingface/datasets/issues/1777/timeline
2021-01-25T11:12:53Z
null
completed
MDU6SXNzdWU3OTMyNzM3NzA=
closed
[]
null
1,777
{ "avatar_url": "https://avatars.githubusercontent.com/u/76427077?v=4", "events_url": "https://api.github.com/users/nlp-student/events{/privacy}", "followers_url": "https://api.github.com/users/nlp-student/followers", "following_url": "https://api.github.com/users/nlp-student/following{/other_user}", "gists_url": "https://api.github.com/users/nlp-student/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nlp-student", "id": 76427077, "login": "nlp-student", "node_id": "MDQ6VXNlcjc2NDI3MDc3", "organizations_url": "https://api.github.com/users/nlp-student/orgs", "received_events_url": "https://api.github.com/users/nlp-student/received_events", "repos_url": "https://api.github.com/users/nlp-student/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nlp-student/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nlp-student/subscriptions", "type": "User", "url": "https://api.github.com/users/nlp-student" }
GPT2 MNLI training using run_glue.py
https://api.github.com/repos/huggingface/datasets/issues/1777/events
null
https://api.github.com/repos/huggingface/datasets/issues/1777/labels{/name}
2021-01-25T10:53:52Z
null
false
null
null
793,273,770
[]
https://api.github.com/repos/huggingface/datasets/issues/1777
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Edit: I'm closing this because I actually meant to post this in `transformers`, not `datasets`. Running this on Google Colab, ``` !python run_glue.py \ --model_name_or_path gpt2 \ --task_name mnli \ --do_train \ --do_eval \ --max_seq_length 128 \ --per_gpu_train_batch_size 10 \ --gradient_accumulation_steps 32 \ --learning_rate 2e-5 \ --num_train_epochs 3.0 \ --output_dir models/gpt2/mnli/ ``` I get the following error: ``` "Asking to pad but the tokenizer does not have a padding token. " ValueError: Asking to pad but the tokenizer does not have a padding token. Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`. ``` Do I need to modify the trainer to work with GPT-2?
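For context, a minimal sketch of the workaround the error message itself suggests (GPT-2 ships without a padding token, so one has to be assigned before padded batching; this is a general `transformers` pattern rather than something confirmed in this thread):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
# GPT-2 has no pad token by default; reuse the end-of-sequence token for padding.
tokenizer.pad_token = tokenizer.eos_token
```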
2021-01-25T11:12:53Z
https://github.com/huggingface/datasets/issues/1777
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1777/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1776/comments
https://api.github.com/repos/huggingface/datasets/issues/1776/timeline
2021-05-20T04:15:58Z
null
completed
MDU6SXNzdWU3OTI3NTUyNDk=
closed
[]
null
1,776
{ "avatar_url": "https://avatars.githubusercontent.com/u/14048129?v=4", "events_url": "https://api.github.com/users/shuaihuaiyi/events{/privacy}", "followers_url": "https://api.github.com/users/shuaihuaiyi/followers", "following_url": "https://api.github.com/users/shuaihuaiyi/following{/other_user}", "gists_url": "https://api.github.com/users/shuaihuaiyi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/shuaihuaiyi", "id": 14048129, "login": "shuaihuaiyi", "node_id": "MDQ6VXNlcjE0MDQ4MTI5", "organizations_url": "https://api.github.com/users/shuaihuaiyi/orgs", "received_events_url": "https://api.github.com/users/shuaihuaiyi/received_events", "repos_url": "https://api.github.com/users/shuaihuaiyi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/shuaihuaiyi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shuaihuaiyi/subscriptions", "type": "User", "url": "https://api.github.com/users/shuaihuaiyi" }
[Question & Bug Report] Can we preprocess a dataset on the fly?
https://api.github.com/repos/huggingface/datasets/issues/1776/events
null
https://api.github.com/repos/huggingface/datasets/issues/1776/labels{/name}
2021-01-24T09:28:24Z
null
false
null
null
792,755,249
[]
https://api.github.com/repos/huggingface/datasets/issues/1776
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I know we can use `Datasets.map` to preprocess a dataset, but I'm using it with a very large corpus, which generates a huge cache file (several TB of cache from a 400 GB text file). I have no disk large enough to save it. Can we preprocess a dataset on the fly without generating a cache? BTW, I tried raising `writer_batch_size`. It seems that argument doesn't have any effect when it's larger than `batch_size`, because each batch is saved immediately after it's processed. Please check the following code: https://github.com/huggingface/datasets/blob/0281f9d881f3a55c89aeaa642f1ba23444b64083/src/datasets/arrow_dataset.py#L1532
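A hedged sketch of one way to avoid materializing a cache, assuming a `datasets` version recent enough to expose `set_transform` (the transform then runs lazily on each accessed batch instead of writing processed rows to disk; file and model names are placeholders):

```python
from datasets import load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
ds = load_dataset("text", data_files={"train": "corpus.txt"})["train"]

def tokenize(batch):
    # Runs at access time; nothing is written to the cache directory.
    return tokenizer(batch["text"], truncation=True, max_length=128)

ds.set_transform(tokenize)
print(ds[0])  # tokenized on the fly
```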
2021-05-20T04:15:58Z
https://github.com/huggingface/datasets/issues/1776
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1776/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1775/comments
https://api.github.com/repos/huggingface/datasets/issues/1775/timeline
2021-01-24T09:50:39Z
null
completed
MDU6SXNzdWU3OTI3NDIxMjA=
closed
[]
null
1,775
{ "avatar_url": "https://avatars.githubusercontent.com/u/11826803?v=4", "events_url": "https://api.github.com/users/zhongpeixiang/events{/privacy}", "followers_url": "https://api.github.com/users/zhongpeixiang/followers", "following_url": "https://api.github.com/users/zhongpeixiang/following{/other_user}", "gists_url": "https://api.github.com/users/zhongpeixiang/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/zhongpeixiang", "id": 11826803, "login": "zhongpeixiang", "node_id": "MDQ6VXNlcjExODI2ODAz", "organizations_url": "https://api.github.com/users/zhongpeixiang/orgs", "received_events_url": "https://api.github.com/users/zhongpeixiang/received_events", "repos_url": "https://api.github.com/users/zhongpeixiang/repos", "site_admin": false, "starred_url": "https://api.github.com/users/zhongpeixiang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zhongpeixiang/subscriptions", "type": "User", "url": "https://api.github.com/users/zhongpeixiang" }
Efficient ways to iterate the dataset
https://api.github.com/repos/huggingface/datasets/issues/1775/events
null
https://api.github.com/repos/huggingface/datasets/issues/1775/labels{/name}
2021-01-24T07:54:31Z
null
false
null
null
792,742,120
[]
https://api.github.com/repos/huggingface/datasets/issues/1775
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
For a large dataset that does not fit in memory, how can I select only a subset of features from each example? If I iterate over the dataset and then select the subset of features one by one, the resulting memory usage will be huge. Any ways to solve this? Thanks
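A hedged sketch of restricting iteration to a subset of columns with the formatting API, so only those columns are materialized when rows are accessed (dataset and column names are placeholders):

```python
from datasets import load_dataset

ds = load_dataset("glue", "mrpc", split="train")

# Only the listed columns are loaded when examples are accessed; the rest stay on disk.
ds.set_format(columns=["sentence1", "label"])

for example in ds:
    pass  # each example is a dict with just 'sentence1' and 'label'
```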
2021-01-24T09:50:39Z
https://github.com/huggingface/datasets/issues/1775
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1775/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1774/comments
https://api.github.com/repos/huggingface/datasets/issues/1774/timeline
2024-01-31T15:54:18Z
null
completed
MDU6SXNzdWU3OTI3MzA1NTk=
closed
[ { "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" } ]
null
1,774
{ "avatar_url": "https://avatars.githubusercontent.com/u/7607120?v=4", "events_url": "https://api.github.com/users/world2vec/events{/privacy}", "followers_url": "https://api.github.com/users/world2vec/followers", "following_url": "https://api.github.com/users/world2vec/following{/other_user}", "gists_url": "https://api.github.com/users/world2vec/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/world2vec", "id": 7607120, "login": "world2vec", "node_id": "MDQ6VXNlcjc2MDcxMjA=", "organizations_url": "https://api.github.com/users/world2vec/orgs", "received_events_url": "https://api.github.com/users/world2vec/received_events", "repos_url": "https://api.github.com/users/world2vec/repos", "site_admin": false, "starred_url": "https://api.github.com/users/world2vec/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/world2vec/subscriptions", "type": "User", "url": "https://api.github.com/users/world2vec" }
is it possible to make slice to be more compatible like python list and numpy?
https://api.github.com/repos/huggingface/datasets/issues/1774/events
null
https://api.github.com/repos/huggingface/datasets/issues/1774/labels{/name}
2021-01-24T06:15:52Z
null
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
null
792,730,559
[]
https://api.github.com/repos/huggingface/datasets/issues/1774
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, see the error below: ``` AssertionError: Requested slice [:10000000000000000] incompatible with 20 examples. ```
2024-01-31T15:54:18Z
https://github.com/huggingface/datasets/issues/1774
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/1774/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1773/comments
https://api.github.com/repos/huggingface/datasets/issues/1773/timeline
2021-08-04T18:13:01Z
null
completed
MDU6SXNzdWU3OTI3MDgxNjA=
closed
[]
null
1,773
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost" }
bug in loading datasets
https://api.github.com/repos/huggingface/datasets/issues/1773/events
null
https://api.github.com/repos/huggingface/datasets/issues/1773/labels{/name}
2021-01-24T02:53:45Z
null
false
null
null
792,708,160
[]
https://api.github.com/repos/huggingface/datasets/issues/1773
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I need to load a dataset, I use these commands: ``` from datasets import load_dataset dataset = load_dataset('csv', data_files={'train': 'sick/train.csv', 'test': 'sick/test.csv', 'validation': 'sick/validation.csv'}) print(dataset['validation']) ``` the dataset in sick/train.csv are simple csv files representing the data. I am getting this error, do you have an idea how I can solve this? thank you @lhoestq ``` Using custom data configuration default Downloading and preparing dataset csv/default-61468fc71a743ec1 (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /julia/cache_home_2/datasets/csv/default-61468fc71a743ec1/0.0.0/2960f95a26e85d40ca41a230ac88787f715ee3003edaacb8b1f0891e9f04dda2... Traceback (most recent call last): File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/builder.py", line 485, in incomplete_dir yield tmp_dir File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/builder.py", line 527, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/builder.py", line 604, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/builder.py", line 959, in _prepare_split for key, table in utils.tqdm(generator, unit=" tables", leave=False, disable=not_verbose): File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/tqdm-4.49.0-py3.7.egg/tqdm/std.py", line 1133, in __iter__ for obj in iterable: File "/julia/cache_home_2/modules/datasets_modules/datasets/csv/2960f95a26e85d40ca41a230ac88787f715ee3003edaacb8b1f0891e9f04dda2/csv.py", line 129, in _generate_tables for batch_idx, df in enumerate(csv_file_reader): File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/pandas-1.2.0-py3.7-linux-x86_64.egg/pandas/io/parsers.py", line 1029, in __next__ return self.get_chunk() File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/pandas-1.2.0-py3.7-linux-x86_64.egg/pandas/io/parsers.py", line 1079, in get_chunk return self.read(nrows=size) File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/pandas-1.2.0-py3.7-linux-x86_64.egg/pandas/io/parsers.py", line 1052, in read index, columns, col_dict = self._engine.read(nrows) File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/pandas-1.2.0-py3.7-linux-x86_64.egg/pandas/io/parsers.py", line 2056, in read data = self._reader.read(nrows) File "pandas/_libs/parsers.pyx", line 756, in pandas._libs.parsers.TextReader.read File "pandas/_libs/parsers.pyx", line 783, in pandas._libs.parsers.TextReader._read_low_memory File "pandas/_libs/parsers.pyx", line 827, in pandas._libs.parsers.TextReader._read_rows File "pandas/_libs/parsers.pyx", line 814, in pandas._libs.parsers.TextReader._tokenize_rows File "pandas/_libs/parsers.pyx", line 1951, in pandas._libs.parsers.raise_parser_error pandas.errors.ParserError: Error tokenizing data. 
C error: Expected 1 fields in line 37, saw 2 During handling of the above exception, another exception occurred: Traceback (most recent call last): File "write_sick.py", line 19, in <module> 'validation': 'sick/validation.csv'}) File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/load.py", line 612, in load_dataset ignore_verifications=ignore_verifications, File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/builder.py", line 534, in download_and_prepare self._save_info() File "/julia/libs/anaconda3/envs/success/lib/python3.7/contextlib.py", line 130, in __exit__ self.gen.throw(type, value, traceback) File "/julia/libs/anaconda3/envs/success/lib/python3.7/site-packages/datasets-1.2.0-py3.7.egg/datasets/builder.py", line 491, in incomplete_dir shutil.rmtree(tmp_dir) File "/julia/libs/anaconda3/envs/success/lib/python3.7/shutil.py", line 498, in rmtree onerror(os.rmdir, path, sys.exc_info()) File "/julia/libs/anaconda3/envs/success/lib/python3.7/shutil.py", line 496, in rmtree os.rmdir(path) OSError: [Errno 39] Directory not empty: '/julia/cache_home_2/datasets/csv/default-61468fc71a743ec1/0.0.0/2960f95a26e85d40ca41a230ac88787f715ee3003edaacb8b1f0891e9f04dda2.incomplete' ```
2021-09-06T08:54:46Z
https://github.com/huggingface/datasets/issues/1773
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1773/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1772/comments
https://api.github.com/repos/huggingface/datasets/issues/1772/timeline
2021-02-05T15:49:25Z
null
completed
MDU6SXNzdWU3OTI3MDM3OTc=
closed
[]
null
1,772
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost" }
Adding SICK dataset
https://api.github.com/repos/huggingface/datasets/issues/1772/events
null
https://api.github.com/repos/huggingface/datasets/issues/1772/labels{/name}
2021-01-24T02:15:31Z
null
false
null
null
792,703,797
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
https://api.github.com/repos/huggingface/datasets/issues/1772
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi It would be great to include SICK dataset. ## Adding a Dataset - **Name:** SICK - **Description:** a well known entailment dataset - **Paper:** http://marcobaroni.org/composes/sick.html - **Data:** http://marcobaroni.org/composes/sick.html - **Motivation:** this is an important NLI benchmark Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md). thanks
2021-02-05T15:49:25Z
https://github.com/huggingface/datasets/issues/1772
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1772/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1771/comments
https://api.github.com/repos/huggingface/datasets/issues/1771/timeline
2021-01-24T23:06:29Z
null
completed
MDU6SXNzdWU3OTI3MDEyNzY=
closed
[]
null
1,771
{ "avatar_url": "https://avatars.githubusercontent.com/u/7607120?v=4", "events_url": "https://api.github.com/users/world2vec/events{/privacy}", "followers_url": "https://api.github.com/users/world2vec/followers", "following_url": "https://api.github.com/users/world2vec/following{/other_user}", "gists_url": "https://api.github.com/users/world2vec/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/world2vec", "id": 7607120, "login": "world2vec", "node_id": "MDQ6VXNlcjc2MDcxMjA=", "organizations_url": "https://api.github.com/users/world2vec/orgs", "received_events_url": "https://api.github.com/users/world2vec/received_events", "repos_url": "https://api.github.com/users/world2vec/repos", "site_admin": false, "starred_url": "https://api.github.com/users/world2vec/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/world2vec/subscriptions", "type": "User", "url": "https://api.github.com/users/world2vec" }
Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/1.2.1/datasets/csv/csv.py
https://api.github.com/repos/huggingface/datasets/issues/1771/events
null
https://api.github.com/repos/huggingface/datasets/issues/1771/labels{/name}
2021-01-24T01:53:52Z
null
false
null
null
792,701,276
[]
https://api.github.com/repos/huggingface/datasets/issues/1771
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, when I call load_dataset on local csv files, the error below occurs; it looks like raw.githubusercontent.com is blocked by the Chinese government. But why does it need to download csv.py at all? Shouldn't the script be included when the datasets package is installed via pip? ``` Traceback (most recent call last): File "/home/tom/pyenv/pystory/lib/python3.6/site-packages/datasets/load.py", line 267, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/home/tom/pyenv/pystory/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 343, in cached_path max_retries=download_config.max_retries, File "/home/tom/pyenv/pystory/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 617, in get_from_cache raise ConnectionError("Couldn't reach {}".format(url)) ConnectionError: Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/1.2.1/datasets/csv/csv.py ```
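A possible offline workaround, sketched under the assumption that a local copy of the `csv.py` processing script can be obtained some other way (for example from another machine or a mirror): `load_dataset` also accepts a path to a local script, which avoids the request to raw.githubusercontent.com.

```python
from datasets import load_dataset

# Offline workaround sketch: point load_dataset at a locally saved copy of
# datasets/csv/csv.py instead of letting it fetch the script from GitHub.
dataset = load_dataset(
    "./csv.py",                          # local copy of the csv processing script
    data_files={"train": "train.csv"},   # hypothetical local data file
)
```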
2021-01-24T23:06:29Z
https://github.com/huggingface/datasets/issues/1771
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/1771/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1770/comments
https://api.github.com/repos/huggingface/datasets/issues/1770/timeline
2022-06-01T15:43:15Z
null
completed
MDU6SXNzdWU3OTI2OTgxNDg=
closed
[]
null
1,770
{ "avatar_url": "https://avatars.githubusercontent.com/u/7607120?v=4", "events_url": "https://api.github.com/users/world2vec/events{/privacy}", "followers_url": "https://api.github.com/users/world2vec/followers", "following_url": "https://api.github.com/users/world2vec/following{/other_user}", "gists_url": "https://api.github.com/users/world2vec/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/world2vec", "id": 7607120, "login": "world2vec", "node_id": "MDQ6VXNlcjc2MDcxMjA=", "organizations_url": "https://api.github.com/users/world2vec/orgs", "received_events_url": "https://api.github.com/users/world2vec/received_events", "repos_url": "https://api.github.com/users/world2vec/repos", "site_admin": false, "starred_url": "https://api.github.com/users/world2vec/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/world2vec/subscriptions", "type": "User", "url": "https://api.github.com/users/world2vec" }
How can I combine 2 datasets with different/same features?
https://api.github.com/repos/huggingface/datasets/issues/1770/events
null
https://api.github.com/repos/huggingface/datasets/issues/1770/labels{/name}
2021-01-24T01:26:06Z
null
false
null
null
792,698,148
[]
https://api.github.com/repos/huggingface/datasets/issues/1770
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
How can I combine 2 datasets with a one-to-one row mapping, like ds = zip(ds1, ds2)? Same feature names: ds1: {'text'}, ds2: {'text'}, combined ds: {'src', 'tgt'}; or different feature names: ds1: {'src'}, ds2: {'tgt'}, combined ds: {'src', 'tgt'}.
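A minimal sketch of one way to do this with the public `Dataset.from_dict` API, assuming both datasets have the same number of rows and that rows should be paired by position:

```python
from datasets import Dataset

# Two toy datasets standing in for ds1 and ds2 from the question.
ds1 = Dataset.from_dict({"text": ["hello", "world"]})
ds2 = Dataset.from_dict({"text": ["bonjour", "monde"]})

# Pair the rows one-to-one into a single dataset with 'src' and 'tgt' columns.
combined = Dataset.from_dict({"src": ds1["text"], "tgt": ds2["text"]})
print(combined[0])  # {'src': 'hello', 'tgt': 'bonjour'}
```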
2022-06-01T15:43:15Z
https://github.com/huggingface/datasets/issues/1770
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1770/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1769/comments
https://api.github.com/repos/huggingface/datasets/issues/1769/timeline
2022-10-05T12:38:51Z
null
completed
MDU6SXNzdWU3OTI1MjMyODQ=
closed
[]
null
1,769
{ "avatar_url": "https://avatars.githubusercontent.com/u/14048129?v=4", "events_url": "https://api.github.com/users/shuaihuaiyi/events{/privacy}", "followers_url": "https://api.github.com/users/shuaihuaiyi/followers", "following_url": "https://api.github.com/users/shuaihuaiyi/following{/other_user}", "gists_url": "https://api.github.com/users/shuaihuaiyi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/shuaihuaiyi", "id": 14048129, "login": "shuaihuaiyi", "node_id": "MDQ6VXNlcjE0MDQ4MTI5", "organizations_url": "https://api.github.com/users/shuaihuaiyi/orgs", "received_events_url": "https://api.github.com/users/shuaihuaiyi/received_events", "repos_url": "https://api.github.com/users/shuaihuaiyi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/shuaihuaiyi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shuaihuaiyi/subscriptions", "type": "User", "url": "https://api.github.com/users/shuaihuaiyi" }
_pickle.PicklingError: Can't pickle typing.Union[str, NoneType]: it's not the same object as typing.Union when calling datasets.map with num_proc=2
https://api.github.com/repos/huggingface/datasets/issues/1769/events
null
https://api.github.com/repos/huggingface/datasets/issues/1769/labels{/name}
2021-01-23T10:13:00Z
null
false
null
null
792,523,284
[]
https://api.github.com/repos/huggingface/datasets/issues/1769
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
It may be a bug of multiprocessing with Datasets, when I disable the multiprocessing by set num_proc to None, everything works fine. The script I use is https://github.com/huggingface/transformers/blob/master/examples/language-modeling/run_mlm_wwm.py Script args: ``` --model_name_or_path ../../../model/chinese-roberta-wwm-ext --train_file /nfs/volume-377-2/bert/data/test/train.txt --output_dir test --do_train --per_device_train_batch_size 2 --gradient_accumulation_steps 2 --learning_rate 1e-4 --max_steps 1000 --warmup_steps 10 --save_steps 1000 --save_total_limit 1 --seed 23333 --max_seq_length 512 --preprocessing_num_workers 2 --cache_dir /nfs/volume-377-2/bert/data/test/cache ``` Where the `/nfs/volume-377-2/bert/data/test/train.txt` is just a toy example with 10000 lines of random string, you should be able to reproduce this error esaily. Full Traceback: ``` Traceback (most recent call last): File "/nfs/volume-377-2/bert/transformers/examples/language-modeling/run_mlm_wwm.py", line 398, in <module> main() File "/nfs/volume-377-2/bert/transformers/examples/language-modeling/run_mlm_wwm.py", line 325, in main load_from_cache_file=not data_args.overwrite_cache, File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/datasets/dataset_dict.py", line 303, in map for k, dataset in self.items() File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/datasets/dataset_dict.py", line 303, in <dictcomp> for k, dataset in self.items() File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/datasets/arrow_dataset.py", line 1318, in map transformed_shards = [r.get() for r in results] File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/datasets/arrow_dataset.py", line 1318, in <listcomp> transformed_shards = [r.get() for r in results] File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/multiprocess/pool.py", line 644, in get raise self._value File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/multiprocess/pool.py", line 424, in _handle_tasks put(task) File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/multiprocess/connection.py", line 209, in send self._send_bytes(_ForkingPickler.dumps(obj)) File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/multiprocess/reduction.py", line 54, in dumps cls(buf, protocol, *args, **kwds).dump(obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 446, in dump StockPickler.dump(self, obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 409, in dump self.save(obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 751, in save_tuple save(element) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 933, in save_module_dict StockPickler.save_dict(pickler, obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 821, in save_dict self._batch_setitems(obj.items()) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 847, in _batch_setitems save(v) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File 
"/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 1438, in save_function obj.__dict__, fkwdefaults), obj=obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 610, in save_reduce save(args) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 751, in save_tuple save(element) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 736, in save_tuple save(element) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 1170, in save_cell pickler.save_reduce(_create_cell, (f,), obj=obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 610, in save_reduce save(args) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 736, in save_tuple save(element) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 521, in save self.save_reduce(obj=obj, *rv) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 605, in save_reduce save(cls) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 1365, in save_type obj.__bases__, _dict), obj=obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 610, in save_reduce save(args) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 751, in save_tuple save(element) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 933, in save_module_dict StockPickler.save_dict(pickler, obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 821, in save_dict self._batch_setitems(obj.items()) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 847, in _batch_setitems save(v) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 476, in save f(self, obj) # Call unbound method with explicit self File "/home/luban/miniconda3/envs/py36/lib/python3.6/site-packages/dill/_dill.py", line 933, in save_module_dict StockPickler.save_dict(pickler, obj) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 821, in save_dict self._batch_setitems(obj.items()) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 847, in _batch_setitems save(v) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 507, in save self.save_global(obj, rv) File "/home/luban/miniconda3/envs/py36/lib/python3.6/pickle.py", line 927, in save_global (obj, module_name, name)) _pickle.PicklingError: Can't pickle typing.Union[str, NoneType]: it's not the same object as typing.Union ```
2022-10-05T12:38:51Z
https://github.com/huggingface/datasets/issues/1769
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1769/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1768/comments
https://api.github.com/repos/huggingface/datasets/issues/1768/timeline
2021-01-25T09:14:59Z
null
null
MDExOlB1bGxSZXF1ZXN0NTYwMDgyNzIx
closed
[]
false
1,768
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani" }
Mention kwargs in the Dataset Formatting docs
https://api.github.com/repos/huggingface/datasets/issues/1768/events
null
https://api.github.com/repos/huggingface/datasets/issues/1768/labels{/name}
2021-01-22T16:43:20Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1768.diff", "html_url": "https://github.com/huggingface/datasets/pull/1768", "merged_at": "2021-01-25T09:14:59Z", "patch_url": "https://github.com/huggingface/datasets/pull/1768.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1768" }
792,150,745
[]
https://api.github.com/repos/huggingface/datasets/issues/1768
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hi, This was discussed in Issue #1762 where the docs didn't mention that keyword arguments to `datasets.Dataset.set_format()` are allowed. To prevent people from having to check the code/method docs, I just added a couple of lines in the docs. Please let me know your thoughts on this. Thanks, Gunjan @lhoestq
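For illustration, a small sketch of what such keyword arguments can look like; the assumption here is that extra kwargs passed to `set_format` are handed to the output formatter (for the numpy formatter, to `np.array`), which should be verified against the installed version.

```python
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})

# Assumed behavior: extra keyword arguments to set_format are forwarded to the formatter,
# here controlling the dtype of the returned numpy arrays.
ds.set_format(type="numpy", columns=["x"], dtype=np.float32)
print(ds[0]["x"].dtype)  # expected float32 if the kwarg is forwarded as assumed
```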
2021-01-31T12:33:10Z
https://github.com/huggingface/datasets/pull/1768
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1768/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1767/comments
https://api.github.com/repos/huggingface/datasets/issues/1767/timeline
2021-01-25T20:37:42Z
null
null
MDExOlB1bGxSZXF1ZXN0NTYwMDE2MzE2
closed
[]
false
1,767
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
Add Librispeech ASR
https://api.github.com/repos/huggingface/datasets/issues/1767/events
null
https://api.github.com/repos/huggingface/datasets/issues/1767/labels{/name}
2021-01-22T14:54:37Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1767.diff", "html_url": "https://github.com/huggingface/datasets/pull/1767", "merged_at": "2021-01-25T20:37:42Z", "patch_url": "https://github.com/huggingface/datasets/pull/1767.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1767" }
792,068,497
[]
https://api.github.com/repos/huggingface/datasets/issues/1767
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This PR adds the librispeech asr dataset: https://www.tensorflow.org/datasets/catalog/librispeech There are 2 configs: "clean" and "other", and "clean" has two "train" splits, hence the names "train.100" and "train.360". As suggested by @lhoestq, due to the enormous size of the dataset in `.arrow` format, the speech files are not decoded into float32 arrays up front; instead, only the path to each audio file is stored.
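A hypothetical usage sketch of what this design implies for users, assuming the audio path ends up in a `"file"` column and that a library such as `soundfile` is used for decoding:

```python
import soundfile as sf
from datasets import load_dataset

# Since only the path to each audio file is stored, the waveform is decoded on demand.
# The "file" column name and the soundfile dependency are assumptions.
ds = load_dataset("librispeech_asr", "clean", split="train.100")
speech_array, sampling_rate = sf.read(ds[0]["file"])
print(speech_array.shape, sampling_rate)
```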
2021-01-25T20:38:07Z
https://github.com/huggingface/datasets/pull/1767
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1767/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1766/comments
https://api.github.com/repos/huggingface/datasets/issues/1766/timeline
2021-02-02T10:38:06Z
null
completed
MDU6SXNzdWU3OTIwNDQxMDU=
closed
[]
null
1,766
{ "avatar_url": "https://avatars.githubusercontent.com/u/8089862?v=4", "events_url": "https://api.github.com/users/lamthuy/events{/privacy}", "followers_url": "https://api.github.com/users/lamthuy/followers", "following_url": "https://api.github.com/users/lamthuy/following{/other_user}", "gists_url": "https://api.github.com/users/lamthuy/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lamthuy", "id": 8089862, "login": "lamthuy", "node_id": "MDQ6VXNlcjgwODk4NjI=", "organizations_url": "https://api.github.com/users/lamthuy/orgs", "received_events_url": "https://api.github.com/users/lamthuy/received_events", "repos_url": "https://api.github.com/users/lamthuy/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lamthuy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lamthuy/subscriptions", "type": "User", "url": "https://api.github.com/users/lamthuy" }
Issues when running two programs that compute the same metrics
https://api.github.com/repos/huggingface/datasets/issues/1766/events
null
https://api.github.com/repos/huggingface/datasets/issues/1766/labels{/name}
2021-01-22T14:22:55Z
null
false
null
null
792,044,105
[]
https://api.github.com/repos/huggingface/datasets/issues/1766
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I got the following error when running two different programs that both compute sacreblue metrics. It seems that both read/and/write to the same location (.cache/huggingface/metrics/sacrebleu/default/default_experiment-1-0.arrow) where it caches the batches: ``` File "train_matching_min.py", line 160, in <module>ch_9_label avg_loss = valid(epoch, args.batch, args.validation, args.with_label) File "train_matching_min.py", line 93, in valid bleu += eval.compute() File "/u/tlhoang/projects/seal/match/models/eval.py", line 23, in compute return self.metric.compute()['score'] File "/dccstor/know/anaconda3/lib/python3.7/site-packages/datasets/metric.py", line 387, in compute self._finalize() File "/dccstor/know/anaconda3/lib/python3.7/site-packages/datasets/metric.py", line 355, in _finalize self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths])) File "/dccstor/know/anaconda3/lib/python3.7/site-packages/datasets/arrow_reader.py", line 231, in read_files pa_table = self._read_files(files) File "/dccstor/know/anaconda3/lib/python3.7/site-packages/datasets/arrow_reader.py", line 170, in _read_files pa_table: pa.Table = self._get_dataset_from_filename(f_dict) File "/dccstor/know/anaconda3/lib/python3.7/site-packages/datasets/arrow_reader.py", line 299, in _get_dataset_from_filename pa_table = f.read_all() File "pyarrow/ipc.pxi", line 481, in pyarrow.lib.RecordBatchReader.read_all File "pyarrow/error.pxi", line 84, in pyarrow.lib.check_status pyarrow.lib.ArrowInvalid: Expected to read 1819307375 metadata bytes, but only read 454396 ```
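One possible mitigation, sketched under the assumption that the clash comes from both programs sharing the default metric cache file: `load_metric` accepts an `experiment_id` argument that separates the cache paths of concurrent runs (verify the keyword exists in your installed version).

```python
from datasets import load_metric

# Give each concurrent program its own experiment_id so the metric writes its intermediate
# arrow files to distinct cache paths instead of both using default_experiment-1-0.arrow.
metric = load_metric("sacrebleu", experiment_id="program_a")  # use e.g. "program_b" in the other run
```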
2021-02-02T10:38:06Z
https://github.com/huggingface/datasets/issues/1766
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1766/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1765/comments
https://api.github.com/repos/huggingface/datasets/issues/1765/timeline
2021-01-23T03:44:14Z
null
completed
MDU6SXNzdWU3OTE1NTMwNjU=
closed
[]
null
1,765
{ "avatar_url": "https://avatars.githubusercontent.com/u/1295082?v=4", "events_url": "https://api.github.com/users/EvanZ/events{/privacy}", "followers_url": "https://api.github.com/users/EvanZ/followers", "following_url": "https://api.github.com/users/EvanZ/following{/other_user}", "gists_url": "https://api.github.com/users/EvanZ/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/EvanZ", "id": 1295082, "login": "EvanZ", "node_id": "MDQ6VXNlcjEyOTUwODI=", "organizations_url": "https://api.github.com/users/EvanZ/orgs", "received_events_url": "https://api.github.com/users/EvanZ/received_events", "repos_url": "https://api.github.com/users/EvanZ/repos", "site_admin": false, "starred_url": "https://api.github.com/users/EvanZ/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/EvanZ/subscriptions", "type": "User", "url": "https://api.github.com/users/EvanZ" }
Error iterating over Dataset with DataLoader
https://api.github.com/repos/huggingface/datasets/issues/1765/events
null
https://api.github.com/repos/huggingface/datasets/issues/1765/labels{/name}
2021-01-21T22:56:45Z
null
false
null
null
791,553,065
[]
https://api.github.com/repos/huggingface/datasets/issues/1765
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I have a Dataset that I've mapped a tokenizer over: ``` encoded_dataset.set_format(type='torch',columns=['attention_mask','input_ids','token_type_ids']) encoded_dataset[:1] ``` ``` {'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), 'input_ids': tensor([[ 101, 178, 1198, 1400, 1714, 22233, 21365, 4515, 8618, 1113, 102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])} ``` When I try to iterate as in the docs, I get errors: ``` dataloader = torch.utils.data.DataLoader(encoded_dataset, batch_sampler=32) next(iter(dataloader)) ``` ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-45-05180ba8aa35> in <module>() 1 dataloader = torch.utils.data.DataLoader(encoded_dataset, batch_sampler=32) ----> 2 next(iter(dataloader)) 3 frames /usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in __init__(self, loader) 411 self._timeout = loader.timeout 412 self._collate_fn = loader.collate_fn --> 413 self._sampler_iter = iter(self._index_sampler) 414 self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item() 415 self._persistent_workers = loader.persistent_workers TypeError: 'int' object is not iterable ```
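The traceback points at the `batch_sampler=32` argument: `batch_sampler` expects a sampler object that yields lists of indices, not an integer. A sketch of the presumably intended call, reusing `encoded_dataset` from the snippet above:

```python
import torch

# For fixed-size batches, batch_size is the right argument; batch_sampler takes a sampler,
# which is why passing the int 32 fails inside the DataLoader.
dataloader = torch.utils.data.DataLoader(encoded_dataset, batch_size=32)
batch = next(iter(dataloader))
print({k: v.shape for k, v in batch.items()})
```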
2022-10-28T02:16:38Z
https://github.com/huggingface/datasets/issues/1765
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1765/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1764/comments
https://api.github.com/repos/huggingface/datasets/issues/1764/timeline
2021-01-21T21:00:02Z
null
completed
MDU6SXNzdWU3OTE0ODY4NjA=
closed
[]
null
1,764
{ "avatar_url": "https://avatars.githubusercontent.com/u/12455298?v=4", "events_url": "https://api.github.com/users/SaeedNajafi/events{/privacy}", "followers_url": "https://api.github.com/users/SaeedNajafi/followers", "following_url": "https://api.github.com/users/SaeedNajafi/following{/other_user}", "gists_url": "https://api.github.com/users/SaeedNajafi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SaeedNajafi", "id": 12455298, "login": "SaeedNajafi", "node_id": "MDQ6VXNlcjEyNDU1Mjk4", "organizations_url": "https://api.github.com/users/SaeedNajafi/orgs", "received_events_url": "https://api.github.com/users/SaeedNajafi/received_events", "repos_url": "https://api.github.com/users/SaeedNajafi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SaeedNajafi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SaeedNajafi/subscriptions", "type": "User", "url": "https://api.github.com/users/SaeedNajafi" }
Connection Issues
https://api.github.com/repos/huggingface/datasets/issues/1764/events
null
https://api.github.com/repos/huggingface/datasets/issues/1764/labels{/name}
2021-01-21T20:56:09Z
null
false
null
null
791,486,860
[]
https://api.github.com/repos/huggingface/datasets/issues/1764
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Today, I am getting connection issues while loading a dataset and the metric. ``` Traceback (most recent call last): File "src/train.py", line 180, in <module> train_dataset, dev_dataset, test_dataset = create_race_dataset() File "src/train.py", line 130, in create_race_dataset train_dataset = load_dataset("race", "all", split="train") File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/load.py", line 591, in load_dataset path, script_version=script_version, download_config=download_config, download_mode=download_mode, dataset=True File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/load.py", line 267, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 343, in cached_path max_retries=download_config.max_retries, File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 617, in get_from_cache raise ConnectionError("Couldn't reach {}".format(url)) ConnectionError: Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/1.2.1/datasets/race/race.py ``` Or ``` Traceback (most recent call last): File "src/train.py", line 105, in <module> rouge = datasets.load_metric("rouge") File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/load.py", line 500, in load_metric dataset=False, File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/load.py", line 267, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 343, in cached_path max_retries=download_config.max_retries, File "/Users/saeed/Desktop/codes/repos/dreamscape-qa/env/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 617, in get_from_cache raise ConnectionError("Couldn't reach {}".format(url)) ConnectionError: Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/1.2.1/metrics/rouge/rouge.py ```
2021-01-21T21:00:19Z
https://github.com/huggingface/datasets/issues/1764
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1764/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1763/comments
https://api.github.com/repos/huggingface/datasets/issues/1763/timeline
2021-01-22T10:13:45Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU5NDU3MTY1
closed
[]
false
1,763
{ "avatar_url": "https://avatars.githubusercontent.com/u/9641196?v=4", "events_url": "https://api.github.com/users/gowtham1997/events{/privacy}", "followers_url": "https://api.github.com/users/gowtham1997/followers", "following_url": "https://api.github.com/users/gowtham1997/following{/other_user}", "gists_url": "https://api.github.com/users/gowtham1997/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gowtham1997", "id": 9641196, "login": "gowtham1997", "node_id": "MDQ6VXNlcjk2NDExOTY=", "organizations_url": "https://api.github.com/users/gowtham1997/orgs", "received_events_url": "https://api.github.com/users/gowtham1997/received_events", "repos_url": "https://api.github.com/users/gowtham1997/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gowtham1997/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gowtham1997/subscriptions", "type": "User", "url": "https://api.github.com/users/gowtham1997" }
PAWS-X: Fix csv Dictreader splitting data on quotes
https://api.github.com/repos/huggingface/datasets/issues/1763/events
null
https://api.github.com/repos/huggingface/datasets/issues/1763/labels{/name}
2021-01-21T18:21:01Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1763.diff", "html_url": "https://github.com/huggingface/datasets/pull/1763", "merged_at": "2021-01-22T10:13:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/1763.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1763" }
791,389,763
[]
https://api.github.com/repos/huggingface/datasets/issues/1763
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
```python from datasets import load_dataset # load english paws-x dataset datasets = load_dataset('paws-x', 'en') print(len(datasets['train'])) # outputs 49202 but official dataset has 49401 pairs print(datasets['train'].unique('label')) # outputs [1, 0, -1] but labels are binary [0,1] ``` changed `data = csv.DictReader(f, delimiter="\t")` to `data = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)` in the dataloader to make csv module not split by quotes. The results are as expected for all languages after the change.
2021-01-22T10:14:33Z
https://github.com/huggingface/datasets/pull/1763
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1763/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1762/comments
https://api.github.com/repos/huggingface/datasets/issues/1762/timeline
2021-02-02T07:13:22Z
null
completed
MDU6SXNzdWU3OTEyMjYwMDc=
closed
[]
null
1,762
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani" }
Unable to format dataset to CUDA Tensors
https://api.github.com/repos/huggingface/datasets/issues/1762/events
null
https://api.github.com/repos/huggingface/datasets/issues/1762/labels{/name}
2021-01-21T15:31:23Z
null
false
null
null
791,226,007
[]
https://api.github.com/repos/huggingface/datasets/issues/1762
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hi, I came across this [link](https://huggingface.co/docs/datasets/torch_tensorflow.html) where the docs show how to convert a dataset to a particular format. I see that there is an option to convert it to tensors, but I don't see any option to convert it to CUDA tensors. I tried this, but Dataset doesn't support assignment: ``` columns=['input_ids', 'token_type_ids', 'attention_mask', 'start_positions','end_positions'] samples.set_format(type='torch', columns = columns) for column in columns: samples[column].to(torch.device(self.config.device)) ``` There should be an option to do so, or if there is already a way to do this, please let me know. Thanks, Gunjan
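One workaround sketch, not an official API: leave the dataset formatted as CPU tensors and move each indexed batch to the GPU, since dataset columns cannot be reassigned in place. The batch size of 8 below is an arbitrary illustration, and `samples` refers to the dataset from the snippet above.

```python
import torch

columns = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions']
samples.set_format(type='torch', columns=columns)

# Move the tensors of each retrieved batch to the GPU after indexing,
# rather than trying to store CUDA tensors inside the Arrow-backed dataset.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch = {k: v.to(device) for k, v in samples[:8].items()}
```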
2021-02-02T07:13:22Z
https://github.com/huggingface/datasets/issues/1762
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1762/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1761/comments
https://api.github.com/repos/huggingface/datasets/issues/1761/timeline
2021-01-26T13:50:31Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU5MjUyMzEw
closed
[]
false
1,761
{ "avatar_url": "https://avatars.githubusercontent.com/u/1551356?v=4", "events_url": "https://api.github.com/users/eusip/events{/privacy}", "followers_url": "https://api.github.com/users/eusip/followers", "following_url": "https://api.github.com/users/eusip/following{/other_user}", "gists_url": "https://api.github.com/users/eusip/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/eusip", "id": 1551356, "login": "eusip", "node_id": "MDQ6VXNlcjE1NTEzNTY=", "organizations_url": "https://api.github.com/users/eusip/orgs", "received_events_url": "https://api.github.com/users/eusip/received_events", "repos_url": "https://api.github.com/users/eusip/repos", "site_admin": false, "starred_url": "https://api.github.com/users/eusip/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eusip/subscriptions", "type": "User", "url": "https://api.github.com/users/eusip" }
Add SILICONE benchmark
https://api.github.com/repos/huggingface/datasets/issues/1761/events
null
https://api.github.com/repos/huggingface/datasets/issues/1761/labels{/name}
2021-01-21T14:29:12Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1761.diff", "html_url": "https://github.com/huggingface/datasets/pull/1761", "merged_at": "2021-01-26T13:50:31Z", "patch_url": "https://github.com/huggingface/datasets/pull/1761.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1761" }
791,150,858
[]
https://api.github.com/repos/huggingface/datasets/issues/1761
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
My collaborators and I within the Affective Computing team at Telecom Paris would like to re-submit our spoken dialogue dataset for publication. This is a new pull request relative to the [previously closed request](https://github.com/huggingface/datasets/pull/1712) which was reviewed by @lhoestq.
2021-02-04T14:32:48Z
https://github.com/huggingface/datasets/pull/1761
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1761/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1760/comments
https://api.github.com/repos/huggingface/datasets/issues/1760/timeline
2021-01-22T09:40:00Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU5MjE3MjY0
closed
[]
false
1,760
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
More tags
https://api.github.com/repos/huggingface/datasets/issues/1760/events
null
https://api.github.com/repos/huggingface/datasets/issues/1760/labels{/name}
2021-01-21T13:50:10Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1760.diff", "html_url": "https://github.com/huggingface/datasets/pull/1760", "merged_at": "2021-01-22T09:40:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/1760.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1760" }
791,110,857
[]
https://api.github.com/repos/huggingface/datasets/issues/1760
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Since the hub v2 is going to be released soon I figured it would be great to add the missing tags at least for some of the datasets of reference listed [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md#write-the-loadingprocessing-code)
2021-01-22T09:40:01Z
https://github.com/huggingface/datasets/pull/1760
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1760/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1759/comments
https://api.github.com/repos/huggingface/datasets/issues/1759/timeline
2021-01-21T17:21:06Z
null
completed
MDU6SXNzdWU3OTA5OTIyMjY=
closed
[]
null
1,759
{ "avatar_url": "https://avatars.githubusercontent.com/u/19912393?v=4", "events_url": "https://api.github.com/users/ChrisDelClea/events{/privacy}", "followers_url": "https://api.github.com/users/ChrisDelClea/followers", "following_url": "https://api.github.com/users/ChrisDelClea/following{/other_user}", "gists_url": "https://api.github.com/users/ChrisDelClea/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ChrisDelClea", "id": 19912393, "login": "ChrisDelClea", "node_id": "MDQ6VXNlcjE5OTEyMzkz", "organizations_url": "https://api.github.com/users/ChrisDelClea/orgs", "received_events_url": "https://api.github.com/users/ChrisDelClea/received_events", "repos_url": "https://api.github.com/users/ChrisDelClea/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ChrisDelClea/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ChrisDelClea/subscriptions", "type": "User", "url": "https://api.github.com/users/ChrisDelClea" }
wikipedia dataset incomplete
https://api.github.com/repos/huggingface/datasets/issues/1759/events
null
https://api.github.com/repos/huggingface/datasets/issues/1759/labels{/name}
2021-01-21T11:47:15Z
null
false
null
null
790,992,226
[]
https://api.github.com/repos/huggingface/datasets/issues/1759
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hey guys, I am using the https://github.com/huggingface/datasets/tree/master/datasets/wikipedia dataset. Unfortunately, I found that the German dataset is incomplete. For reasons unknown to me, the number of inhabitants has been removed from many pages: Thorey-sur-Ouche has 128 inhabitants according to the webpage (https://de.wikipedia.org/wiki/Thorey-sur-Ouche). The pickle file, however, shows: "französische Gemeinde mit Einwohnern (Stand)" (German for "French commune with inhabitants (as of)"), with the number missing. Is it possible to fix this? Best regards Chris
2021-01-21T17:22:11Z
https://github.com/huggingface/datasets/issues/1759
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1759/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1758/comments
https://api.github.com/repos/huggingface/datasets/issues/1758/timeline
2021-01-22T00:25:50Z
null
completed
MDU6SXNzdWU3OTA2MjYxMTY=
closed
[]
null
1,758
{ "avatar_url": "https://avatars.githubusercontent.com/u/49048309?v=4", "events_url": "https://api.github.com/users/afogarty85/events{/privacy}", "followers_url": "https://api.github.com/users/afogarty85/followers", "following_url": "https://api.github.com/users/afogarty85/following{/other_user}", "gists_url": "https://api.github.com/users/afogarty85/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/afogarty85", "id": 49048309, "login": "afogarty85", "node_id": "MDQ6VXNlcjQ5MDQ4MzA5", "organizations_url": "https://api.github.com/users/afogarty85/orgs", "received_events_url": "https://api.github.com/users/afogarty85/received_events", "repos_url": "https://api.github.com/users/afogarty85/repos", "site_admin": false, "starred_url": "https://api.github.com/users/afogarty85/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/afogarty85/subscriptions", "type": "User", "url": "https://api.github.com/users/afogarty85" }
dataset.search() (elastic) cannot reliably retrieve search results
https://api.github.com/repos/huggingface/datasets/issues/1758/events
null
https://api.github.com/repos/huggingface/datasets/issues/1758/labels{/name}
2021-01-21T02:26:37Z
null
false
null
null
790,626,116
[]
https://api.github.com/repos/huggingface/datasets/issues/1758
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I am trying to use elastic search to retrieve the indices of items in the dataset in their precise order, given shuffled training indices. The problem I have is that I cannot retrieve reliable results with my data on my first search. I have to run the search **twice** to get the right answer. I am indexing data that looks like the following from the HF SQuAD 2.0 data set: ``` ['57318658e6313a140071d02b', '56f7165e3d8e2e1400e3733a', '570e2f6e0b85d914000d7d21', '5727e58aff5b5019007d97d0', '5a3b5a503ff257001ab8441f', '57262fab271a42140099d725'] ``` To reproduce the issue, try: ``` from datasets import load_dataset, load_metric from transformers import BertTokenizerFast, BertForQuestionAnswering from elasticsearch import Elasticsearch import numpy as np import collections from tqdm.auto import tqdm import torch # from https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb#scrollTo=941LPhDWeYv- tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased') max_length = 384 # The maximum length of a feature (question and context) doc_stride = 128 # The authorized overlap between two part of the context when splitting it is needed. pad_on_right = tokenizer.padding_side == "right" squad_v2 = True # from https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb#scrollTo=941LPhDWeYv- def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples["question" if pad_on_right else "context"], examples["context" if pad_on_right else "question"], truncation="only_second" if pad_on_right else "only_first", max_length=max_length, stride=doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # We keep the example_id that gave us this feature and we will store the offset mappings. tokenized_examples["example_id"] = [] for i in range(len(tokenized_examples["input_ids"])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) context_index = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
tokenized_examples["offset_mapping"][i] = [ (list(o) if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i]) ] return tokenized_examples # build base examples, features set of training data shuffled_idx = pd.read_csv('https://raw.githubusercontent.com/afogarty85/temp/main/idx.csv')['idx'].to_list() examples = load_dataset("squad_v2").shuffle(seed=1)['train'] features = load_dataset("squad_v2").shuffle(seed=1)['train'].map( prepare_validation_features, batched=True, remove_columns=['answers', 'context', 'id', 'question', 'title']) # reorder features by the training process features = features.select(indices=shuffled_idx) # get the example ids to match with the "example" data; get unique entries id_list = list(dict.fromkeys(features['example_id'])) # now search for their index positions in the examples data set; load elastic search es = Elasticsearch([{'host': 'localhost'}]).ping() # add an index to the id column for the examples examples.add_elasticsearch_index(column='id') # retrieve the example index example_idx_k1 = [examples.search(index_name='id', query=i, k=1).indices for i in id_list] example_idx_k1 = [item for sublist in example_idx_k1 for item in sublist] example_idx_k2 = [examples.search(index_name='id', query=i, k=3).indices for i in id_list] example_idx_k2 = [item for sublist in example_idx_k2 for item in sublist] len(example_idx_k1) # should be 130319 len(example_idx_k2) # should be 130319 #trial 1 lengths: # k=1: 130314 # k=3: 130319 # trial 2: # just run k=3 first: 130310 # try k=1 after k=3: 130319 ```
2021-01-22T00:25:50Z
https://github.com/huggingface/datasets/issues/1758
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1758/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1757/comments
https://api.github.com/repos/huggingface/datasets/issues/1757/timeline
2021-03-08T14:34:52Z
null
completed
MDU6SXNzdWU3OTA0NjY1MDk=
closed
[]
null
1,757
{ "avatar_url": "https://avatars.githubusercontent.com/u/6183050?v=4", "events_url": "https://api.github.com/users/dspoka/events{/privacy}", "followers_url": "https://api.github.com/users/dspoka/followers", "following_url": "https://api.github.com/users/dspoka/following{/other_user}", "gists_url": "https://api.github.com/users/dspoka/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dspoka", "id": 6183050, "login": "dspoka", "node_id": "MDQ6VXNlcjYxODMwNTA=", "organizations_url": "https://api.github.com/users/dspoka/orgs", "received_events_url": "https://api.github.com/users/dspoka/received_events", "repos_url": "https://api.github.com/users/dspoka/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dspoka/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dspoka/subscriptions", "type": "User", "url": "https://api.github.com/users/dspoka" }
FewRel
https://api.github.com/repos/huggingface/datasets/issues/1757/events
null
https://api.github.com/repos/huggingface/datasets/issues/1757/labels{/name}
2021-01-20T23:56:03Z
null
false
null
null
790,466,509
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
https://api.github.com/repos/huggingface/datasets/issues/1757
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
## Adding a Dataset - **Name:** FewRel - **Description:** Large-Scale Supervised Few-Shot Relation Classification Dataset - **Paper:** @inproceedings{han2018fewrel, title={FewRel: A Large-Scale Supervised Few-Shot Relation Classification Dataset with State-of-the-Art Evaluation}, author={Han, Xu and Zhu, Hao and Yu, Pengfei and Wang, Ziyun and Yao, Yuan and Liu, Zhiyuan and Sun, Maosong}, booktitle={EMNLP}, year={2018}} - **Data:** https://github.com/ProKil/FewRel - **Motivation:** a relation extraction dataset that has been used by several state-of-the-art systems and should be incorporated. Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
2021-03-09T02:52:05Z
https://github.com/huggingface/datasets/issues/1757
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1757/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1756/comments
https://api.github.com/repos/huggingface/datasets/issues/1756/timeline
2021-03-01T10:36:21Z
null
completed
MDU6SXNzdWU3OTAzODAwMjg=
closed
[]
null
1,756
{ "avatar_url": "https://avatars.githubusercontent.com/u/47894090?v=4", "events_url": "https://api.github.com/users/flozi00/events{/privacy}", "followers_url": "https://api.github.com/users/flozi00/followers", "following_url": "https://api.github.com/users/flozi00/following{/other_user}", "gists_url": "https://api.github.com/users/flozi00/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/flozi00", "id": 47894090, "login": "flozi00", "node_id": "MDQ6VXNlcjQ3ODk0MDkw", "organizations_url": "https://api.github.com/users/flozi00/orgs", "received_events_url": "https://api.github.com/users/flozi00/received_events", "repos_url": "https://api.github.com/users/flozi00/repos", "site_admin": false, "starred_url": "https://api.github.com/users/flozi00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/flozi00/subscriptions", "type": "User", "url": "https://api.github.com/users/flozi00" }
Ccaligned multilingual translation dataset
https://api.github.com/repos/huggingface/datasets/issues/1756/events
null
https://api.github.com/repos/huggingface/datasets/issues/1756/labels{/name}
2021-01-20T22:18:44Z
null
false
null
null
790,380,028
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
https://api.github.com/repos/huggingface/datasets/issues/1756
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
## Adding a Dataset - **Name:** CCAligned - **Description:** CCAligned consists of parallel or comparable web-document pairs in 137 languages aligned with English. These web-document pairs were constructed by performing language identification on raw web documents and ensuring that corresponding language codes appeared in the URLs of the web documents. This pattern-matching approach yielded more than 100 million aligned documents paired with English. Recognizing that each English document was often aligned to multiple documents in different target languages, we can join on English documents to obtain aligned documents that directly pair two non-English documents (e.g., Arabic-French). - **Paper:** https://www.aclweb.org/anthology/2020.emnlp-main.480.pdf - **Data:** http://www.statmt.org/cc-aligned/ - **Motivation:** The authors say it is a high-quality dataset. It is fairly large and includes many language pairs; it could be interesting to train mT5 on it. Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
2021-03-01T10:36:21Z
https://github.com/huggingface/datasets/issues/1756
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1756/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1755/comments
https://api.github.com/repos/huggingface/datasets/issues/1755/timeline
2021-01-20T22:03:39Z
null
completed
MDU6SXNzdWU3OTAzMjQ3MzQ=
closed
[]
null
1,755
{ "avatar_url": "https://avatars.githubusercontent.com/u/49048309?v=4", "events_url": "https://api.github.com/users/afogarty85/events{/privacy}", "followers_url": "https://api.github.com/users/afogarty85/followers", "following_url": "https://api.github.com/users/afogarty85/following{/other_user}", "gists_url": "https://api.github.com/users/afogarty85/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/afogarty85", "id": 49048309, "login": "afogarty85", "node_id": "MDQ6VXNlcjQ5MDQ4MzA5", "organizations_url": "https://api.github.com/users/afogarty85/orgs", "received_events_url": "https://api.github.com/users/afogarty85/received_events", "repos_url": "https://api.github.com/users/afogarty85/repos", "site_admin": false, "starred_url": "https://api.github.com/users/afogarty85/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/afogarty85/subscriptions", "type": "User", "url": "https://api.github.com/users/afogarty85" }
Using select/reordering datasets slows operations down immensely
https://api.github.com/repos/huggingface/datasets/issues/1755/events
null
https://api.github.com/repos/huggingface/datasets/issues/1755/labels{/name}
2021-01-20T21:12:12Z
null
false
null
null
790,324,734
[]
https://api.github.com/repos/huggingface/datasets/issues/1755
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I am using portions of HF's helpful work in preparing / scoring the SQuAD 2.0 data. The problem I have is that after using `select` to re-ordering the dataset, computations slow down immensely where the total scoring process on 131k training examples would take maybe 3 minutes, now take over an hour. The below example should be reproducible and I have ran myself down this path because I want to use HF's scoring functions and helpful data preparation, but use my own trainer. The training process uses shuffle and therefore the order I trained on no longer matches the original data set order. So, to score my results correctly, the original data set needs to match the order of the training. This requires that I: (1) collect the index for each row of data emitted during training, and (2) use this index information to re-order the datasets correctly so the orders match when I go to score. The problem is, the dataset class starts performing very poorly as soon as you start manipulating its order by immense magnitudes. ``` from datasets import load_dataset, load_metric from transformers import BertTokenizerFast, BertForQuestionAnswering from elasticsearch import Elasticsearch import numpy as np import collections from tqdm.auto import tqdm import torch # from https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb#scrollTo=941LPhDWeYv- tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased') max_length = 384 # The maximum length of a feature (question and context) doc_stride = 128 # The authorized overlap between two part of the context when splitting it is needed. pad_on_right = tokenizer.padding_side == "right" squad_v2 = True # from https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb#scrollTo=941LPhDWeYv- def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples["question" if pad_on_right else "context"], examples["context" if pad_on_right else "question"], truncation="only_second" if pad_on_right else "only_first", max_length=max_length, stride=doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # We keep the example_id that gave us this feature and we will store the offset mappings. tokenized_examples["example_id"] = [] for i in range(len(tokenized_examples["input_ids"])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) context_index = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
tokenized_examples["offset_mapping"][i] = [ (list(o) if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i]) ] return tokenized_examples # from https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb#scrollTo=941LPhDWeYv- def postprocess_qa_predictions(examples, features, starting_logits, ending_logits, n_best_size = 20, max_answer_length = 30): all_start_logits, all_end_logits = starting_logits, ending_logits # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples["id"])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature["example_id"]]].append(i) # The dictionaries we have to fill. predictions = collections.OrderedDict() # Logging. print(f"Post-processing {len(examples)} example predictions split into {len(features)} features.") # Let's loop over all the examples! for example_index, example in enumerate(tqdm(examples)): # Those are the indices of the features associated to the current example. feature_indices = features_per_example[example_index] min_null_score = None # Only used if squad_v2 is True. valid_answers = [] context = example["context"] # Looping through all the features associated to the current example. for feature_index in feature_indices: # We grab the predictions of the model for this feature. start_logits = all_start_logits[feature_index] end_logits = all_end_logits[feature_index] # This is what will allow us to map some the positions in our logits to span of texts in the original # context. offset_mapping = features[feature_index]["offset_mapping"] # Update minimum null prediction. cls_index = features[feature_index]["input_ids"].index(tokenizer.cls_token_id) feature_null_score = start_logits[cls_index] + end_logits[cls_index] if min_null_score is None or min_null_score < feature_null_score: min_null_score = feature_null_score # Go through all possibilities for the `n_best_size` greater start and end logits. start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() for start_index in start_indexes: for end_index in end_indexes: # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond # to part of the input_ids that are not in the context. if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or offset_mapping[end_index] is None ): continue # Don't consider answers with a length that is either < 0 or > max_answer_length. if end_index < start_index or end_index - start_index + 1 > max_answer_length: continue start_char = offset_mapping[start_index][0] end_char = offset_mapping[end_index][1] valid_answers.append( { "score": start_logits[start_index] + end_logits[end_index], "text": context[start_char: end_char] } ) if len(valid_answers) > 0: best_answer = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[0] else: # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid # failure. 
best_answer = {"text": "", "score": 0.0} # Let's pick our final answer: the best one or the null answer (only for squad_v2) if not squad_v2: predictions[example["id"]] = best_answer["text"] else: answer = best_answer["text"] if best_answer["score"] > min_null_score else "" predictions[example["id"]] = answer return predictions # build base examples, features from training data examples = load_dataset("squad_v2").shuffle(seed=5)['train'] features = load_dataset("squad_v2").shuffle(seed=5)['train'].map( prepare_validation_features, batched=True, remove_columns=['answers', 'context', 'id', 'question', 'title']) # sim some shuffled training indices that we want to use to re-order the data to compare how we did shuffle_idx = np.arange(0, 131754) np.random.shuffle(shuffle_idx) # create a new dataset with rows selected following the training shuffle features = features.select(indices=shuffle_idx) # get unique example ids to match with the "example" data id_list = list(dict.fromkeys(features['example_id'])) # now search for their index positions; load elastic search es = Elasticsearch([{'host': 'localhost'}]).ping() # add an index to the id column for the examples examples.add_elasticsearch_index(column='id') # search the examples for their index position example_idx = [examples.search(index_name='id', query=i, k=1).indices for i in id_list] # drop the elastic search examples.drop_index(index_name='id') # put examples in the right order examples = examples.select(indices=example_idx) # generate some fake data logits = {'starting_logits': torch.randn(131754, 384), 'ending_logits': torch.randn(131754, 384)} def score_squad(logits, n_best_size, max_answer): # proceed with QA calculation final_predictions = postprocess_qa_predictions(examples=examples, features=features, starting_logits=logits['starting_logits'], ending_logits=logits['ending_logits'], n_best_size=20, max_answer_length=30) metric = load_metric("squad_v2") formatted_predictions = [{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in final_predictions.items()] references = [{"id": ex["id"], "answers": ex["answers"]} for ex in examples] metrics = metric.compute(predictions=formatted_predictions, references=references) return metrics metrics = score_squad(logits, n_best_size=20, max_answer=30) ```
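A minimal sketch of an alternative worth trying (my suggestion, not part of the original report, and assuming the example ids are unique): replace the per-id Elasticsearch lookup with a single Python dict built in one pass, and call `flatten_indices()` after `select()` so the reordered table is rewritten contiguously instead of being read through a large indices mapping — the slowdown described above is consistent with that per-access indirection.

```python
from datasets import load_dataset

examples = load_dataset("squad_v2", split="train")

# one O(n) pass instead of one Elasticsearch query per id
ids = examples["id"]
id_to_pos = {example_id: pos for pos, example_id in enumerate(ids)}

# `id_list` stands for the unique example ids in the order seen during training;
# reversed() is only a placeholder ordering for this sketch
id_list = list(reversed(ids))
example_idx = [id_to_pos[example_id] for example_id in id_list]

# select() only records an indices mapping; flatten_indices() materializes the
# new row order so later reads stay fast
examples = examples.select(example_idx).flatten_indices()
```

The `flatten_indices()` call pays a one-time rewrite cost but removes the lookup overhead that heavy reordering otherwise adds to every subsequent row access.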
2021-01-20T22:03:39Z
https://github.com/huggingface/datasets/issues/1755
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1755/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1754/comments
https://api.github.com/repos/huggingface/datasets/issues/1754/timeline
2021-01-25T09:12:06Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU4MTU5NjEw
closed
[]
false
1,754
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Use a config id in the cache directory names for custom configs
https://api.github.com/repos/huggingface/datasets/issues/1754/events
null
https://api.github.com/repos/huggingface/datasets/issues/1754/labels{/name}
2021-01-20T11:11:00Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1754.diff", "html_url": "https://github.com/huggingface/datasets/pull/1754", "merged_at": "2021-01-25T09:12:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/1754.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1754" }
789,881,730
[]
https://api.github.com/repos/huggingface/datasets/issues/1754
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
As noticed by @JetRunner there were some issues when trying to generate a dataset using a custom config that is based on an existing config. For example, in the following code the `mnli_custom` would reuse the cache used to create `mnli` instead of generating a new dataset with the new label classes: ```python from datasets import load_dataset mnli = load_dataset("glue", "mnli") mnli_custom = load_dataset("glue", "mnli", label_classes=["contradiction", "entailment", "neutral"]) ``` I fixed that by extending the cache directory definition of a dataset that is being generated. Instead of using the config name in the cache directory name, I switched to using a `config_id`. By default it is equal to the config name. However, the name of a config is not sufficient to be a unique identifier for the dataset being generated, since it doesn't take into account: - the config kwargs that can be used to overwrite attributes - the custom features used to write the dataset - the data_files for json/text/csv/pandas datasets Therefore the config id is just the config name with an optional suffix based on these. In particular, taking into account the config kwargs fixes the issue with the `label_classes` above. I completed the current test cases by adding the case that was missing: overwriting an already existing config.
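Purely for illustration, a rough sketch of how such a `config_id` could be derived — a hash of the serialized overrides appended to the config name; this is an assumption for readability, not necessarily the exact implementation in this PR.

```python
import hashlib
import json

def make_config_id(config_name: str, config_kwargs: dict) -> str:
    """Plain config name by default; name plus a deterministic suffix when
    custom kwargs are passed, so differing overrides get separate caches."""
    if not config_kwargs:
        return config_name
    serialized = json.dumps(config_kwargs, sort_keys=True, default=str)
    suffix = hashlib.sha256(serialized.encode("utf-8")).hexdigest()[:16]
    return f"{config_name}-{suffix}"

print(make_config_id("mnli", {}))
print(make_config_id("mnli", {"label_classes": ["contradiction", "entailment", "neutral"]}))
```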
2021-01-25T09:12:07Z
https://github.com/huggingface/datasets/pull/1754
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1754/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1753/comments
https://api.github.com/repos/huggingface/datasets/issues/1753/timeline
2021-01-20T14:39:30Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU4MTQ3Njkx
closed
[]
false
1,753
{ "avatar_url": "https://avatars.githubusercontent.com/u/17256847?v=4", "events_url": "https://api.github.com/users/ricardorei/events{/privacy}", "followers_url": "https://api.github.com/users/ricardorei/followers", "following_url": "https://api.github.com/users/ricardorei/following{/other_user}", "gists_url": "https://api.github.com/users/ricardorei/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ricardorei", "id": 17256847, "login": "ricardorei", "node_id": "MDQ6VXNlcjE3MjU2ODQ3", "organizations_url": "https://api.github.com/users/ricardorei/orgs", "received_events_url": "https://api.github.com/users/ricardorei/received_events", "repos_url": "https://api.github.com/users/ricardorei/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ricardorei/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ricardorei/subscriptions", "type": "User", "url": "https://api.github.com/users/ricardorei" }
fix comet citations
https://api.github.com/repos/huggingface/datasets/issues/1753/events
null
https://api.github.com/repos/huggingface/datasets/issues/1753/labels{/name}
2021-01-20T10:52:38Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1753.diff", "html_url": "https://github.com/huggingface/datasets/pull/1753", "merged_at": "2021-01-20T14:39:30Z", "patch_url": "https://github.com/huggingface/datasets/pull/1753.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1753" }
789,867,685
[]
https://api.github.com/repos/huggingface/datasets/issues/1753
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
I realized COMET citations were not showing on the Hugging Face metrics page: <img width="814" alt="Screenshot 2021-01-20 at 09 48 44" src="https://user-images.githubusercontent.com/17256847/105164848-8b9da900-5b0d-11eb-9e20-a38f559d2037.png"> This pull request is intended to fix that. Thanks!
2021-01-20T14:39:30Z
https://github.com/huggingface/datasets/pull/1753
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1753/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1752/comments
https://api.github.com/repos/huggingface/datasets/issues/1752/timeline
2021-01-20T10:25:02Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU4MTA5NTA5
closed
[]
false
1,752
{ "avatar_url": "https://avatars.githubusercontent.com/u/17256847?v=4", "events_url": "https://api.github.com/users/ricardorei/events{/privacy}", "followers_url": "https://api.github.com/users/ricardorei/followers", "following_url": "https://api.github.com/users/ricardorei/following{/other_user}", "gists_url": "https://api.github.com/users/ricardorei/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ricardorei", "id": 17256847, "login": "ricardorei", "node_id": "MDQ6VXNlcjE3MjU2ODQ3", "organizations_url": "https://api.github.com/users/ricardorei/orgs", "received_events_url": "https://api.github.com/users/ricardorei/received_events", "repos_url": "https://api.github.com/users/ricardorei/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ricardorei/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ricardorei/subscriptions", "type": "User", "url": "https://api.github.com/users/ricardorei" }
COMET metric citation
https://api.github.com/repos/huggingface/datasets/issues/1752/events
null
https://api.github.com/repos/huggingface/datasets/issues/1752/labels{/name}
2021-01-20T09:54:43Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1752.diff", "html_url": "https://github.com/huggingface/datasets/pull/1752", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1752.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1752" }
789,822,459
[]
https://api.github.com/repos/huggingface/datasets/issues/1752
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
In my last pull request to add the COMET metric, the citations were not following the usual "format". Because of that, they were not displayed correctly on the website: <img width="814" alt="Screenshot 2021-01-20 at 09 48 44" src="https://user-images.githubusercontent.com/17256847/105158000-686efb80-5b05-11eb-8bb0-9c85fdac2938.png"> This pull request is only intended to fix that.
2021-01-20T10:27:07Z
https://github.com/huggingface/datasets/pull/1752
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1752/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1751/comments
https://api.github.com/repos/huggingface/datasets/issues/1751/timeline
2021-01-20T14:56:52Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU3NjA1ODE2
closed
[]
false
1,751
{ "avatar_url": "https://avatars.githubusercontent.com/u/26722925?v=4", "events_url": "https://api.github.com/users/mcmillanmajora/events{/privacy}", "followers_url": "https://api.github.com/users/mcmillanmajora/followers", "following_url": "https://api.github.com/users/mcmillanmajora/following{/other_user}", "gists_url": "https://api.github.com/users/mcmillanmajora/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mcmillanmajora", "id": 26722925, "login": "mcmillanmajora", "node_id": "MDQ6VXNlcjI2NzIyOTI1", "organizations_url": "https://api.github.com/users/mcmillanmajora/orgs", "received_events_url": "https://api.github.com/users/mcmillanmajora/received_events", "repos_url": "https://api.github.com/users/mcmillanmajora/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mcmillanmajora/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mcmillanmajora/subscriptions", "type": "User", "url": "https://api.github.com/users/mcmillanmajora" }
Updated README for the Social Bias Frames dataset
https://api.github.com/repos/huggingface/datasets/issues/1751/events
null
https://api.github.com/repos/huggingface/datasets/issues/1751/labels{/name}
2021-01-19T17:53:00Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1751.diff", "html_url": "https://github.com/huggingface/datasets/pull/1751", "merged_at": "2021-01-20T14:56:52Z", "patch_url": "https://github.com/huggingface/datasets/pull/1751.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1751" }
789,232,980
[]
https://api.github.com/repos/huggingface/datasets/issues/1751
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
See the updated card at https://github.com/mcmillanmajora/datasets/tree/add-SBIC-card/datasets/social_bias_frames. I incorporated information from the [SBIC data statement](https://homes.cs.washington.edu/~msap/social-bias-frames/DATASTATEMENT.html), paper, and the corpus README file included with the dataset download.
2021-01-20T14:56:52Z
https://github.com/huggingface/datasets/pull/1751
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1751/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1750/comments
https://api.github.com/repos/huggingface/datasets/issues/1750/timeline
2021-01-19T09:48:43Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU3MTM1MzM1
closed
[]
false
1,750
{ "avatar_url": "https://avatars.githubusercontent.com/u/2755894?v=4", "events_url": "https://api.github.com/users/forest1988/events{/privacy}", "followers_url": "https://api.github.com/users/forest1988/followers", "following_url": "https://api.github.com/users/forest1988/following{/other_user}", "gists_url": "https://api.github.com/users/forest1988/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/forest1988", "id": 2755894, "login": "forest1988", "node_id": "MDQ6VXNlcjI3NTU4OTQ=", "organizations_url": "https://api.github.com/users/forest1988/orgs", "received_events_url": "https://api.github.com/users/forest1988/received_events", "repos_url": "https://api.github.com/users/forest1988/repos", "site_admin": false, "starred_url": "https://api.github.com/users/forest1988/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/forest1988/subscriptions", "type": "User", "url": "https://api.github.com/users/forest1988" }
Fix typo in README.md of cnn_dailymail
https://api.github.com/repos/huggingface/datasets/issues/1750/events
null
https://api.github.com/repos/huggingface/datasets/issues/1750/labels{/name}
2021-01-19T03:06:05Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1750.diff", "html_url": "https://github.com/huggingface/datasets/pull/1750", "merged_at": "2021-01-19T09:48:43Z", "patch_url": "https://github.com/huggingface/datasets/pull/1750.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1750" }
788,668,085
[]
https://api.github.com/repos/huggingface/datasets/issues/1750
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
When I read the README.md of the `CNN/DailyMail Dataset`, I noticed what seems to be a typo: `CCN`. I am afraid this is a trivial matter, but I would like to suggest a revision.
2021-01-19T11:07:29Z
https://github.com/huggingface/datasets/pull/1750
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1750/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1749/comments
https://api.github.com/repos/huggingface/datasets/issues/1749/timeline
2021-01-29T18:38:08Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU2OTgxMDc5
closed
[]
false
1,749
{ "avatar_url": "https://avatars.githubusercontent.com/u/22454783?v=4", "events_url": "https://api.github.com/users/gmihaila/events{/privacy}", "followers_url": "https://api.github.com/users/gmihaila/followers", "following_url": "https://api.github.com/users/gmihaila/following{/other_user}", "gists_url": "https://api.github.com/users/gmihaila/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gmihaila", "id": 22454783, "login": "gmihaila", "node_id": "MDQ6VXNlcjIyNDU0Nzgz", "organizations_url": "https://api.github.com/users/gmihaila/orgs", "received_events_url": "https://api.github.com/users/gmihaila/received_events", "repos_url": "https://api.github.com/users/gmihaila/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gmihaila/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gmihaila/subscriptions", "type": "User", "url": "https://api.github.com/users/gmihaila" }
Added metadata and correct splits for swda.
https://api.github.com/repos/huggingface/datasets/issues/1749/events
null
https://api.github.com/repos/huggingface/datasets/issues/1749/labels{/name}
2021-01-18T18:36:32Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1749.diff", "html_url": "https://github.com/huggingface/datasets/pull/1749", "merged_at": "2021-01-29T18:38:08Z", "patch_url": "https://github.com/huggingface/datasets/pull/1749.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1749" }
788,476,639
[]
https://api.github.com/repos/huggingface/datasets/issues/1749
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Switchboard Dialog Act Corpus I made some changes following @bhavitvyamalik's recommendation in #1678: * Contains all metadata. * Used the official implementation from the [/swda](https://github.com/cgpotts/swda) repo. * Added the official train and test splits used in [Stolcke et al. (2000)](https://web.stanford.edu/~jurafsky/ws97) and the validation split used in [Probabilistic-RNN-DA-Classifier](https://github.com/NathanDuran/Probabilistic-RNN-DA-Classifier).
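A small usage sketch (my addition, assuming the three splits land as train/validation/test after this change):

```python
from datasets import load_dataset

swda = load_dataset("swda")
print(swda)                          # expected: DatasetDict with train / validation / test
print(swda["train"].column_names)    # the metadata fields added by this PR
print(swda["train"][0])
```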
2021-01-29T19:35:52Z
https://github.com/huggingface/datasets/pull/1749
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 2, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/1749/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1748/comments
https://api.github.com/repos/huggingface/datasets/issues/1748/timeline
2021-01-19T11:26:58Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU2OTQ0NDEx
closed
[]
false
1,748
{ "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stevhliu", "id": 59462357, "login": "stevhliu", "node_id": "MDQ6VXNlcjU5NDYyMzU3", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "repos_url": "https://api.github.com/users/stevhliu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "type": "User", "url": "https://api.github.com/users/stevhliu" }
add Structured Argument Extraction for Korean dataset
https://api.github.com/repos/huggingface/datasets/issues/1748/events
null
https://api.github.com/repos/huggingface/datasets/issues/1748/labels{/name}
2021-01-18T17:14:19Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1748.diff", "html_url": "https://github.com/huggingface/datasets/pull/1748", "merged_at": "2021-01-19T11:26:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/1748.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1748" }
788,431,642
[]
https://api.github.com/repos/huggingface/datasets/issues/1748
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
2021-09-17T16:53:18Z
https://github.com/huggingface/datasets/pull/1748
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1748/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1747/comments
https://api.github.com/repos/huggingface/datasets/issues/1747/timeline
2022-10-05T12:37:27Z
null
completed
MDU6SXNzdWU3ODgyOTk3NzU=
closed
[]
null
1,747
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost" }
datasets slicing with seed
https://api.github.com/repos/huggingface/datasets/issues/1747/events
null
https://api.github.com/repos/huggingface/datasets/issues/1747/labels{/name}
2021-01-18T14:08:55Z
null
false
null
null
788,299,775
[]
https://api.github.com/repos/huggingface/datasets/issues/1747
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I need to slice a dataset with a random seed. I looked into the documentation here https://huggingface.co/docs/datasets/splits.html but could not find a seed option. Could you please advise how I can get a slice for different seeds? Thank you. @lhoestq
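A short sketch of what I understand to be the usual workaround: the split string syntax itself has no seed parameter, but a seeded random slice can be obtained by shuffling with a seed and then selecting a range, or by calling `train_test_split` with a seed.

```python
from datasets import load_dataset

dataset = load_dataset("glue", "mrpc", split="train")

# seeded random slice of 100 examples
slice_a = dataset.shuffle(seed=42).select(range(100))
slice_b = dataset.shuffle(seed=123).select(range(100))  # different seed -> different slice

# or a seeded random split
splits = dataset.train_test_split(test_size=0.1, seed=42)
print(splits["train"].num_rows, splits["test"].num_rows)
```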
2022-10-05T12:37:27Z
https://github.com/huggingface/datasets/issues/1747
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1747/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1746/comments
https://api.github.com/repos/huggingface/datasets/issues/1746/timeline
2021-01-18T11:31:23Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU2NzQxMjIw
closed
[]
false
1,746
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Fix release conda worflow
https://api.github.com/repos/huggingface/datasets/issues/1746/events
null
https://api.github.com/repos/huggingface/datasets/issues/1746/labels{/name}
2021-01-18T11:29:10Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1746.diff", "html_url": "https://github.com/huggingface/datasets/pull/1746", "merged_at": "2021-01-18T11:31:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/1746.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1746" }
788,188,184
[]
https://api.github.com/repos/huggingface/datasets/issues/1746
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
The current workflow yaml file is not valid according to https://github.com/huggingface/datasets/actions/runs/487638110
2021-01-18T11:31:24Z
https://github.com/huggingface/datasets/pull/1746
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1746/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1745/comments
https://api.github.com/repos/huggingface/datasets/issues/1745/timeline
2021-01-18T00:59:34Z
null
completed
MDU6SXNzdWU3ODc4MzgyNTY=
closed
[]
null
1,745
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost" }
difference between wsc and wsc.fixed for superglue
https://api.github.com/repos/huggingface/datasets/issues/1745/events
null
https://api.github.com/repos/huggingface/datasets/issues/1745/labels{/name}
2021-01-18T00:50:19Z
null
false
null
null
787,838,256
[]
https://api.github.com/repos/huggingface/datasets/issues/1745
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I see two versions of wsc in SuperGLUE, and I am not sure what the differences are or which one is the original. Could you help clarify the differences? Thanks @lhoestq
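For reference, both configurations can be loaded side by side and compared directly; as far as I can tell from the SuperGLUE loader's description, `wsc` keeps the original release while `wsc.fixed` corrects examples whose marked spans are not exact substrings of the text — treat that summary as my reading rather than an authoritative answer.

```python
from datasets import load_dataset

wsc = load_dataset("super_glue", "wsc", split="train")
wsc_fixed = load_dataset("super_glue", "wsc.fixed", split="train")

print(wsc.features)
print(wsc_fixed.features)
print(wsc[0])
print(wsc_fixed[0])
```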
2021-01-18T11:02:43Z
https://github.com/huggingface/datasets/issues/1745
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1745/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1744/comments
https://api.github.com/repos/huggingface/datasets/issues/1744/timeline
2021-01-18T11:26:09Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU2MzA0MjU4
closed
[]
false
1,744
{ "avatar_url": "https://avatars.githubusercontent.com/u/2238344?v=4", "events_url": "https://api.github.com/users/jbragg/events{/privacy}", "followers_url": "https://api.github.com/users/jbragg/followers", "following_url": "https://api.github.com/users/jbragg/following{/other_user}", "gists_url": "https://api.github.com/users/jbragg/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jbragg", "id": 2238344, "login": "jbragg", "node_id": "MDQ6VXNlcjIyMzgzNDQ=", "organizations_url": "https://api.github.com/users/jbragg/orgs", "received_events_url": "https://api.github.com/users/jbragg/received_events", "repos_url": "https://api.github.com/users/jbragg/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jbragg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jbragg/subscriptions", "type": "User", "url": "https://api.github.com/users/jbragg" }
Add missing "brief" entries to reuters
https://api.github.com/repos/huggingface/datasets/issues/1744/events
null
https://api.github.com/repos/huggingface/datasets/issues/1744/labels{/name}
2021-01-17T07:58:49Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1744.diff", "html_url": "https://github.com/huggingface/datasets/pull/1744", "merged_at": "2021-01-18T11:26:09Z", "patch_url": "https://github.com/huggingface/datasets/pull/1744.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1744" }
787,649,811
[]
https://api.github.com/repos/huggingface/datasets/issues/1744
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This brings the number of examples for ModApte to match the stated `Training set (9,603 docs)...Test Set (3,299 docs)`
2021-01-18T11:26:09Z
https://github.com/huggingface/datasets/pull/1744
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1744/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1743/comments
https://api.github.com/repos/huggingface/datasets/issues/1743/timeline
2022-06-01T15:49:34Z
null
completed
MDU6SXNzdWU3ODc2MzE0MTI=
closed
[]
null
1,743
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani" }
Issue while Creating Custom Metric
https://api.github.com/repos/huggingface/datasets/issues/1743/events
null
https://api.github.com/repos/huggingface/datasets/issues/1743/labels{/name}
2021-01-17T07:01:14Z
null
false
null
null
787,631,412
[]
https://api.github.com/repos/huggingface/datasets/issues/1743
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hi Team, I am trying to create a custom metric for my training as follows, where f1 is my own metric: ```python def _info(self): # TODO: Specifies the datasets.MetricInfo object return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, # This defines the format of each prediction and reference features = datasets.Features({'predictions':datasets.Sequence(datasets.Value("int32")), "references": datasets.Sequence(datasets.Value("int32")),"offset_mapping":datasets.Sequence(datasets.Value("int32")),'text':datasets.Sequence(datasets.Value('string')),"ground":datasets.Sequence(datasets.Value("int32")),}), # Homepage of the metric for documentation homepage="http://metric.homepage", # Additional links to the codebase or references codebase_urls=["http://github.com/path/to/codebase/of/new_metric"], reference_urls=["http://path.to.reference.url/new_metric"] ) def _compute(self,predictions,references,text,offset_mapping,spans): pred_spans = [] for i,preds in enumerate(predictions): current_preds = [] for j,token_preds in enumerate(preds): if (preds>0.5): current_preds+=list(range(offset_mapping[i][j][0],offset_mapping[i][j][1])) pred_spans.append(current_spans) return { "Token Wise F1": f1_score(references,predictions,labels=[0,1]), "Offset Wise F1": np.mean([f1(preds,gold) for preds,fold in zip(pred_spans,ground)]) } ``` I believe this is not correct. But that's not the issue I am facing right now. I get this error : ```python --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-144-ed7349b50821> in <module>() ----> 1 new_metric.compute(predictions=inputs["labels"],references=inputs["labels"], text=inputs["text"], offset_mapping=inputs["offset_mapping"],ground=inputs["ground"] ) 2 frames /usr/local/lib/python3.6/dist-packages/datasets/features.py in encode_batch(self, batch) 802 encoded_batch = {} 803 if set(batch) != set(self): --> 804 print(batch) 805 print(self) 806 raise ValueError("Column mismatch between batch {} and features {}".format(set(batch), set(self))) ValueError: Column mismatch between batch {'references', 'predictions'} and features {'ground', 'predictions', 'offset_mapping', 'text', 'references'} ``` On checking the features.py file, I see the call is made from add_batch() in metrics.py which only takes in predictions and references. How do I make my custom metric work? Will it work with a trainer even if I am able to make this metric work? Thanks, Gunjan
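A hedged sketch of one possible workaround, based on my reading that `Metric.compute` forwards extra keyword arguments to `_compute` (worth verifying against the installed version): declare only `predictions` and `references` in the metric's features, and pass the auxiliary arrays as plain keyword arguments to `compute()` rather than encoding them as features.

```python
import datasets
from sklearn.metrics import f1_score

class SpanF1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description="token-level F1 (sketch)",
            citation="",
            inputs_description="",
            # only the two columns that add_batch() actually tracks
            features=datasets.Features({
                "predictions": datasets.Sequence(datasets.Value("int32")),
                "references": datasets.Sequence(datasets.Value("int32")),
            }),
        )

    def _compute(self, predictions, references, offset_mapping=None, text=None, ground=None):
        # auxiliary inputs arrive here as extra kwargs of compute(), untouched by the features check
        flat_refs = [label for seq in references for label in seq]
        flat_preds = [label for seq in predictions for label in seq]
        return {"token_wise_f1": f1_score(flat_refs, flat_preds, average="macro")}

metric = SpanF1()
result = metric.compute(
    predictions=[[0, 1, 1], [1, 0, 0]],
    references=[[0, 1, 0], [1, 0, 0]],
    offset_mapping=None,  # hypothetical extra inputs, forwarded to _compute
    text=None,
    ground=None,
)
print(result)
```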
2022-06-01T15:49:34Z
https://github.com/huggingface/datasets/issues/1743
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1743/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1742/comments
https://api.github.com/repos/huggingface/datasets/issues/1742/timeline
2021-03-29T12:43:30Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU2MjgyMDYw
closed
[]
false
1,742
{ "avatar_url": "https://avatars.githubusercontent.com/u/22514219?v=4", "events_url": "https://api.github.com/users/JetRunner/events{/privacy}", "followers_url": "https://api.github.com/users/JetRunner/followers", "following_url": "https://api.github.com/users/JetRunner/following{/other_user}", "gists_url": "https://api.github.com/users/JetRunner/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JetRunner", "id": 22514219, "login": "JetRunner", "node_id": "MDQ6VXNlcjIyNTE0MjE5", "organizations_url": "https://api.github.com/users/JetRunner/orgs", "received_events_url": "https://api.github.com/users/JetRunner/received_events", "repos_url": "https://api.github.com/users/JetRunner/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JetRunner/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JetRunner/subscriptions", "type": "User", "url": "https://api.github.com/users/JetRunner" }
Add GLUE Compat (compatible with transformers<3.5.0)
https://api.github.com/repos/huggingface/datasets/issues/1742/events
null
https://api.github.com/repos/huggingface/datasets/issues/1742/labels{/name}
2021-01-17T05:54:25Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1742.diff", "html_url": "https://github.com/huggingface/datasets/pull/1742", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1742.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1742" }
787,623,640
[]
https://api.github.com/repos/huggingface/datasets/issues/1742
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Link to our discussion on Slack (HF internal) https://huggingface.slack.com/archives/C014N4749J9/p1609668119337400 The next step is to add a compatible option in the new `run_glue.py` I duplicated `glue` and made the following changes: 1. Change the name to `glue_compat`. 2. Change the label assignments for MNLI and AX.
2023-09-24T09:52:12Z
https://github.com/huggingface/datasets/pull/1742
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1742/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1741/comments
https://api.github.com/repos/huggingface/datasets/issues/1741/timeline
2021-01-16T02:39:18Z
null
completed
MDU6SXNzdWU3ODczMjcwNjA=
closed
[]
null
1,741
{ "avatar_url": "https://avatars.githubusercontent.com/u/43234824?v=4", "events_url": "https://api.github.com/users/XiaoYang66/events{/privacy}", "followers_url": "https://api.github.com/users/XiaoYang66/followers", "following_url": "https://api.github.com/users/XiaoYang66/following{/other_user}", "gists_url": "https://api.github.com/users/XiaoYang66/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/XiaoYang66", "id": 43234824, "login": "XiaoYang66", "node_id": "MDQ6VXNlcjQzMjM0ODI0", "organizations_url": "https://api.github.com/users/XiaoYang66/orgs", "received_events_url": "https://api.github.com/users/XiaoYang66/received_events", "repos_url": "https://api.github.com/users/XiaoYang66/repos", "site_admin": false, "starred_url": "https://api.github.com/users/XiaoYang66/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/XiaoYang66/subscriptions", "type": "User", "url": "https://api.github.com/users/XiaoYang66" }
Error when running fine-tuning on text_classification
https://api.github.com/repos/huggingface/datasets/issues/1741/events
null
https://api.github.com/repos/huggingface/datasets/issues/1741/labels{/name}
2021-01-16T02:23:19Z
null
false
null
null
787,327,060
[]
https://api.github.com/repos/huggingface/datasets/issues/1741
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
dataset: sem_eval_2014_task_1 pretrained_model: bert-base-uncased Error description: when I use these resources to fine-tune a text classification model on sem_eval_2014_task_1, there is always a problem (when I use other datasets, the error occurs too). I followed the Colab code (url: https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb#scrollTo=TlqNaB8jIrJW). The error looks like this:
```
File "train.py", line 69, in <module>
    trainer.train()
File "/home/projects/anaconda3/envs/calibration/lib/python3.7/site-packages/transformers/trainer.py", line 784, in train
    for step, inputs in enumerate(epoch_iterator):
File "/home/projects/anaconda3/envs/calibration/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 435, in __next__
    data = self._next_data()
File "/home/projects/anaconda3/envs/calibration/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 475, in _next_data
    data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
File "/home/projects/anaconda3/envs/calibration/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch
    data = [self.dataset[idx] for idx in possibly_batched_index]
File "/home/projects/anaconda3/envs/calibration/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp>
    data = [self.dataset[idx] for idx in possibly_batched_index]
KeyError: 2
```
This is my code:
```python
from datasets import load_dataset
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from transformers import (
    BertForSequenceClassification,
    BertTokenizerFast,
    Trainer,
    TrainingArguments,
)

dataset_name = 'sem_eval_2014_task_1'
num_labels_size = 3
batch_size = 4
model_checkpoint = 'bert-base-uncased'
number_train_epoch = 5

def tokenize(batch):
    return tokenizer(batch['premise'], batch['hypothesis'], truncation=True)

def compute_metrics(pred):
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='micro')
    acc = accuracy_score(labels, preds)
    return {
        'accuracy': acc,
        'f1': f1,
        'precision': precision,
        'recall': recall
    }

model = BertForSequenceClassification.from_pretrained(model_checkpoint, num_labels=num_labels_size)
tokenizer = BertTokenizerFast.from_pretrained(model_checkpoint, use_fast=True)

train_dataset = load_dataset(dataset_name, split='train')
test_dataset = load_dataset(dataset_name, split='test')
train_encoded_dataset = train_dataset.map(tokenize, batched=True)
test_encoded_dataset = test_dataset.map(tokenize, batched=True)

args = TrainingArguments(
    output_dir='./results',
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    num_train_epochs=number_train_epoch,
    weight_decay=0.01,
    do_predict=True,
)

trainer = Trainer(
    model=model,
    args=args,
    compute_metrics=compute_metrics,
    train_dataset=train_encoded_dataset,
    eval_dataset=test_encoded_dataset,
    tokenizer=tokenizer
)

trainer.train()
trainer.evaluate()
```
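One thing worth checking (a guess on my part, not a confirmed diagnosis): `sem_eval_2014_task_1` does not expose a column literally named `label`/`labels`, so the `Trainer` may drop or never see the label column when it removes unused columns. A minimal sketch, assuming the label column is called `entailment_judgment` — verify the real names with `column_names` first:

```python
from datasets import load_dataset
from transformers import BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")

train_dataset = load_dataset("sem_eval_2014_task_1", split="train")
print(train_dataset.column_names)  # check the actual column names first

# assumed label column name; adjust if the printed names differ
train_dataset = train_dataset.rename_column("entailment_judgment", "labels")

def tokenize(batch):
    return tokenizer(batch["premise"], batch["hypothesis"], truncation=True)

train_encoded_dataset = train_dataset.map(tokenize, batched=True)
# train_encoded_dataset can then be passed to Trainer as in the snippet above
```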
2021-01-16T02:39:28Z
https://github.com/huggingface/datasets/issues/1741
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1741/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1740/comments
https://api.github.com/repos/huggingface/datasets/issues/1740/timeline
2021-01-20T13:41:26Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU2MDA5NjM1
closed
[]
false
1,740
{ "avatar_url": "https://avatars.githubusercontent.com/u/7669893?v=4", "events_url": "https://api.github.com/users/cahya-wirawan/events{/privacy}", "followers_url": "https://api.github.com/users/cahya-wirawan/followers", "following_url": "https://api.github.com/users/cahya-wirawan/following{/other_user}", "gists_url": "https://api.github.com/users/cahya-wirawan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cahya-wirawan", "id": 7669893, "login": "cahya-wirawan", "node_id": "MDQ6VXNlcjc2Njk4OTM=", "organizations_url": "https://api.github.com/users/cahya-wirawan/orgs", "received_events_url": "https://api.github.com/users/cahya-wirawan/received_events", "repos_url": "https://api.github.com/users/cahya-wirawan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cahya-wirawan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cahya-wirawan/subscriptions", "type": "User", "url": "https://api.github.com/users/cahya-wirawan" }
add id_liputan6 dataset
https://api.github.com/repos/huggingface/datasets/issues/1740/events
null
https://api.github.com/repos/huggingface/datasets/issues/1740/labels{/name}
2021-01-15T22:58:34Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1740.diff", "html_url": "https://github.com/huggingface/datasets/pull/1740", "merged_at": "2021-01-20T13:41:26Z", "patch_url": "https://github.com/huggingface/datasets/pull/1740.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1740" }
787,264,605
[]
https://api.github.com/repos/huggingface/datasets/issues/1740
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
id_liputan6 is a large-scale Indonesian summarization dataset. The articles were harvested from an online news portal, yielding 215,827 document-summary pairs: https://arxiv.org/abs/2011.00679
2021-01-20T13:41:26Z
https://github.com/huggingface/datasets/pull/1740
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1740/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1739/comments
https://api.github.com/repos/huggingface/datasets/issues/1739/timeline
2021-01-29T10:53:03Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU1OTY5Njgx
closed
[]
false
1,739
{ "avatar_url": "https://avatars.githubusercontent.com/u/9607332?v=4", "events_url": "https://api.github.com/users/Shimorina/events{/privacy}", "followers_url": "https://api.github.com/users/Shimorina/followers", "following_url": "https://api.github.com/users/Shimorina/following{/other_user}", "gists_url": "https://api.github.com/users/Shimorina/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Shimorina", "id": 9607332, "login": "Shimorina", "node_id": "MDQ6VXNlcjk2MDczMzI=", "organizations_url": "https://api.github.com/users/Shimorina/orgs", "received_events_url": "https://api.github.com/users/Shimorina/received_events", "repos_url": "https://api.github.com/users/Shimorina/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Shimorina/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Shimorina/subscriptions", "type": "User", "url": "https://api.github.com/users/Shimorina" }
fixes and improvements for the WebNLG loader
https://api.github.com/repos/huggingface/datasets/issues/1739/events
null
https://api.github.com/repos/huggingface/datasets/issues/1739/labels{/name}
2021-01-15T21:45:23Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1739.diff", "html_url": "https://github.com/huggingface/datasets/pull/1739", "merged_at": "2021-01-29T10:53:03Z", "patch_url": "https://github.com/huggingface/datasets/pull/1739.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1739" }
787,219,138
[]
https://api.github.com/repos/huggingface/datasets/issues/1739
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
- fixes test sets loading in v3.0 - adds additional fields for v3.0_ru - adds info to the WebNLG data card
2021-01-29T14:34:06Z
https://github.com/huggingface/datasets/pull/1739
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1739/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1738/comments
https://api.github.com/repos/huggingface/datasets/issues/1738/timeline
2021-01-15T10:08:19Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU0OTk2NDU4
closed
[]
false
1,738
{ "avatar_url": "https://avatars.githubusercontent.com/u/30755778?v=4", "events_url": "https://api.github.com/users/LysandreJik/events{/privacy}", "followers_url": "https://api.github.com/users/LysandreJik/followers", "following_url": "https://api.github.com/users/LysandreJik/following{/other_user}", "gists_url": "https://api.github.com/users/LysandreJik/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/LysandreJik", "id": 30755778, "login": "LysandreJik", "node_id": "MDQ6VXNlcjMwNzU1Nzc4", "organizations_url": "https://api.github.com/users/LysandreJik/orgs", "received_events_url": "https://api.github.com/users/LysandreJik/received_events", "repos_url": "https://api.github.com/users/LysandreJik/repos", "site_admin": false, "starred_url": "https://api.github.com/users/LysandreJik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LysandreJik/subscriptions", "type": "User", "url": "https://api.github.com/users/LysandreJik" }
Conda support
https://api.github.com/repos/huggingface/datasets/issues/1738/events
null
https://api.github.com/repos/huggingface/datasets/issues/1738/labels{/name}
2021-01-14T15:11:25Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1738.diff", "html_url": "https://github.com/huggingface/datasets/pull/1738", "merged_at": "2021-01-15T10:08:18Z", "patch_url": "https://github.com/huggingface/datasets/pull/1738.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1738" }
786,068,440
[]
https://api.github.com/repos/huggingface/datasets/issues/1738
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Will push a new version to Anaconda Cloud every time a tag starting with `v` is pushed (like `v1.2.2`). It will appear here: https://anaconda.org/huggingface/datasets It depends on `conda-forge` for now, so the following is required for installation: ``` conda install -c huggingface -c conda-forge datasets ```
2021-01-15T10:08:20Z
https://github.com/huggingface/datasets/pull/1738
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 4, "total_count": 4, "url": "https://api.github.com/repos/huggingface/datasets/issues/1738/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1737/comments
https://api.github.com/repos/huggingface/datasets/issues/1737/timeline
2021-01-14T10:25:24Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU0NjA2ODg5
closed
[]
false
1,737
{ "avatar_url": "https://avatars.githubusercontent.com/u/6429850?v=4", "events_url": "https://api.github.com/users/chameleonTK/events{/privacy}", "followers_url": "https://api.github.com/users/chameleonTK/followers", "following_url": "https://api.github.com/users/chameleonTK/following{/other_user}", "gists_url": "https://api.github.com/users/chameleonTK/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/chameleonTK", "id": 6429850, "login": "chameleonTK", "node_id": "MDQ6VXNlcjY0Mjk4NTA=", "organizations_url": "https://api.github.com/users/chameleonTK/orgs", "received_events_url": "https://api.github.com/users/chameleonTK/received_events", "repos_url": "https://api.github.com/users/chameleonTK/repos", "site_admin": false, "starred_url": "https://api.github.com/users/chameleonTK/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chameleonTK/subscriptions", "type": "User", "url": "https://api.github.com/users/chameleonTK" }
update link in TLC to be github links
https://api.github.com/repos/huggingface/datasets/issues/1737/events
null
https://api.github.com/repos/huggingface/datasets/issues/1737/labels{/name}
2021-01-14T02:49:21Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1737.diff", "html_url": "https://github.com/huggingface/datasets/pull/1737", "merged_at": "2021-01-14T10:25:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/1737.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1737" }
785,606,286
[]
https://api.github.com/repos/huggingface/datasets/issues/1737
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Based on this issue https://github.com/huggingface/datasets/issues/1064, I can now use the official links.
2021-01-14T10:25:24Z
https://github.com/huggingface/datasets/pull/1737
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1737/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1736/comments
https://api.github.com/repos/huggingface/datasets/issues/1736/timeline
2021-01-14T10:29:38Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU0NDYyNjYw
closed
[]
false
1,736
{ "avatar_url": "https://avatars.githubusercontent.com/u/5097052?v=4", "events_url": "https://api.github.com/users/jonatasgrosman/events{/privacy}", "followers_url": "https://api.github.com/users/jonatasgrosman/followers", "following_url": "https://api.github.com/users/jonatasgrosman/following{/other_user}", "gists_url": "https://api.github.com/users/jonatasgrosman/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jonatasgrosman", "id": 5097052, "login": "jonatasgrosman", "node_id": "MDQ6VXNlcjUwOTcwNTI=", "organizations_url": "https://api.github.com/users/jonatasgrosman/orgs", "received_events_url": "https://api.github.com/users/jonatasgrosman/received_events", "repos_url": "https://api.github.com/users/jonatasgrosman/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jonatasgrosman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jonatasgrosman/subscriptions", "type": "User", "url": "https://api.github.com/users/jonatasgrosman" }
Adjust BrWaC dataset features name
https://api.github.com/repos/huggingface/datasets/issues/1736/events
null
https://api.github.com/repos/huggingface/datasets/issues/1736/labels{/name}
2021-01-13T20:39:04Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1736.diff", "html_url": "https://github.com/huggingface/datasets/pull/1736", "merged_at": "2021-01-14T10:29:38Z", "patch_url": "https://github.com/huggingface/datasets/pull/1736.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1736" }
785,433,854
[]
https://api.github.com/repos/huggingface/datasets/issues/1736
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
I added this dataset some days ago, and today I used it to train some models and realized that the names of the features aren't so good. Looking at the current features hierarchy, we have "paragraphs" with a list of "sentences" with a list of "sentences?!". But the actual hierarchy is a "text" with a list of "paragraphs" with a list of "sentences". I confused myself trying to use the dataset with these names. So I think it's better to change it.
2021-01-14T10:29:38Z
https://github.com/huggingface/datasets/pull/1736
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1736/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1735/comments
https://api.github.com/repos/huggingface/datasets/issues/1735/timeline
2021-01-14T15:16:00Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU0MjUzMDcw
closed
[]
false
1,735
{ "avatar_url": "https://avatars.githubusercontent.com/u/35901082?v=4", "events_url": "https://api.github.com/users/sgugger/events{/privacy}", "followers_url": "https://api.github.com/users/sgugger/followers", "following_url": "https://api.github.com/users/sgugger/following{/other_user}", "gists_url": "https://api.github.com/users/sgugger/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sgugger", "id": 35901082, "login": "sgugger", "node_id": "MDQ6VXNlcjM1OTAxMDgy", "organizations_url": "https://api.github.com/users/sgugger/orgs", "received_events_url": "https://api.github.com/users/sgugger/received_events", "repos_url": "https://api.github.com/users/sgugger/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sgugger/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sgugger/subscriptions", "type": "User", "url": "https://api.github.com/users/sgugger" }
Update add new dataset template
https://api.github.com/repos/huggingface/datasets/issues/1735/events
null
https://api.github.com/repos/huggingface/datasets/issues/1735/labels{/name}
2021-01-13T15:08:09Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1735.diff", "html_url": "https://github.com/huggingface/datasets/pull/1735", "merged_at": "2021-01-14T15:16:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/1735.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1735" }
785,184,740
[]
https://api.github.com/repos/huggingface/datasets/issues/1735
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This PR fixes a few typos in the "Add new dataset template" and clarifies a bit what to do for the dummy data creation when the `auto_generate` flag can't work.
2021-01-14T15:16:01Z
https://github.com/huggingface/datasets/pull/1735
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1735/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1734/comments
https://api.github.com/repos/huggingface/datasets/issues/1734/timeline
2021-01-14T10:42:18Z
null
null
MDExOlB1bGxSZXF1ZXN0NTU0MDYxMzMz
closed
[]
false
1,734
{ "avatar_url": "https://avatars.githubusercontent.com/u/15519308?v=4", "events_url": "https://api.github.com/users/cstorm125/events{/privacy}", "followers_url": "https://api.github.com/users/cstorm125/followers", "following_url": "https://api.github.com/users/cstorm125/following{/other_user}", "gists_url": "https://api.github.com/users/cstorm125/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cstorm125", "id": 15519308, "login": "cstorm125", "node_id": "MDQ6VXNlcjE1NTE5MzA4", "organizations_url": "https://api.github.com/users/cstorm125/orgs", "received_events_url": "https://api.github.com/users/cstorm125/received_events", "repos_url": "https://api.github.com/users/cstorm125/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cstorm125/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cstorm125/subscriptions", "type": "User", "url": "https://api.github.com/users/cstorm125" }
Fix empty token bug for `thainer` and `lst20`
https://api.github.com/repos/huggingface/datasets/issues/1734/events
null
https://api.github.com/repos/huggingface/datasets/issues/1734/labels{/name}
2021-01-13T09:55:09Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1734.diff", "html_url": "https://github.com/huggingface/datasets/pull/1734", "merged_at": "2021-01-14T10:42:18Z", "patch_url": "https://github.com/huggingface/datasets/pull/1734.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1734" }
784,956,707
[]
https://api.github.com/repos/huggingface/datasets/issues/1734
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
add a condition to check if tokens exist before yielding in `thainer` and `lst20`
2021-01-14T10:42:18Z
https://github.com/huggingface/datasets/pull/1734
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1734/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1733/comments
https://api.github.com/repos/huggingface/datasets/issues/1733/timeline
2021-08-04T18:13:55Z
null
completed
MDU6SXNzdWU3ODQ5MDMwMDI=
closed
[]
null
1,733
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost" }
connection issue with glue, what is the data url for glue?
https://api.github.com/repos/huggingface/datasets/issues/1733/events
null
https://api.github.com/repos/huggingface/datasets/issues/1733/labels{/name}
2021-01-13T08:37:40Z
null
false
null
null
784,903,002
[]
https://api.github.com/repos/huggingface/datasets/issues/1733
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, my code sometimes fails due to a connection issue with GLUE. Could you tell me the URL the datasets library is trying to read GLUE from, so I can test whether the issue is on my side on the machines I am working on? Thanks.
2021-08-04T18:13:55Z
https://github.com/huggingface/datasets/issues/1733
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1733/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1732/comments
https://api.github.com/repos/huggingface/datasets/issues/1732/timeline
2021-01-14T10:19:41Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUzOTkzNTAx
closed
[]
false
1,732
{ "avatar_url": "https://avatars.githubusercontent.com/u/11708999?v=4", "events_url": "https://api.github.com/users/mounicam/events{/privacy}", "followers_url": "https://api.github.com/users/mounicam/followers", "following_url": "https://api.github.com/users/mounicam/following{/other_user}", "gists_url": "https://api.github.com/users/mounicam/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mounicam", "id": 11708999, "login": "mounicam", "node_id": "MDQ6VXNlcjExNzA4OTk5", "organizations_url": "https://api.github.com/users/mounicam/orgs", "received_events_url": "https://api.github.com/users/mounicam/received_events", "repos_url": "https://api.github.com/users/mounicam/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mounicam/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mounicam/subscriptions", "type": "User", "url": "https://api.github.com/users/mounicam" }
[GEM Dataset] Added TurkCorpus, an evaluation dataset for sentence simplification.
https://api.github.com/repos/huggingface/datasets/issues/1732/events
null
https://api.github.com/repos/huggingface/datasets/issues/1732/labels{/name}
2021-01-13T07:50:19Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1732.diff", "html_url": "https://github.com/huggingface/datasets/pull/1732", "merged_at": "2021-01-14T10:19:40Z", "patch_url": "https://github.com/huggingface/datasets/pull/1732.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1732" }
784,874,490
[]
https://api.github.com/repos/huggingface/datasets/issues/1732
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
We want to use TurkCorpus for validation and testing of the sentence simplification task.
2021-01-14T10:19:41Z
https://github.com/huggingface/datasets/pull/1732
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1732/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1731/comments
https://api.github.com/repos/huggingface/datasets/issues/1731/timeline
2021-01-13T11:17:40Z
null
completed
MDU6SXNzdWU3ODQ3NDQ2NzQ=
closed
[]
null
1,731
{ "avatar_url": "https://avatars.githubusercontent.com/u/13365326?v=4", "events_url": "https://api.github.com/users/yangp725/events{/privacy}", "followers_url": "https://api.github.com/users/yangp725/followers", "following_url": "https://api.github.com/users/yangp725/following{/other_user}", "gists_url": "https://api.github.com/users/yangp725/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yangp725", "id": 13365326, "login": "yangp725", "node_id": "MDQ6VXNlcjEzMzY1MzI2", "organizations_url": "https://api.github.com/users/yangp725/orgs", "received_events_url": "https://api.github.com/users/yangp725/received_events", "repos_url": "https://api.github.com/users/yangp725/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yangp725/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yangp725/subscriptions", "type": "User", "url": "https://api.github.com/users/yangp725" }
Couldn't reach swda.py
https://api.github.com/repos/huggingface/datasets/issues/1731/events
null
https://api.github.com/repos/huggingface/datasets/issues/1731/labels{/name}
2021-01-13T02:57:40Z
null
false
null
null
784,744,674
[]
https://api.github.com/repos/huggingface/datasets/issues/1731
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
ConnectionError: Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/1.2.0/datasets/swda/swda.py
2021-01-13T11:17:40Z
https://github.com/huggingface/datasets/issues/1731
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1731/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1730/comments
https://api.github.com/repos/huggingface/datasets/issues/1730/timeline
2021-01-13T10:19:46Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUzNzgxMDY0
closed
[]
false
1,730
{ "avatar_url": "https://avatars.githubusercontent.com/u/35901082?v=4", "events_url": "https://api.github.com/users/sgugger/events{/privacy}", "followers_url": "https://api.github.com/users/sgugger/followers", "following_url": "https://api.github.com/users/sgugger/following{/other_user}", "gists_url": "https://api.github.com/users/sgugger/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sgugger", "id": 35901082, "login": "sgugger", "node_id": "MDQ6VXNlcjM1OTAxMDgy", "organizations_url": "https://api.github.com/users/sgugger/orgs", "received_events_url": "https://api.github.com/users/sgugger/received_events", "repos_url": "https://api.github.com/users/sgugger/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sgugger/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sgugger/subscriptions", "type": "User", "url": "https://api.github.com/users/sgugger" }
Add MNIST dataset
https://api.github.com/repos/huggingface/datasets/issues/1730/events
null
https://api.github.com/repos/huggingface/datasets/issues/1730/labels{/name}
2021-01-12T21:48:02Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1730.diff", "html_url": "https://github.com/huggingface/datasets/pull/1730", "merged_at": "2021-01-13T10:19:46Z", "patch_url": "https://github.com/huggingface/datasets/pull/1730.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1730" }
784,617,525
[]
https://api.github.com/repos/huggingface/datasets/issues/1730
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This PR adds the MNIST dataset to the library.
2021-01-13T10:19:47Z
https://github.com/huggingface/datasets/pull/1730
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 1, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1730/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1729/comments
https://api.github.com/repos/huggingface/datasets/issues/1729/timeline
2021-03-31T04:24:07Z
null
completed
MDU6SXNzdWU3ODQ1NjU4OTg=
closed
[]
null
1,729
{ "avatar_url": "https://avatars.githubusercontent.com/u/28235457?v=4", "events_url": "https://api.github.com/users/pablodz/events{/privacy}", "followers_url": "https://api.github.com/users/pablodz/followers", "following_url": "https://api.github.com/users/pablodz/following{/other_user}", "gists_url": "https://api.github.com/users/pablodz/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/pablodz", "id": 28235457, "login": "pablodz", "node_id": "MDQ6VXNlcjI4MjM1NDU3", "organizations_url": "https://api.github.com/users/pablodz/orgs", "received_events_url": "https://api.github.com/users/pablodz/received_events", "repos_url": "https://api.github.com/users/pablodz/repos", "site_admin": false, "starred_url": "https://api.github.com/users/pablodz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pablodz/subscriptions", "type": "User", "url": "https://api.github.com/users/pablodz" }
Is there support for Deep learning datasets?
https://api.github.com/repos/huggingface/datasets/issues/1729/events
null
https://api.github.com/repos/huggingface/datasets/issues/1729/labels{/name}
2021-01-12T20:22:41Z
null
false
null
null
784,565,898
[]
https://api.github.com/repos/huggingface/datasets/issues/1729
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I looked around this repository and its datasets, and I think there's no support for image datasets. Or am I missing something? For example, to add a repo like this: https://github.com/DZPeru/fish-datasets
2021-03-31T04:24:07Z
https://github.com/huggingface/datasets/issues/1729
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1729/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1728/comments
https://api.github.com/repos/huggingface/datasets/issues/1728/timeline
2021-01-18T19:15:32Z
null
completed
MDU6SXNzdWU3ODQ0NTgzNDI=
closed
[]
null
1,728
{ "avatar_url": "https://avatars.githubusercontent.com/u/18645407?v=4", "events_url": "https://api.github.com/users/ameet-1997/events{/privacy}", "followers_url": "https://api.github.com/users/ameet-1997/followers", "following_url": "https://api.github.com/users/ameet-1997/following{/other_user}", "gists_url": "https://api.github.com/users/ameet-1997/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ameet-1997", "id": 18645407, "login": "ameet-1997", "node_id": "MDQ6VXNlcjE4NjQ1NDA3", "organizations_url": "https://api.github.com/users/ameet-1997/orgs", "received_events_url": "https://api.github.com/users/ameet-1997/received_events", "repos_url": "https://api.github.com/users/ameet-1997/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ameet-1997/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ameet-1997/subscriptions", "type": "User", "url": "https://api.github.com/users/ameet-1997" }
Add an entry to an arrow dataset
https://api.github.com/repos/huggingface/datasets/issues/1728/events
null
https://api.github.com/repos/huggingface/datasets/issues/1728/labels{/name}
2021-01-12T18:01:47Z
null
false
null
null
784,458,342
[]
https://api.github.com/repos/huggingface/datasets/issues/1728
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Is it possible to add an entry to a dataset object? **Motivation: I want to transform the sentences in the dataset and add them to the original dataset** For example, say we have the following code: ``` python from datasets import load_dataset # Load a dataset and print the first examples in the training set squad_dataset = load_dataset('squad') print(squad_dataset['train'][0]) ``` Is it possible to add an entry to `squad_dataset`? Something like the following? ``` python squad_dataset.append({'text': "This is a new sentence"}) ``` The motivation for doing this is that I want to transform the sentences in the squad dataset and add them to the original dataset. If the above doesn't work, is there any other way of achieving the motivation mentioned above? Perhaps by creating a new arrow dataset using the older one and the transformed sentences?
2021-01-18T19:15:32Z
https://github.com/huggingface/datasets/issues/1728
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1728/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1727/comments
https://api.github.com/repos/huggingface/datasets/issues/1727/timeline
2022-06-01T16:06:02Z
null
completed
MDU6SXNzdWU3ODQ0MzUxMzE=
closed
[]
null
1,727
{ "avatar_url": "https://avatars.githubusercontent.com/u/6603920?v=4", "events_url": "https://api.github.com/users/nadavo/events{/privacy}", "followers_url": "https://api.github.com/users/nadavo/followers", "following_url": "https://api.github.com/users/nadavo/following{/other_user}", "gists_url": "https://api.github.com/users/nadavo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nadavo", "id": 6603920, "login": "nadavo", "node_id": "MDQ6VXNlcjY2MDM5MjA=", "organizations_url": "https://api.github.com/users/nadavo/orgs", "received_events_url": "https://api.github.com/users/nadavo/received_events", "repos_url": "https://api.github.com/users/nadavo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nadavo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nadavo/subscriptions", "type": "User", "url": "https://api.github.com/users/nadavo" }
BLEURT score calculation raises UnrecognizedFlagError
https://api.github.com/repos/huggingface/datasets/issues/1727/events
null
https://api.github.com/repos/huggingface/datasets/issues/1727/labels{/name}
2021-01-12T17:27:02Z
null
false
null
null
784,435,131
[]
https://api.github.com/repos/huggingface/datasets/issues/1727
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Calling the `compute` method for **bleurt** metric fails with an `UnrecognizedFlagError` for `FLAGS.bleurt_batch_size`. My environment: ``` python==3.8.5 datasets==1.2.0 tensorflow==2.3.1 cudatoolkit==11.0.221 ``` Test code for reproducing the error: ``` from datasets import load_metric bleurt = load_metric('bleurt') gen_text = "I am walking on the promenade today" ref_text = "I am walking along the promenade on this sunny day" bleurt.compute(predictions=[gen_text], references=[ref_text]) ``` Error Output: ``` Using default BLEURT-Base checkpoint for sequence maximum length 128. You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512'). INFO:tensorflow:Reading checkpoint /home/ubuntu/.cache/huggingface/metrics/bleurt/default/downloads/extracted/9aee35580225730ac5422599f35c4986e4c49cafd08082123342b1019720dac4/bleurt-base-128. INFO:tensorflow:Config file found, reading. INFO:tensorflow:Will load checkpoint bert_custom INFO:tensorflow:Performs basic checks... INFO:tensorflow:... name:bert_custom INFO:tensorflow:... vocab_file:vocab.txt INFO:tensorflow:... bert_config_file:bert_config.json INFO:tensorflow:... do_lower_case:True INFO:tensorflow:... max_seq_length:128 INFO:tensorflow:Creating BLEURT scorer. INFO:tensorflow:Loading model... INFO:tensorflow:BLEURT initialized. --------------------------------------------------------------------------- UnrecognizedFlagError Traceback (most recent call last) <ipython-input-12-8b3f4322318a> in <module> 2 gen_text = "I am walking on the promenade today" 3 ref_text = "I am walking along the promenade on this sunny day" ----> 4 bleurt.compute(predictions=[gen_text], references=[ref_text]) ~/anaconda3/envs/noved/lib/python3.8/site-packages/datasets/metric.py in compute(self, *args, **kwargs) 396 references = self.data["references"] 397 with temp_seed(self.seed): --> 398 output = self._compute(predictions=predictions, references=references, **kwargs) 399 400 if self.buf_writer is not None: ~/.cache/huggingface/modules/datasets_modules/metrics/bleurt/b1de33e1cbbcb1dbe276c887efa1fad68c6aff913885108078fa1ad408908778/bleurt.py in _compute(self, predictions, references) 103 104 def _compute(self, predictions, references): --> 105 scores = self.scorer.score(references=references, candidates=predictions) 106 return {"scores": scores} ~/anaconda3/envs/noved/lib/python3.8/site-packages/bleurt/score.py in score(self, references, candidates, batch_size) 164 """ 165 if not batch_size: --> 166 batch_size = FLAGS.bleurt_batch_size 167 168 candidates, references = list(candidates), list(references) ~/anaconda3/envs/noved/lib/python3.8/site-packages/tensorflow/python/platform/flags.py in __getattr__(self, name) 83 # a flag. 84 if not wrapped.is_parsed(): ---> 85 wrapped(_sys.argv) 86 return wrapped.__getattr__(name) 87 ~/anaconda3/envs/noved/lib/python3.8/site-packages/absl/flags/_flagvalues.py in __call__(self, argv, known_only) 643 for name, value in unknown_flags: 644 suggestions = _helpers.get_flag_suggestions(name, list(self)) --> 645 raise _exceptions.UnrecognizedFlagError( 646 name, value, suggestions=suggestions) 647 UnrecognizedFlagError: Unknown command line flag 'f' ``` Possible Fix: Modify `_compute` method https://github.com/huggingface/datasets/blob/7e64851a12263dc74d41c668167918484c8000ab/metrics/bleurt/bleurt.py#L104 to receive a `batch_size` argument, for example: ``` def _compute(self, predictions, references, batch_size=1): scores = self.scorer.score(references=references, candidates=predictions, batch_size=batch_size) return {"scores": scores} ```
2022-06-01T16:06:02Z
https://github.com/huggingface/datasets/issues/1727
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1727/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1726/comments
https://api.github.com/repos/huggingface/datasets/issues/1726/timeline
2021-01-19T16:42:32Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUzNTQ0ODg4
closed
[]
false
1,726
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Offline loading
https://api.github.com/repos/huggingface/datasets/issues/1726/events
null
https://api.github.com/repos/huggingface/datasets/issues/1726/labels{/name}
2021-01-12T15:21:57Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1726.diff", "html_url": "https://github.com/huggingface/datasets/pull/1726", "merged_at": "2021-01-19T16:42:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/1726.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1726" }
784,336,370
[]
https://api.github.com/repos/huggingface/datasets/issues/1726
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
As discussed in #824 it would be cool to make the library work in offline mode. Currently, if there's no internet connection, then modules (datasets or metrics) that have already been loaded in the past can't be loaded and a ConnectionError is raised. This is because `prepare_module` fetches online for the latest version of the module. To make it work in offline mode, one suggestion was to reload the latest local version of the module. I implemented that, and I also raise a warning saying that the module that is loaded is the latest local version. ```python logger.warning( f"Using the latest cached version of the module from {cached_module_path} since it " f"couldn't be found locally at {input_path} or remotely ({error_type_that_prevented_reaching_out_remote_stuff})." ) ``` I added tests to make sure it works as expected, and I needed to make a few changes in the code to be able to test things properly. In particular, I added a parameter `hf_modules_cache` to `init_dynamic_modules` for testing purposes. It makes it possible to have temporary module caches for testing. I also added an `offline` context utility that allows testing part of the code by making all the requests fail as if there was no internet. Close #824, close #761.
2022-02-15T10:32:10Z
https://github.com/huggingface/datasets/pull/1726
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 1, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1726/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1725/comments
https://api.github.com/repos/huggingface/datasets/issues/1725/timeline
2022-06-01T16:00:59Z
null
completed
MDU6SXNzdWU3ODQxODIyNzM=
closed
[]
null
1,725
{ "avatar_url": "https://avatars.githubusercontent.com/u/41193842?v=4", "events_url": "https://api.github.com/users/xinjicong/events{/privacy}", "followers_url": "https://api.github.com/users/xinjicong/followers", "following_url": "https://api.github.com/users/xinjicong/following{/other_user}", "gists_url": "https://api.github.com/users/xinjicong/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/xinjicong", "id": 41193842, "login": "xinjicong", "node_id": "MDQ6VXNlcjQxMTkzODQy", "organizations_url": "https://api.github.com/users/xinjicong/orgs", "received_events_url": "https://api.github.com/users/xinjicong/received_events", "repos_url": "https://api.github.com/users/xinjicong/repos", "site_admin": false, "starred_url": "https://api.github.com/users/xinjicong/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xinjicong/subscriptions", "type": "User", "url": "https://api.github.com/users/xinjicong" }
load the local dataset
https://api.github.com/repos/huggingface/datasets/issues/1725/events
null
https://api.github.com/repos/huggingface/datasets/issues/1725/labels{/name}
2021-01-12T12:12:55Z
null
false
null
null
784,182,273
[]
https://api.github.com/repos/huggingface/datasets/issues/1725
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Your guidebook's example is like >>> from datasets import load_dataset >>> dataset = load_dataset('json', data_files='my_file.json') but the first arg is a path... So what should I do if I want to load a local dataset for model training? I will be grateful if you can help me handle this problem! Thanks a lot!
2022-06-01T16:00:59Z
https://github.com/huggingface/datasets/issues/1725
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1725/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1723/comments
https://api.github.com/repos/huggingface/datasets/issues/1723/timeline
2021-01-26T17:02:08Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUzMjQ4MzU1
closed
[]
false
1,723
{ "avatar_url": "https://avatars.githubusercontent.com/u/32632186?v=4", "events_url": "https://api.github.com/users/philschmid/events{/privacy}", "followers_url": "https://api.github.com/users/philschmid/followers", "following_url": "https://api.github.com/users/philschmid/following{/other_user}", "gists_url": "https://api.github.com/users/philschmid/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/philschmid", "id": 32632186, "login": "philschmid", "node_id": "MDQ6VXNlcjMyNjMyMTg2", "organizations_url": "https://api.github.com/users/philschmid/orgs", "received_events_url": "https://api.github.com/users/philschmid/received_events", "repos_url": "https://api.github.com/users/philschmid/repos", "site_admin": false, "starred_url": "https://api.github.com/users/philschmid/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/philschmid/subscriptions", "type": "User", "url": "https://api.github.com/users/philschmid" }
ADD S3 support for downloading and uploading processed datasets
https://api.github.com/repos/huggingface/datasets/issues/1723/events
null
https://api.github.com/repos/huggingface/datasets/issues/1723/labels{/name}
2021-01-12T07:17:34Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1723.diff", "html_url": "https://github.com/huggingface/datasets/pull/1723", "merged_at": "2021-01-26T17:02:07Z", "patch_url": "https://github.com/huggingface/datasets/pull/1723.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1723" }
783,982,100
[]
https://api.github.com/repos/huggingface/datasets/issues/1723
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
# What does this PR do? This PR adds the functionality to load and save `datasets` from and to s3. You can save `datasets` with either `Dataset.save_to_disk()` or `DatasetDict.save_to_disk`. You can load `datasets` with either `load_from_disk` or `Dataset.load_from_disk()`, `DatasetDict.load_from_disk()`. Loading `csv` or `json` datasets from s3 is not implemented. To save/load datasets to s3 you either need to provide an `aws_profile`, which is set up on your machine, per default it uses the `default` profile or you have to pass an `aws_access_key_id` and `aws_secret_access_key`. The implementation was done with the `fsspec` and `boto3`. ### Example `aws_profile` : <details> ```python dataset.save_to_disk("s3://moto-mock-s3-bucket/datasets/sdk", aws_profile="hf-sm") load_from_disk("s3://moto-mock-s3-bucket/datasets/sdk", aws_profile="hf-sm") ``` </details> ### Example `aws_access_key_id` and `aws_secret_access_key` : <details> ```python dataset.save_to_disk("s3://moto-mock-s3-bucket/datasets/sdk", aws_access_key_id="fake_access_key", aws_secret_access_key="fake_secret_key" ) load_from_disk("s3://moto-mock-s3-bucket/datasets/sdk", aws_access_key_id="fake_access_key", aws_secret_access_key="fake_secret_key" ) ``` </details> If you want to load a dataset from a public s3 bucket you can pass `anon=True` ### Example `anon=True` : <details> ```python dataset.save_to_disk("s3://moto-mock-s3-bucket/datasets/sdk", aws_profile="hf-sm") load_from_disk("s3://moto-mock-s3-bucketdatasets/sdk",anon=True) ``` </details> ### Full Example ```python import datasets dataset = datasets.load_dataset("imdb") print(f"DatasetDict contains {len(dataset)} datasets") print(f"train Dataset has the size of: {len(dataset['train'])}") dataset.save_to_disk("s3://moto-mock-s3-bucket/datasets/sdk", aws_profile="hf-sm") remote_dataset = datasets.load_from_disk("s3://moto-mock-s3-bucket/datasets/sdk", aws_profile="hf-sm") print(f"DatasetDict contains {len(remote_dataset)} datasets") print(f"train Dataset has the size of: {len(remote_dataset['train'])}") ``` Related to #878 I would also adjust the documentation after the code would be reviewed, as long as I leave the PR in "draft" status. Something that we can consider is renaming the functions and changing the `_disk` maybe to `_filesystem`
2021-01-26T17:02:08Z
https://github.com/huggingface/datasets/pull/1723
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 3, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/1723/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1724/comments
https://api.github.com/repos/huggingface/datasets/issues/1724/timeline
2022-10-05T12:39:07Z
null
completed
MDU6SXNzdWU3ODQwMjMzMzg=
closed
[]
null
1,724
{ "avatar_url": "https://avatars.githubusercontent.com/u/49967236?v=4", "events_url": "https://api.github.com/users/lkcao/events{/privacy}", "followers_url": "https://api.github.com/users/lkcao/followers", "following_url": "https://api.github.com/users/lkcao/following{/other_user}", "gists_url": "https://api.github.com/users/lkcao/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lkcao", "id": 49967236, "login": "lkcao", "node_id": "MDQ6VXNlcjQ5OTY3MjM2", "organizations_url": "https://api.github.com/users/lkcao/orgs", "received_events_url": "https://api.github.com/users/lkcao/received_events", "repos_url": "https://api.github.com/users/lkcao/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lkcao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lkcao/subscriptions", "type": "User", "url": "https://api.github.com/users/lkcao" }
could not run models on a offline server successfully
https://api.github.com/repos/huggingface/datasets/issues/1724/events
null
https://api.github.com/repos/huggingface/datasets/issues/1724/labels{/name}
2021-01-12T06:08:06Z
null
false
null
null
784,023,338
[]
https://api.github.com/repos/huggingface/datasets/issues/1724
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I really need your help with this. I am trying to fine-tune a RoBERTa model on a remote server that strictly bans internet access. I tried to install all the packages by hand and to run run_mlm.py on the server. It works well on Colab, but when I try to run it on this offline server, it shows: ![image](https://user-images.githubusercontent.com/49967236/104276256-25a88600-546a-11eb-9776-8ec695dfa24e.png) Is there anything I can do? Is it possible to download all the things in the cache and upload them to the server? Please help me out...
2022-10-05T12:39:07Z
https://github.com/huggingface/datasets/issues/1724
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1724/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1722/comments
https://api.github.com/repos/huggingface/datasets/issues/1722/timeline
2021-01-12T17:35:57Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUzMTk3MTg4
closed
[]
false
1,722
{ "avatar_url": "https://avatars.githubusercontent.com/u/11708999?v=4", "events_url": "https://api.github.com/users/mounicam/events{/privacy}", "followers_url": "https://api.github.com/users/mounicam/followers", "following_url": "https://api.github.com/users/mounicam/following{/other_user}", "gists_url": "https://api.github.com/users/mounicam/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mounicam", "id": 11708999, "login": "mounicam", "node_id": "MDQ6VXNlcjExNzA4OTk5", "organizations_url": "https://api.github.com/users/mounicam/orgs", "received_events_url": "https://api.github.com/users/mounicam/received_events", "repos_url": "https://api.github.com/users/mounicam/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mounicam/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mounicam/subscriptions", "type": "User", "url": "https://api.github.com/users/mounicam" }
Added unfiltered versions of the Wiki-Auto training data for the GEM simplification task.
https://api.github.com/repos/huggingface/datasets/issues/1722/events
null
https://api.github.com/repos/huggingface/datasets/issues/1722/labels{/name}
2021-01-12T05:26:04Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1722.diff", "html_url": "https://github.com/huggingface/datasets/pull/1722", "merged_at": "2021-01-12T17:35:57Z", "patch_url": "https://github.com/huggingface/datasets/pull/1722.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1722" }
783,921,679
[]
https://api.github.com/repos/huggingface/datasets/issues/1722
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
2021-01-12T18:14:53Z
https://github.com/huggingface/datasets/pull/1722
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1722/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1721/comments
https://api.github.com/repos/huggingface/datasets/issues/1721/timeline
2021-01-12T11:41:47Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUzMTIyODQ5
closed
[]
false
1,721
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[Scientific papers] Mirror datasets zip
https://api.github.com/repos/huggingface/datasets/issues/1721/events
null
https://api.github.com/repos/huggingface/datasets/issues/1721/labels{/name}
2021-01-12T01:15:40Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1721.diff", "html_url": "https://github.com/huggingface/datasets/pull/1721", "merged_at": "2021-01-12T11:41:47Z", "patch_url": "https://github.com/huggingface/datasets/pull/1721.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1721" }
783,828,428
[]
https://api.github.com/repos/huggingface/datasets/issues/1721
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Datasets were uploaded to https://s3.amazonaws.com/datasets.huggingface.co/scientific_papers/1.1.1/arxiv-dataset.zip and https://s3.amazonaws.com/datasets.huggingface.co/scientific_papers/1.1.1/pubmed-dataset.zip respectively, to escape the Google Drive quota and enable faster downloads.
2021-01-12T11:49:15Z
https://github.com/huggingface/datasets/pull/1721
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1721/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1720/comments
https://api.github.com/repos/huggingface/datasets/issues/1720/timeline
2021-03-31T14:13:17Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUzMDM0MzYx
closed
[]
false
1,720
{ "avatar_url": "https://avatars.githubusercontent.com/u/173537?v=4", "events_url": "https://api.github.com/users/versae/events{/privacy}", "followers_url": "https://api.github.com/users/versae/followers", "following_url": "https://api.github.com/users/versae/following{/other_user}", "gists_url": "https://api.github.com/users/versae/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/versae", "id": 173537, "login": "versae", "node_id": "MDQ6VXNlcjE3MzUzNw==", "organizations_url": "https://api.github.com/users/versae/orgs", "received_events_url": "https://api.github.com/users/versae/received_events", "repos_url": "https://api.github.com/users/versae/repos", "site_admin": false, "starred_url": "https://api.github.com/users/versae/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/versae/subscriptions", "type": "User", "url": "https://api.github.com/users/versae" }
Adding the NorNE dataset for NER
https://api.github.com/repos/huggingface/datasets/issues/1720/events
null
https://api.github.com/repos/huggingface/datasets/issues/1720/labels{/name}
2021-01-11T21:34:13Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1720.diff", "html_url": "https://github.com/huggingface/datasets/pull/1720", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1720.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1720" }
783,721,833
[]
https://api.github.com/repos/huggingface/datasets/issues/1720
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
NorNE is a manually annotated corpus of named entities which extends the annotation of the existing Norwegian Dependency Treebank. Comprising both of the official standards of written Norwegian (Bokmål and Nynorsk), the corpus contains around 600,000 tokens and annotates a rich set of entity types including persons, organizations, locations, geo-political entities, products, and events, in addition to a class corresponding to nominals derived from names.
2021-03-31T14:23:49Z
https://github.com/huggingface/datasets/pull/1720
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1720/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1719/comments
https://api.github.com/repos/huggingface/datasets/issues/1719/timeline
2021-01-11T18:45:02Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUyODk3MzY4
closed
[]
false
1,719
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Fix column list comparison in transmit format
https://api.github.com/repos/huggingface/datasets/issues/1719/events
null
https://api.github.com/repos/huggingface/datasets/issues/1719/labels{/name}
2021-01-11T17:23:56Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1719.diff", "html_url": "https://github.com/huggingface/datasets/pull/1719", "merged_at": "2021-01-11T18:45:02Z", "patch_url": "https://github.com/huggingface/datasets/pull/1719.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1719" }
783,557,542
[]
https://api.github.com/repos/huggingface/datasets/issues/1719
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
As noticed in #1718 the cache might not reload the cache files when new columns were added. This is because of an issue in `transmit_format` where the column list comparison fails because the order was not deterministic. This causes the `transmit_format` to apply an unnecessary `set_format` transform with shuffled column names. I fixed that by sorting the columns for the comparison and added a test. To properly test that I added a third column `col_3` to the dummy_dataset used for tests.
2021-01-11T18:45:03Z
https://github.com/huggingface/datasets/pull/1719
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1719/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1718/comments
https://api.github.com/repos/huggingface/datasets/issues/1718/timeline
2021-01-26T02:47:59Z
null
completed
MDU6SXNzdWU3ODM0NzQ3NTM=
closed
[]
null
1,718
{ "avatar_url": "https://avatars.githubusercontent.com/u/18296312?v=4", "events_url": "https://api.github.com/users/ofirzaf/events{/privacy}", "followers_url": "https://api.github.com/users/ofirzaf/followers", "following_url": "https://api.github.com/users/ofirzaf/following{/other_user}", "gists_url": "https://api.github.com/users/ofirzaf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ofirzaf", "id": 18296312, "login": "ofirzaf", "node_id": "MDQ6VXNlcjE4Mjk2MzEy", "organizations_url": "https://api.github.com/users/ofirzaf/orgs", "received_events_url": "https://api.github.com/users/ofirzaf/received_events", "repos_url": "https://api.github.com/users/ofirzaf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ofirzaf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ofirzaf/subscriptions", "type": "User", "url": "https://api.github.com/users/ofirzaf" }
Possible cache miss in datasets
https://api.github.com/repos/huggingface/datasets/issues/1718/events
null
https://api.github.com/repos/huggingface/datasets/issues/1718/labels{/name}
2021-01-11T15:37:31Z
null
false
null
null
783,474,753
[]
https://api.github.com/repos/huggingface/datasets/issues/1718
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I am using the datasets package and even though I run the same data processing functions, datasets always recomputes the function instead of using the cache. I have attached an example script that for me reproduces the problem. In the attached example the second map function always recomputes instead of loading from cache. Is this a bug or am I doing something wrong? Is there a way to fix this and avoid all the recomputation? Thanks Edit: transformers==3.5.1 datasets==1.2.0 ``` from datasets import load_dataset from transformers import AutoTokenizer datasets = load_dataset('wikitext', 'wikitext-103-raw-v1') tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', use_fast=True) column_names = datasets["train"].column_names text_column_name = "text" if "text" in column_names else column_names[0] def tokenize_function(examples): return tokenizer(examples[text_column_name], return_special_tokens_mask=True) tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=60, remove_columns=[text_column_name], load_from_cache_file=True, ) max_seq_length = tokenizer.model_max_length def group_texts(examples): # Concatenate all texts. concatenated_examples = { k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. total_length = (total_length // max_seq_length) * max_seq_length # Split by chunks of max_len. result = { k: [t[i: i + max_seq_length] for i in range(0, total_length, max_seq_length)] for k, t in concatenated_examples.items() } return result tokenized_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=60, load_from_cache_file=True, ) print(tokenized_datasets) print('finished') ```
2022-06-29T14:54:42Z
https://github.com/huggingface/datasets/issues/1718
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/1718/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1717/comments
https://api.github.com/repos/huggingface/datasets/issues/1717/timeline
2021-01-26T02:52:17Z
null
completed
MDU6SXNzdWU3ODMwNzQyNTU=
closed
[]
null
1,717
{ "avatar_url": "https://avatars.githubusercontent.com/u/3091916?v=4", "events_url": "https://api.github.com/users/dwadden/events{/privacy}", "followers_url": "https://api.github.com/users/dwadden/followers", "following_url": "https://api.github.com/users/dwadden/following{/other_user}", "gists_url": "https://api.github.com/users/dwadden/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dwadden", "id": 3091916, "login": "dwadden", "node_id": "MDQ6VXNlcjMwOTE5MTY=", "organizations_url": "https://api.github.com/users/dwadden/orgs", "received_events_url": "https://api.github.com/users/dwadden/received_events", "repos_url": "https://api.github.com/users/dwadden/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dwadden/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dwadden/subscriptions", "type": "User", "url": "https://api.github.com/users/dwadden" }
SciFact dataset - minor changes
https://api.github.com/repos/huggingface/datasets/issues/1717/events
null
https://api.github.com/repos/huggingface/datasets/issues/1717/labels{/name}
2021-01-11T05:26:40Z
null
false
null
null
783,074,255
[]
https://api.github.com/repos/huggingface/datasets/issues/1717
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hi, SciFact dataset creator here. First of all, thanks for adding the dataset to Huggingface, much appreciated! I'd like to make a few minor changes, including the citation information and the `_URL` from which to download the dataset. Can I submit a PR for this? It also looks like the dataset is being downloaded directly from Huggingface's Google cloud account rather than via the `_URL` in [scifact.py](https://github.com/huggingface/datasets/blob/master/datasets/scifact/scifact.py). Can you help me update the version on gcloud? Thanks, Dave
2021-01-26T02:52:17Z
https://github.com/huggingface/datasets/issues/1717
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1717/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1716/comments
https://api.github.com/repos/huggingface/datasets/issues/1716/timeline
2021-01-18T14:21:42Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUyMjgzNzE5
closed
[]
false
1,716
{ "avatar_url": "https://avatars.githubusercontent.com/u/48222101?v=4", "events_url": "https://api.github.com/users/kushal2000/events{/privacy}", "followers_url": "https://api.github.com/users/kushal2000/followers", "following_url": "https://api.github.com/users/kushal2000/following{/other_user}", "gists_url": "https://api.github.com/users/kushal2000/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/kushal2000", "id": 48222101, "login": "kushal2000", "node_id": "MDQ6VXNlcjQ4MjIyMTAx", "organizations_url": "https://api.github.com/users/kushal2000/orgs", "received_events_url": "https://api.github.com/users/kushal2000/received_events", "repos_url": "https://api.github.com/users/kushal2000/repos", "site_admin": false, "starred_url": "https://api.github.com/users/kushal2000/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kushal2000/subscriptions", "type": "User", "url": "https://api.github.com/users/kushal2000" }
Add Hatexplain Dataset
https://api.github.com/repos/huggingface/datasets/issues/1716/events
null
https://api.github.com/repos/huggingface/datasets/issues/1716/labels{/name}
2021-01-10T13:30:01Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1716.diff", "html_url": "https://github.com/huggingface/datasets/pull/1716", "merged_at": "2021-01-18T14:21:42Z", "patch_url": "https://github.com/huggingface/datasets/pull/1716.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1716" }
782,819,006
[]
https://api.github.com/repos/huggingface/datasets/issues/1716
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Adding Hatexplain - the first benchmark hate speech dataset covering multiple aspects of the issue
2021-01-18T14:21:42Z
https://github.com/huggingface/datasets/pull/1716
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1716/reactions" }
true
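For reference, a hedged usage sketch for the dataset added above, assuming it is published under the identifier `hatexplain` with a standard `train` split; the exact identifier, configurations, and fields may differ.
```python
# Hedged usage sketch: the identifier "hatexplain" and the "train" split are assumptions.
from datasets import load_dataset

dataset = load_dataset("hatexplain", split="train")
print(dataset)      # features and number of rows
print(dataset[0])   # inspect one annotated post
```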
https://api.github.com/repos/huggingface/datasets/issues/1715/comments
https://api.github.com/repos/huggingface/datasets/issues/1715/timeline
2021-01-12T17:14:33Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUyMjM2NDA5
closed
[]
false
1,715
{ "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stevhliu", "id": 59462357, "login": "stevhliu", "node_id": "MDQ6VXNlcjU5NDYyMzU3", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "repos_url": "https://api.github.com/users/stevhliu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "type": "User", "url": "https://api.github.com/users/stevhliu" }
add Korean intonation-aided intention identification dataset
https://api.github.com/repos/huggingface/datasets/issues/1715/events
null
https://api.github.com/repos/huggingface/datasets/issues/1715/labels{/name}
2021-01-10T06:29:04Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1715.diff", "html_url": "https://github.com/huggingface/datasets/pull/1715", "merged_at": "2021-01-12T17:14:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/1715.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1715" }
782,754,441
[]
https://api.github.com/repos/huggingface/datasets/issues/1715
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
2021-09-17T16:54:13Z
https://github.com/huggingface/datasets/pull/1715
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1715/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1714/comments
https://api.github.com/repos/huggingface/datasets/issues/1714/timeline
2021-01-13T16:05:24Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUxOTc3MDA0
closed
[]
false
1,714
{ "avatar_url": "https://avatars.githubusercontent.com/u/15869827?v=4", "events_url": "https://api.github.com/users/maxbartolo/events{/privacy}", "followers_url": "https://api.github.com/users/maxbartolo/followers", "following_url": "https://api.github.com/users/maxbartolo/following{/other_user}", "gists_url": "https://api.github.com/users/maxbartolo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/maxbartolo", "id": 15869827, "login": "maxbartolo", "node_id": "MDQ6VXNlcjE1ODY5ODI3", "organizations_url": "https://api.github.com/users/maxbartolo/orgs", "received_events_url": "https://api.github.com/users/maxbartolo/received_events", "repos_url": "https://api.github.com/users/maxbartolo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/maxbartolo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/maxbartolo/subscriptions", "type": "User", "url": "https://api.github.com/users/maxbartolo" }
Adding adversarialQA dataset
https://api.github.com/repos/huggingface/datasets/issues/1714/events
null
https://api.github.com/repos/huggingface/datasets/issues/1714/labels{/name}
2021-01-08T21:46:09Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1714.diff", "html_url": "https://github.com/huggingface/datasets/pull/1714", "merged_at": "2021-01-13T16:05:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/1714.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1714" }
782,416,276
[]
https://api.github.com/repos/huggingface/datasets/issues/1714
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Adding the adversarialQA dataset (https://adversarialqa.github.io/) from Beat the AI (https://arxiv.org/abs/2002.00293)
2021-01-13T16:05:24Z
https://github.com/huggingface/datasets/pull/1714
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1714/reactions" }
true
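A hedged usage sketch for the addition above, assuming the dataset lands under the identifier `adversarial_qa` with an `adversarialQA` configuration combining the collection rounds, and SQuAD-style fields; check the final dataset card for the exact names.
```python
# Hedged usage sketch: the identifier, configuration name, and field names are assumptions.
from datasets import load_dataset

dataset = load_dataset("adversarial_qa", "adversarialQA", split="validation")
example = dataset[0]
print(example["question"])
print(example["context"][:200])
print(example["answers"])
```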
https://api.github.com/repos/huggingface/datasets/issues/1713/comments
https://api.github.com/repos/huggingface/datasets/issues/1713/timeline
2021-09-17T12:47:40Z
null
completed
MDU6SXNzdWU3ODIzMzc3MjM=
closed
[]
null
1,713
{ "avatar_url": "https://avatars.githubusercontent.com/u/9393002?v=4", "events_url": "https://api.github.com/users/pranav-s/events{/privacy}", "followers_url": "https://api.github.com/users/pranav-s/followers", "following_url": "https://api.github.com/users/pranav-s/following{/other_user}", "gists_url": "https://api.github.com/users/pranav-s/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/pranav-s", "id": 9393002, "login": "pranav-s", "node_id": "MDQ6VXNlcjkzOTMwMDI=", "organizations_url": "https://api.github.com/users/pranav-s/orgs", "received_events_url": "https://api.github.com/users/pranav-s/received_events", "repos_url": "https://api.github.com/users/pranav-s/repos", "site_admin": false, "starred_url": "https://api.github.com/users/pranav-s/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pranav-s/subscriptions", "type": "User", "url": "https://api.github.com/users/pranav-s" }
Installation using conda
https://api.github.com/repos/huggingface/datasets/issues/1713/events
null
https://api.github.com/repos/huggingface/datasets/issues/1713/labels{/name}
2021-01-08T19:12:15Z
null
false
null
null
782,337,723
[]
https://api.github.com/repos/huggingface/datasets/issues/1713
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Will a conda package for installing datasets be added to the huggingface conda channel? I have installed transformers using conda and would like to use the datasets library to run some of the scripts in the transformers/examples folder, but I am unable to do so at the moment: datasets can only be installed with pip, and using pip inside a conda environment is generally a bad idea in my experience.
2021-09-17T12:47:40Z
https://github.com/huggingface/datasets/issues/1713
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1713/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1712/comments
https://api.github.com/repos/huggingface/datasets/issues/1712/timeline
2021-01-21T10:31:11Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUxODkxMDk4
closed
[]
false
1,712
{ "avatar_url": "https://avatars.githubusercontent.com/u/1551356?v=4", "events_url": "https://api.github.com/users/eusip/events{/privacy}", "followers_url": "https://api.github.com/users/eusip/followers", "following_url": "https://api.github.com/users/eusip/following{/other_user}", "gists_url": "https://api.github.com/users/eusip/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/eusip", "id": 1551356, "login": "eusip", "node_id": "MDQ6VXNlcjE1NTEzNTY=", "organizations_url": "https://api.github.com/users/eusip/orgs", "received_events_url": "https://api.github.com/users/eusip/received_events", "repos_url": "https://api.github.com/users/eusip/repos", "site_admin": false, "starred_url": "https://api.github.com/users/eusip/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eusip/subscriptions", "type": "User", "url": "https://api.github.com/users/eusip" }
Silicone
https://api.github.com/repos/huggingface/datasets/issues/1712/events
null
https://api.github.com/repos/huggingface/datasets/issues/1712/labels{/name}
2021-01-08T18:24:18Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1712.diff", "html_url": "https://github.com/huggingface/datasets/pull/1712", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1712.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1712" }
782,313,097
[]
https://api.github.com/repos/huggingface/datasets/issues/1712
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
My collaborators and I on the Affective Computing team at Telecom Paris would like to push our spoken dialogue dataset for publication.
2021-01-21T14:12:37Z
https://github.com/huggingface/datasets/pull/1712
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1712/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1711/comments
https://api.github.com/repos/huggingface/datasets/issues/1711/timeline
2021-01-11T09:23:19Z
null
null
MDExOlB1bGxSZXF1ZXN0NTUxNzQxODA2
closed
[]
false
1,711
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Fix windows path scheme in cached path
https://api.github.com/repos/huggingface/datasets/issues/1711/events
null
https://api.github.com/repos/huggingface/datasets/issues/1711/labels{/name}
2021-01-08T13:45:56Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1711.diff", "html_url": "https://github.com/huggingface/datasets/pull/1711", "merged_at": "2021-01-11T09:23:19Z", "patch_url": "https://github.com/huggingface/datasets/pull/1711.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1711" }
782,129,083
[]
https://api.github.com/repos/huggingface/datasets/issues/1711
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
As noticed in #807, there's currently an issue with `cached_path` not raising `FileNotFoundError` on Windows for absolute paths. This is due to the way we check whether a path is local or not: the check on the scheme using urlparse was incomplete. I fixed this and added tests.
2021-01-11T09:23:20Z
https://github.com/huggingface/datasets/pull/1711
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1711/reactions" }
true
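The incomplete scheme check described above can be illustrated as follows: on Windows, an absolute path such as `C:\Users\...` is parsed by `urlparse` as having scheme `c`, so a naive "non-empty scheme means remote URL" test misclassifies it, and the local-file branch that would raise `FileNotFoundError` is never reached. The snippet below demonstrates the pitfall with one possible allow-list guard; it is not the actual patch.
```python
# Demonstration of the urlparse scheme pitfall; not the actual fix from this PR.
from urllib.parse import urlparse

def is_remote_url_naive(path: str) -> bool:
    # Incomplete check: a Windows drive letter ("C:\\...") parses as scheme "c"
    return urlparse(path).scheme != ""

def is_remote_url_allowlist(path: str) -> bool:
    # One possible approach: only treat well-known remote schemes as URLs
    return urlparse(path).scheme in {"http", "https", "ftp", "s3", "gs", "hdfs"}

print(is_remote_url_naive(r"C:\Users\me\missing_file.txt"))      # True  (misclassified as remote)
print(is_remote_url_allowlist(r"C:\Users\me\missing_file.txt"))  # False (treated as a local path)
print(is_remote_url_allowlist("https://example.com/file.txt"))   # True
```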
https://api.github.com/repos/huggingface/datasets/issues/1710/comments
https://api.github.com/repos/huggingface/datasets/issues/1710/timeline
2022-08-04T11:55:04Z
null
completed
MDU6SXNzdWU3ODE5MTQ5NTE=
closed
[]
null
1,710
{ "avatar_url": "https://avatars.githubusercontent.com/u/5771366?v=4", "events_url": "https://api.github.com/users/fredriko/events{/privacy}", "followers_url": "https://api.github.com/users/fredriko/followers", "following_url": "https://api.github.com/users/fredriko/following{/other_user}", "gists_url": "https://api.github.com/users/fredriko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/fredriko", "id": 5771366, "login": "fredriko", "node_id": "MDQ6VXNlcjU3NzEzNjY=", "organizations_url": "https://api.github.com/users/fredriko/orgs", "received_events_url": "https://api.github.com/users/fredriko/received_events", "repos_url": "https://api.github.com/users/fredriko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/fredriko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fredriko/subscriptions", "type": "User", "url": "https://api.github.com/users/fredriko" }
IsADirectoryError when trying to download C4
https://api.github.com/repos/huggingface/datasets/issues/1710/events
null
https://api.github.com/repos/huggingface/datasets/issues/1710/labels{/name}
2021-01-08T07:31:30Z
null
false
null
null
781,914,951
[]
https://api.github.com/repos/huggingface/datasets/issues/1710
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
**TLDR**: I fail to download C4 and see a stacktrace originating in `IsADirectoryError` as an explanation for failure. How can the problem be fixed? **VERBOSE**: I use Python version 3.7 and have the following dependencies listed in my project: ``` datasets==1.2.0 apache-beam==2.26.0 ``` When running the following code, where `/data/huggingface/unpacked/` contains a single unzipped `wet.paths` file manually downloaded as per the instructions for C4: ``` from datasets import load_dataset load_dataset("c4", "en", data_dir="/data/huggingface/unpacked", beam_runner='DirectRunner') ``` I get the following stacktrace: ``` /Users/fredriko/venv/misc/bin/python /Users/fredriko/source/misc/main.py Downloading and preparing dataset c4/en (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /Users/fredriko/.cache/huggingface/datasets/c4/en/2.3.0/8304cf264cc42bdebcb13fca4b9cb36368a96f557d36f9dc969bebbe2568b283... Traceback (most recent call last): File "/Users/fredriko/source/misc/main.py", line 3, in <module> load_dataset("c4", "en", data_dir="/data/huggingface/unpacked", beam_runner='DirectRunner') File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/load.py", line 612, in load_dataset ignore_verifications=ignore_verifications, File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/builder.py", line 527, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/builder.py", line 1066, in _download_and_prepare pipeline=pipeline, File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/builder.py", line 582, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/Users/fredriko/.cache/huggingface/modules/datasets_modules/datasets/c4/8304cf264cc42bdebcb13fca4b9cb36368a96f557d36f9dc969bebbe2568b283/c4.py", line 190, in _split_generators file_paths = dl_manager.download_and_extract(files_to_download) File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 258, in download_and_extract return self.extract(self.download(url_or_urls)) File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 189, in download self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 117, in _record_sizes_checksums self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict(path) File "/Users/fredriko/venv/misc/lib/python3.7/site-packages/datasets/utils/info_utils.py", line 80, in get_size_checksum_dict with open(path, "rb") as f: IsADirectoryError: [Errno 21] Is a directory: '/' Process finished with exit code 1 ```
2022-08-04T11:56:10Z
https://github.com/huggingface/datasets/issues/1710
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1710/reactions" }
false
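The traceback above ends in `get_size_checksum_dict` opening the resolved path `'/'` as a file, which fails because the path is a directory. The sketch below reproduces that failure mode and shows one possible guard; it is an illustration under that assumption, not the library's actual fix.
```python
# Reproduction sketch of the failure mode above; not the library's actual fix.
import hashlib
import os

def get_size_checksum_naive(path: str) -> dict:
    m = hashlib.sha256()
    with open(path, "rb") as f:  # raises IsADirectoryError if path is a directory
        for chunk in iter(lambda: f.read(1 << 20), b""):
            m.update(chunk)
    return {"num_bytes": os.path.getsize(path), "checksum": m.hexdigest()}

def get_size_checksum_guarded(path: str) -> dict:
    # One possible guard: fail early with a clearer error for directory paths,
    # which typically indicate that a download URL resolved to a directory.
    if os.path.isdir(path):
        raise ValueError(f"Expected a file to checksum, got a directory: {path}")
    return get_size_checksum_naive(path)

# get_size_checksum_naive("/") raises IsADirectoryError on POSIX, matching the report.
```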