Tasks: Other · Modalities: Text · Formats: parquet · Languages: English · Libraries: Datasets, Dask
albertvillanova (HF staff) committed
Commit 9157196 · 1 parent: 3d1fe33

Convert dataset to Parquet (#1)

- Convert dataset to Parquet (623cb1688ac1db6682ece4c2f44c5d8a8b579939)
- Add open data files (50f05aca5331743829881cad8eebc99fdd4b5d1f)
- Delete loading script (8200d1c49d753c3d1f89b20dd6b62af5fe5034ac)
- Delete legacy dataset_infos.json (b22003ec2ea344c5aea879a41f566919b11266f2)
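
Taken together, these commits replace the script-based loader with auto-converted Parquet shards. One way to inspect the resulting repository layout is sketched below with `huggingface_hub`, under the assumption that the dataset repo id is `ascent_kb`:

```python
from huggingface_hub import list_repo_files

# List the files now present in the dataset repo (repo id assumed to be "ascent_kb"):
# six Parquet shards per config plus the README, with ascent_kb.py and
# dataset_infos.json removed by this commit.
files = list_repo_files("ascent_kb", repo_type="dataset")
for f in sorted(files):
    print(f)
```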

README.md CHANGED
@@ -47,10 +47,10 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 2976697816
+    num_bytes: 2976665740
     num_examples: 8904060
-  download_size: 710727536
-  dataset_size: 2976697816
+  download_size: 898478552
+  dataset_size: 2976665740
 - config_name: open
   features:
   - name: subject
@@ -77,10 +77,20 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 2882678298
+    num_bytes: 2882646222
     num_examples: 8904060
-  download_size: 710727536
-  dataset_size: 2882678298
+  download_size: 900156754
+  dataset_size: 2882646222
+configs:
+- config_name: canonical
+  data_files:
+  - split: train
+    path: canonical/train-*
+  default: true
+- config_name: open
+  data_files:
+  - split: train
+    path: open/train-*
 ---
 
 # Dataset Card for Ascent KB
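
The new `configs:` block maps each builder configuration to its Parquet shards, so the dataset loads without running a custom script. A minimal usage sketch with the `datasets` library, assuming the dataset repo id is `ascent_kb`:

```python
from datasets import load_dataset

# With the configs: mapping above, load_dataset resolves the Parquet shards directly.
# "canonical" is the default config (default: true); "open" is also available.
canonical = load_dataset("ascent_kb", "canonical", split="train")
open_kb = load_dataset("ascent_kb", "open", split="train")

print(canonical)     # Dataset with 8,904,060 rows
print(canonical[0])  # one <arg1 ; rel ; arg2> assertion with facets and source sentences
```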
ascent_kb.py DELETED
@@ -1,147 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Ascent KB: A Deep Commonsense Knowledge Base"""
-
-import json
-
-import datasets
-
-
-_CITATION = """\
-@InProceedings{nguyen2021www,
- title={Advanced Semantics for Commonsense Knowledge Extraction},
- author={Nguyen, Tuan-Phong and Razniewski, Simon and Weikum, Gerhard},
- year={2021},
- booktitle={The Web Conference 2021},
-}
-"""
-
-_DESCRIPTION = """\
-This dataset contains 8.9M commonsense assertions extracted by the Ascent pipeline (https://ascent.mpi-inf.mpg.de/).
-"""
-
-_HOMEPAGE = "https://ascent.mpi-inf.mpg.de/"
-
-_LICENSE = "The Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/"
-
-# The HuggingFace dataset library don't host the datasets but only point to the original files
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-
-_URL = "https://nextcloud.mpi-klsb.mpg.de/index.php/s/dFLdTQHqiFrt3Q3/download"
-
-
-# DONE: Name of the dataset usually match the script name with CamelCase instead of snake_case
-class AscentKB(datasets.GeneratorBasedBuilder):
-    """Ascent KB: A Deep Commonsense Knowledge Base. Version 1.0.0."""
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="canonical",
-            version=VERSION,
-            description="This KB contains <arg1 ; rel ; arg2> \
-            assertions where relations are canonicalized based on ConceptNet relations.",
-        ),
-        datasets.BuilderConfig(
-            name="open",
-            version=VERSION,
-            description="This KB contains open assertions of the form \
-            <subject ; predicate ; object> extracted directly from web contents.",
-        ),
-    ]
-
-    DEFAULT_CONFIG_NAME = "canonical"
-
-    def _info(self):
-        if self.config.name == "canonical":
-            features = datasets.Features(
-                {
-                    "arg1": datasets.Value("string"),
-                    "rel": datasets.Value("string"),
-                    "arg2": datasets.Value("string"),
-                    "support": datasets.Value("int64"),
-                    "facets": [
-                        {
-                            "value": datasets.Value("string"),
-                            "type": datasets.Value("string"),
-                            "support": datasets.Value("int64"),
-                        }
-                    ],
-                    "source_sentences": [{"text": datasets.Value("string"), "source": datasets.Value("string")}],
-                }
-            )
-        else:  # features for the "open" part
-            features = datasets.Features(
-                {
-                    "subject": datasets.Value("string"),
-                    "predicate": datasets.Value("string"),
-                    "object": datasets.Value("string"),
-                    "support": datasets.Value("int64"),
-                    "facets": [
-                        {
-                            "value": datasets.Value("string"),
-                            "type": datasets.Value("string"),
-                            "support": datasets.Value("int64"),
-                        }
-                    ],
-                    "source_sentences": [{"text": datasets.Value("string"), "source": datasets.Value("string")}],
-                }
-            )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # my_urls = _URLs[self.config.name]
-        # data_file = dl_manager.download_and_extract(my_urls)
-
-        data_file = dl_manager.download_and_extract(_URL)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": data_file,
-                    "split": "train",
-                },
-            ),
-        ]
-
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
-        """Yields examples as (key, example) tuples."""
-        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is here for legacy reason (tfds) and is not important in itself.
-
-        with open(filepath, encoding="utf-8") as f:
-            for id_, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "canonical":
-                    data.pop("subject")
-                    data.pop("predicate")
-                    data.pop("object")
-                    yield id_, data
-                else:  # "open"
-                    data.pop("arg1")
-                    data.pop("rel")
-                    data.pop("arg2")
-                    yield id_, data
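
For reference, the removed builder's core logic was a plain JSON-lines pass over the original dump: one record per line, with the canonical config dropping the open-triple fields and the open config dropping the canonicalized ones. A standalone sketch of that behaviour, assuming the dump has been downloaded to a local `ascent_kb.jsonl` (hypothetical path):

```python
import json

# Standalone sketch of the deleted _generate_examples logic.
# "ascent_kb.jsonl" is a hypothetical local path to the original JSON-lines dump
# (the script fetched it from the _URL shown above).
def iter_examples(filepath="ascent_kb.jsonl", config_name="canonical"):
    # Each line holds both the canonicalized triple (arg1/rel/arg2) and the
    # open triple (subject/predicate/object); each config keeps only one of them.
    drop = ("subject", "predicate", "object") if config_name == "canonical" else ("arg1", "rel", "arg2")
    with open(filepath, encoding="utf-8") as f:
        for id_, row in enumerate(f):
            data = json.loads(row)
            for key in drop:
                data.pop(key, None)
            yield id_, data
```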
canonical/train-00000-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd97aa1bcb7808054a141a725bc0e141c79d21dd860182186141ebb0d0ada9d1
+size 150253225
canonical/train-00001-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0bee136e4edfd74f3a69d855c31fe75ff15bbd202f060c9d0bb61bebbd565a1
+size 150167669
canonical/train-00002-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77a6981079a42454ac3e1541c8642be2e6c8fed76d0e07aa3ee4f8e05b313cae
+size 149005425
canonical/train-00003-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0aa575080c5e910e3b7a3163990adbfd44b951c3f5c5823182768ccf100b899b
+size 150986419
canonical/train-00004-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8541744e2865942e73b4396b46e209f4c7045cb05d63f73d023e411f95bcbaef
+size 149491427
canonical/train-00005-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08f798cc54277cc0fb7d32d2e08855c58c2b856a839a068e0279b7dc905df8bd
+size 148574387
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"canonical": {"description": "This dataset contains 8.9M commonsense assertions extracted by the Ascent pipeline (https://ascent.mpi-inf.mpg.de/).\n", "citation": "@InProceedings{nguyen2021www,\n title={Advanced Semantics for Commonsense Knowledge Extraction},\n author={Nguyen, Tuan-Phong and Razniewski, Simon and Weikum, Gerhard},\n year={2021},\n booktitle={The Web Conference 2021},\n}\n", "homepage": "https://ascent.mpi-inf.mpg.de/", "license": "The Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/", "features": {"arg1": {"dtype": "string", "id": null, "_type": "Value"}, "rel": {"dtype": "string", "id": null, "_type": "Value"}, "arg2": {"dtype": "string", "id": null, "_type": "Value"}, "support": {"dtype": "int64", "id": null, "_type": "Value"}, "facets": [{"value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "support": {"dtype": "int64", "id": null, "_type": "Value"}}], "source_sentences": [{"text": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}}]}, "post_processed": null, "supervised_keys": null, "builder_name": "ascent_kb", "config_name": "canonical", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2976697816, "num_examples": 8904060, "dataset_name": "ascent_kb"}}, "download_checksums": {"https://nextcloud.mpi-klsb.mpg.de/index.php/s/dFLdTQHqiFrt3Q3/download": {"num_bytes": 710727536, "checksum": "51fd88a07bca4fa48a9157dd1d93d9bac88ad2b38b5eae662d2cbfad47895016"}}, "download_size": 710727536, "post_processing_size": null, "dataset_size": 2976697816, "size_in_bytes": 3687425352}, "open": {"description": "This dataset contains 8.9M commonsense assertions extracted by the Ascent pipeline (https://ascent.mpi-inf.mpg.de/).\n", "citation": "@InProceedings{nguyen2021www,\n title={Advanced Semantics for Commonsense Knowledge Extraction},\n author={Nguyen, Tuan-Phong and Razniewski, Simon and Weikum, Gerhard},\n year={2021},\n booktitle={The Web Conference 2021},\n}\n", "homepage": "https://ascent.mpi-inf.mpg.de/", "license": "The Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/", "features": {"subject": {"dtype": "string", "id": null, "_type": "Value"}, "predicate": {"dtype": "string", "id": null, "_type": "Value"}, "object": {"dtype": "string", "id": null, "_type": "Value"}, "support": {"dtype": "int64", "id": null, "_type": "Value"}, "facets": [{"value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "support": {"dtype": "int64", "id": null, "_type": "Value"}}], "source_sentences": [{"text": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}}]}, "post_processed": null, "supervised_keys": null, "builder_name": "ascent_kb", "config_name": "open", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2882678298, "num_examples": 8904060, "dataset_name": "ascent_kb"}}, "download_checksums": {"https://nextcloud.mpi-klsb.mpg.de/index.php/s/dFLdTQHqiFrt3Q3/download": {"num_bytes": 710727536, "checksum": "51fd88a07bca4fa48a9157dd1d93d9bac88ad2b38b5eae662d2cbfad47895016"}}, "download_size": 710727536, "post_processing_size": null, "dataset_size": 2882678298, "size_in_bytes": 3593405834}}
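
With the legacy `dataset_infos.json` gone, the split sizes and download size are carried by the README front matter shown above and can still be inspected programmatically. A minimal sketch, again assuming the repo id is `ascent_kb`:

```python
from datasets import load_dataset_builder

# Reads only the metadata (no data download); the numbers now come from the
# README YAML front matter instead of the removed dataset_infos.json.
builder = load_dataset_builder("ascent_kb", "canonical")
info = builder.info
print(info.splits["train"].num_examples)   # 8904060
print(info.download_size, info.dataset_size)
```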
open/train-00000-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8a4226014ff897286a724014813d6002af213213f20fd47775b4849a40f40b6
+size 150529519
open/train-00001-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10aa60e2dcd6b210d1e7553222f7d1de7623db95827d31a93b715d6d5416021f
+size 150431266
open/train-00002-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ffe351b850fced8c7338ca6c0f6c14c557884708830e2d1179b5fa968ba213e
+size 149313188
open/train-00003-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c9b61c964606a0e05e16ad39d1d27ab83ad5dfb57a39ec3f0cdd11523344213
+size 151245670
open/train-00004-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8ad9dc7ce124ef03e35e712b6c1d9c9eaccd54e388f0763e4c709f675c0db5c
+size 149777060
open/train-00005-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fc4b35c9e6ea598218a4c7c63d92fbed047f15f4c5d654780856134fa527883
+size 148860051
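
The added files above are git-lfs pointer stubs; the actual Parquet shards (roughly 150 MB each) live in LFS storage. To fetch and read a single shard rather than a whole config, a hedged sketch using `huggingface_hub` and `pyarrow` (repo id `ascent_kb` assumed):

```python
from huggingface_hub import hf_hub_download
import pyarrow.parquet as pq

# Downloads one shard from the dataset repo (repo id assumed to be "ascent_kb")
# and reads it directly with pyarrow, bypassing the datasets library.
path = hf_hub_download(
    repo_id="ascent_kb",
    filename="canonical/train-00000-of-00006.parquet",
    repo_type="dataset",
)
table = pq.read_table(path)
print(table.num_rows, table.schema.names)
```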