parquet-converter committed
Commit b0ba429
1 Parent(s): 68b61d5

Update parquet files

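Context for this commit: the script-based loader (CSKG.py) and the template README below are removed, and the data is checked in as pre-built Parquet shards, so the dataset can be consumed without executing a Python script. A minimal loading sketch follows; the repository id "USER/CSKG" is a placeholder, since this commit view does not show the full repo name.

# Minimal sketch, assuming the shards below live in a Hugging Face dataset
# repo; "USER/CSKG" is a placeholder id, not shown in this commit view.
from datasets import load_dataset

ds = load_dataset("USER/CSKG", split="train")  # served from the Parquet shards
print(ds.num_rows)  # total rows across the three shards
print(ds[0])        # dict with id, node1, relation, node2, labels, source, sentence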
CSKG.py DELETED
@@ -1,195 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # TODO: Address all TODOs and remove all explanatory comments
- """TODO: Add a description here."""
-
-
- import csv
- import json
- import os
-
- import datasets
-
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @article{ilievski2021cskg,
-     title={CSKG: The CommonSense Knowledge Graph},
-     author={Ilievski, Filip and Szekely, Pedro and Zhang, Bin},
-     journal={Extended Semantic Web Conference (ESWC)},
-     year={2021}
- }
- """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- CSKG is a commonsense knowledge graph that combines seven popular sources into a consolidated representation: ATOMIC, ConceptNet, FrameNet, Roget, Visual Genome, Wikidata (We use the Wikidata-CS subset), and WordNet. CSKG is represented as a hyper-relational graph, by using the KGTK data model and file specification. Its creation is entirely supported by KGTK operations.
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = "https://cskg.readthedocs.io/en/latest/"
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International License"
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- # _URLS = {
- #     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
- #     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
- # }
- _URLS = {
-     "cskg": "https://zenodo.org/record/4331372/files/cskg.tsv.gz",
- }
-
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
- class CSKG(datasets.GeneratorBasedBuilder):
-     """a commonsense knowledge graph"""
-
-     VERSION = datasets.Version("1.1.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="cskg", version=VERSION, description="The relationships defined by cskg"),
-         # datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
-     ]
-
-     DEFAULT_CONFIG_NAME = "cskg"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-
-     def _info(self):
-         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-         if self.config.name == "cskg":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "node1": datasets.Value("string"),
-                     "relation": datasets.Value("string"),
-                     "node2": datasets.Value("string"),
-                     "node1;label": datasets.Value("string"),
-                     "node2;label": datasets.Value("string"),
-                     "relation;label": datasets.Value("string"),
-                     "relation;dimension": datasets.Value("string"),
-                     "source": datasets.Value("string"),
-                     "sentence": datasets.Value("string"),
-                     # These are the features of your dataset like images, labels ...
-                 }
-             )
-         # else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
-         #     features = datasets.Features(
-         #         {
-         #             "sentence": datasets.Value("string"),
-         #             "option2": datasets.Value("string"),
-         #             "second_domain_answer": datasets.Value("string")
-         #             # These are the features of your dataset like images, labels ...
-         #         }
-         #     )
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-         urls = _URLS[self.config.name]
-         data_dir = dl_manager.download_and_extract(urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": data_dir,
-                     "split": "train",
-                 },
-             ),
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.VALIDATION,
-             #     # These kwargs will be passed to _generate_examples
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "dev.jsonl"),
-             #         "split": "dev",
-             #     },
-             # ),
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.TEST,
-             #     # These kwargs will be passed to _generate_examples
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "test.jsonl"),
-             #         "split": "test"
-             #     },
-             # ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         with open(filepath, 'rb') as f:
-             for id_, row in enumerate(f):
-                 if self.config.name == "cskg":
-                     row = row.split(b"\t")
-                     # Yields examples as (key, example) tuples
-                     yield id_, {
-                         # "sentence": data["sentence"],
-                         # "option1": data["option1"],
-                         # "answer": "" if split == "test" else data["answer"],
-                         "id": row[0].decode("utf-8"),
-                         "node1": row[1].decode("utf-8"),
-                         "relation": row[2].decode("utf-8"),
-                         "node2": row[3].decode("utf-8"),
-                         "node1;label": row[4].decode("utf-8"),
-                         "node2;label": row[5].decode("utf-8"),
-                         "relation;label": row[6].decode("utf-8"),
-                         "relation;dimension": row[7].decode("utf-8"),
-                         "source": row[8].decode("utf-8"),
-                         "sentence": row[9].decode("utf-8"),
-                     }
-                 # else:
-                 #     yield key, {
-                 #         "sentence": data["sentence"],
-                 #         "option2": data["option2"],
-                 #         "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
-                 #     }
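For reference, the `_generate_examples` method above streams the KGTK TSV and tab-splits each line into the ten columns declared in `_info`. Below is a standalone sketch of the same parsing, applied directly to the `cskg.tsv.gz` file from `_URLS`; skipping the header row and stripping the trailing newline are assumptions, since the deleted script did neither.

# Standalone sketch of the row parsing in _generate_examples above, run
# directly on the Zenodo file from _URLS. Skipping the KGTK header row and
# stripping the trailing newline are assumptions; the deleted script did neither.
import gzip

KGTK_COLUMNS = [
    "id", "node1", "relation", "node2", "node1;label", "node2;label",
    "relation;label", "relation;dimension", "source", "sentence",
]

def iter_cskg(path="cskg.tsv.gz"):
    with gzip.open(path, "rt", encoding="utf-8") as f:
        next(f)  # header row (assumption, see above)
        for line in f:
            yield dict(zip(KGTK_COLUMNS, line.rstrip("\n").split("\t")))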
README.md DELETED
@@ -1,104 +0,0 @@
- ---
- license: openrail
- task_categories:
- - question-answering
- language:
- - en
- ---
- # Dataset Card for Dataset Name
-
- ## Dataset Description
-
- - **Homepage:**
- - **Repository:**
- - **Paper:**
- - **Leaderboard:**
- - **Point of Contact:**
-
- ### Dataset Summary
-
- This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).
-
- ### Supported Tasks and Leaderboards
-
- [More Information Needed]
-
- ### Languages
-
- [More Information Needed]
-
- ## Dataset Structure
-
- ### Data Instances
-
- [More Information Needed]
-
- ### Data Fields
-
- [More Information Needed]
-
- ### Data Splits
-
- [More Information Needed]
-
- ## Dataset Creation
-
- ### Curation Rationale
-
- [More Information Needed]
-
- ### Source Data
-
- #### Initial Data Collection and Normalization
-
- [More Information Needed]
-
- #### Who are the source language producers?
-
- [More Information Needed]
-
- ### Annotations
-
- #### Annotation process
-
- [More Information Needed]
-
- #### Who are the annotators?
-
- [More Information Needed]
-
- ### Personal and Sensitive Information
-
- [More Information Needed]
-
- ## Considerations for Using the Data
-
- ### Social Impact of Dataset
-
- [More Information Needed]
-
- ### Discussion of Biases
-
- [More Information Needed]
-
- ### Other Known Limitations
-
- [More Information Needed]
-
- ## Additional Information
-
- ### Dataset Curators
-
- [More Information Needed]
-
- ### Licensing Information
-
- [More Information Needed]
-
- ### Citation Information
-
- [More Information Needed]
-
- ### Contributions
-
- [More Information Needed]
cskg/cskg-train-00000-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9230de5335cd646da07e79ac2c8b500fb9f093800bcb8025def074b4b3e982cc
+ size 126569022
cskg/cskg-train-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59f3c90797669170a0f048e484fc28d8dff896afe86c7781ca8922fa3b6dffc8
+ size 119240902
cskg/cskg-train-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:272e07421cd3d866303e5df548eb11d9821d727f875168c8b71ba6140fe9daae
+ size 16771480
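Note that the three ADDED files are Git LFS pointers (the `version` / `oid sha256` / `size` lines), not the Parquet bytes themselves; the actual shards are fetched through LFS. A sketch of reading them directly once the objects are materialized locally (e.g. via a git-lfs clone), using standard pyarrow calls; the paths simply mirror the file names above.

# Sketch: read the three train shards after the LFS objects have been
# fetched locally; paths mirror the repo layout shown above.
import pyarrow as pa
import pyarrow.parquet as pq

shards = [
    "cskg/cskg-train-00000-of-00003.parquet",
    "cskg/cskg-train-00001-of-00003.parquet",
    "cskg/cskg-train-00002-of-00003.parquet",
]
table = pa.concat_tables([pq.read_table(p) for p in shards])
print(table.num_rows, table.schema.names)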