Tasks: Other
Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, Dask
License: cc-by-4.0
albertvillanova (HF staff) committed
Commit: a18eee6
Parent: 66cf90a

Delete loading script

Files changed (1):
  1. generics_kb.py +0 -189
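
With the loading script gone, the Hub serves this dataset directly from its auto-converted Parquet files (hence the "parquet" format and Dask support listed above), and no remote code has to run at load time. A minimal sketch of loading after this change, assuming the repository keeps its existing id and config names:

from datasets import load_dataset

# Assumed repo id and config name; data now comes from the Parquet
# conversion instead of executing generics_kb.py.
ds = load_dataset("generics_kb", "generics_kb_best", split="train")
print(ds[0]["generic_sentence"], ds[0]["score"])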
generics_kb.py DELETED
@@ -1,189 +0,0 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generics KB: A Knowledge Base of Generic Statements"""


import ast
import csv

import datasets


_CITATION = """\
@InProceedings{huggingface:dataset,
title = {GenericsKB: A Knowledge Base of Generic Statements},
authors={Sumithra Bhakthavatsalam, Chloe Anastasiades, Peter Clark},
year={2020},
publisher = {Allen Institute for AI},
}
"""

_DESCRIPTION = """\
The GenericsKB contains 3.4M+ generic sentences about the world, i.e., sentences expressing general truths such as "Dogs bark," and "Trees remove carbon dioxide from the atmosphere." Generics are potentially useful as a knowledge source for AI systems requiring general world knowledge. The GenericsKB is the first large-scale resource containing naturally occurring generic sentences (as opposed to extracted or crowdsourced triples), and is rich in high-quality, general, semantically complete statements. Generics were primarily extracted from three large text sources, namely the Waterloo Corpus, selected parts of Simple Wikipedia, and the ARC Corpus. A filtered, high-quality subset is also available in GenericsKB-Best, containing 1,020,868 sentences. We recommend you start with GenericsKB-Best.
"""

_HOMEPAGE = "https://allenai.org/data/genericskb"

_LICENSE = "cc-by-4.0"

_BASE_URL = "data/{0}"

_URLS = {
    "generics_kb_best": _BASE_URL.format("GenericsKB-Best.tsv.gz"),
    "generics_kb": _BASE_URL.format("GenericsKB.tsv.gz"),
    "generics_kb_simplewiki": _BASE_URL.format("GenericsKB-SimpleWiki-With-Context.jsonl.gz"),
    "generics_kb_waterloo": _BASE_URL.format("GenericsKB-Waterloo-With-Context.jsonl.gz"),
}


class GenericsKb(datasets.GeneratorBasedBuilder):
    """The GenericsKB is the first large-scale resource containing naturally occurring generic sentences, and is rich in high-quality, general, semantically complete statements."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="generics_kb_best",
            version=VERSION,
            description="The default and recommended config, comprising GENERICSKB generics with a score > 0.234.",
        ),
        datasets.BuilderConfig(
            name="generics_kb", version=VERSION, description="The full GENERICSKB, containing 3,433,000 sentences."
        ),
        datasets.BuilderConfig(
            name="generics_kb_simplewiki",
            version=VERSION,
            description="SimpleWikipedia is a filtered scrape of SimpleWikipedia pages (simple.wikipedia.org).",
        ),
        datasets.BuilderConfig(
            name="generics_kb_waterloo",
            version=VERSION,
            description="The Waterloo corpus is 280GB of English plain text, gathered by Charles Clarke (Univ. Waterloo) using a webcrawler in 2001 from .edu domains.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "generics_kb_best"

    def _info(self):
        if self.config.name == "generics_kb_waterloo" or self.config.name == "generics_kb_simplewiki":
            featuredict = {
                "source_name": datasets.Value("string"),
                "sentence": datasets.Value("string"),
                "sentences_before": datasets.Sequence(datasets.Value("string")),
                "sentences_after": datasets.Sequence(datasets.Value("string")),
                "concept_name": datasets.Value("string"),
                "quantifiers": datasets.Sequence(datasets.Value("string")),
                "id": datasets.Value("string"),
                "bert_score": datasets.Value("float64"),
            }
            if self.config.name == "generics_kb_simplewiki":
                featuredict["headings"] = datasets.Sequence(datasets.Value("string"))
                featuredict["categories"] = datasets.Sequence(datasets.Value("string"))

            features = datasets.Features(featuredict)

        else:
            features = datasets.Features(
                {
                    "source": datasets.Value("string"),
                    "term": datasets.Value("string"),
                    "quantifier_frequency": datasets.Value("string"),
                    "quantifier_number": datasets.Value("string"),
                    "generic_sentence": datasets.Value("string"),
                    "score": datasets.Value("float64"),
                }
            )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # The features differ between the context-rich (jsonl) and plain (tsv)
            # configurations, so they are built above.
            features=features,
            # There is no natural (input, target) pair, so as_supervised is unsupported.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        filepath = dl_manager.download_and_extract(_URLS[self.config.name])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "filepath": filepath,
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""

        if self.config.name == "generics_kb_waterloo" or self.config.name == "generics_kb_simplewiki":
            with open(filepath, encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    # Each line is a dict literal describing one sentence with its context.
                    data = ast.literal_eval(row)

                    result = {
                        "source_name": data["source"]["name"],
                        "sentence": data["knowledge"]["sentence"],
                        "sentences_before": data["knowledge"]["context"]["sentences_before"],
                        "sentences_after": data["knowledge"]["context"]["sentences_after"],
                        "concept_name": data["knowledge"]["key_concepts"][0]["concept_name"],
                        "quantifiers": data["knowledge"]["key_concepts"][0]["quantifiers"],
                        "id": data["id"],
                        "bert_score": data["bert_score"],
                    }
                    if self.config.name == "generics_kb_simplewiki":
                        result["headings"] = data["knowledge"]["context"]["headings"]
                        result["categories"] = data["knowledge"]["context"]["categories"]

                    yield id_, result
        else:
            with open(filepath, encoding="utf-8") as f:
                # Skip the header
                next(f)

                read_tsv = csv.reader(f, delimiter="\t")

                for id_, row in enumerate(read_tsv):
                    # The quantifier column optionally holds a dict literal,
                    # e.g. {'frequency': 'usually'}.
                    quantifier = row[2]
                    quantifier_frequency = ""
                    quantifier_number = ""

                    if quantifier != "":
                        quantifier = ast.literal_eval(quantifier)
                        if "frequency" in quantifier.keys():
                            quantifier_frequency = quantifier["frequency"]
                        if "number" in quantifier.keys():
                            quantifier_number = quantifier["number"]
                    yield id_, {
                        "source": row[0],
                        "term": row[1],
                        "quantifier_frequency": quantifier_frequency,
                        "quantifier_number": quantifier_number,
                        "generic_sentence": row[3],
                        "score": float(row[4]),  # declared as float64 in _info
                    }
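
For reference, the TSV branch above flattens each data row into one example: column 0 is the source, 1 the term, 2 an optional quantifier dict literal, 3 the generic sentence, and 4 the score. A small standalone sketch of that parsing, with a made-up row for illustration:

import ast

# Hypothetical TSV row in the layout the script expects.
row = ["ARC", "tree", "{'frequency': 'usually'}",
       "Trees remove carbon dioxide from the atmosphere.", "0.91"]

quantifier_frequency = quantifier_number = ""
if row[2] != "":
    quantifier = ast.literal_eval(row[2])  # parse the dict literal
    quantifier_frequency = quantifier.get("frequency", "")
    quantifier_number = quantifier.get("number", "")

example = {
    "source": row[0],
    "term": row[1],
    "quantifier_frequency": quantifier_frequency,
    "quantifier_number": quantifier_number,
    "generic_sentence": row[3],
    "score": float(row[4]),
}
# example["quantifier_frequency"] == "usually", example["score"] == 0.91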