Datasets: glucose

Modalities: Tabular, Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
License: Creative Commons Attribution-NonCommercial 4.0 International (cc-by-nc-4.0)
albertvillanova committed 2f5bad6 (1 parent: a98b82a)

Delete loading script

Files changed (1):
  1. glucose.py (+0, -160)
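With the loading script gone, the `datasets` library no longer executes repository Python code and instead reads the dataset from its Parquet files (the "Formats: parquet" tag above). A minimal sketch of loading after this change, assuming the repository id `glucose` (taken from the deleted script's `BuilderConfig` name) and the train/test splits that script defined:

```python
# Minimal sketch, assuming the repository id "glucose" and the
# train/test splits defined by the deleted script. With no loading
# script present, load_dataset resolves the Parquet files directly.
from datasets import load_dataset

dataset = load_dataset("glucose")

print(dataset)                              # DatasetDict with "train" and "test"
print(dataset["train"][0]["story"])         # full story text
print(dataset["train"][0]["1_specificNL"])  # dimension-1 specific statement
```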
glucose.py DELETED
@@ -1,160 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""GLUCOSE: GeneraLized and COntextualized Story Explanations is a novel conceptual framework and dataset for commonsense reasoning. Given a short story and a sentence X in the story, GLUCOSE captures ten dimensions of causal explanation related to X. These dimensions, inspired by human cognitive psychology, cover often-implicit causes and effects of X, including events, location, possession, and other attributes."""
-
-
-import csv
-import os
-
-import datasets
-
-
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@inproceedings{mostafazadeh2020glucose,
-    title={GLUCOSE: GeneraLized and COntextualized Story Explanations},
-    author={Nasrin Mostafazadeh and Aditya Kalyanpur and Lori Moon and David Buchanan and Lauren Berkowitz and Or Biran and Jennifer Chu-Carroll},
-    year={2020},
-    booktitle={The Conference on Empirical Methods in Natural Language Processing},
-    publisher={Association for Computational Linguistics}
-}
-"""
-
-# You can copy an official description
-_DESCRIPTION = """\
-When humans read or listen, they make implicit commonsense inferences that frame their understanding of what happened and why. As a step toward AI systems that can build similar mental models, we introduce GLUCOSE, a large-scale dataset of implicit commonsense causal knowledge, encoded as causal mini-theories about the world, each grounded in a narrative context.
-"""
-
-_HOMEPAGE = "https://github.com/ElementalCognition/glucose"
-
-_LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International Public License"
-
-_URLs = {
-    "glucose": {
-        "test": "https://raw.githubusercontent.com/ElementalCognition/glucose/master/test/test_set_no_answers.csv",
-        "train": "https://github.com/TevenLeScao/glucose/blob/master/GLUCOSE_training_data.zip?raw=true",
-    }
-}
-
-
-class Glucose(datasets.GeneratorBasedBuilder):
-    """GLUCOSE: GeneraLized and COntextualized Story Explanations is a novel conceptual framework and dataset for commonsense reasoning."""
-
-    VERSION = datasets.Version("1.1.0")
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="glucose", description="Main dataset"),
-    ]
-
-    def _info(self):
-        feature_dict = {
-            "experiment_id": datasets.Value("string"),
-            "story_id": datasets.Value("string"),
-            # The train set contains only one ID in numeric form
-            "worker_id": datasets.Value("int64"),
-            # The test set contains several IDs in string form
-            "worker_ids": datasets.Value("string"),
-            "submission_time_normalized": datasets.Value("string"),
-            "worker_quality_assessment": datasets.Value("int64"),
-            "selected_sentence_index": datasets.Value("int64"),
-            "story": datasets.Value("string"),
-            "selected_sentence": datasets.Value("string"),
-            "number_filled_in": datasets.Value("int64"),
-        }
-        for i in range(1, 11):
-            feature_dict[f"{i}_specificNL"] = datasets.Value("string")
-            feature_dict[f"{i}_specificStructured"] = datasets.Value("string")
-            feature_dict[f"{i}_generalNL"] = datasets.Value("string")
-            feature_dict[f"{i}_generalStructured"] = datasets.Value("string")
-        features = datasets.Features(feature_dict)
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        train_url = _URLs[self.config.name]["train"]
-        test_url = _URLs[self.config.name]["test"]
-        train_data = dl_manager.download_and_extract(train_url)
-        test_data = dl_manager.download_and_extract(test_url)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": os.path.join(train_data, "GLUCOSE_training_data_final.csv"),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"filepath": test_data, "split": "test"},
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        with open(filepath, encoding="utf8") as f:
-            data = csv.reader(f)
-            next(data)  # skip the header row
-            for id_, row in enumerate(data):
-                if split == "train":
-                    yield id_, train_dict_from_row(row)
-                else:
-                    yield id_, test_dict_from_row(row)
-
-
-def train_dict_from_row(row):
-    return_dict = {
-        "experiment_id": row[0],
-        "story_id": row[1],
-        "worker_id": row[2],
-        "worker_ids": "",
-        "submission_time_normalized": row[3],
-        "worker_quality_assessment": row[4],
-        "selected_sentence_index": row[5],
-        "story": row[6],
-        "selected_sentence": row[7],
-        "number_filled_in": row[48],
-    }
-    for i in range(1, 11):
-        return_dict[f"{i}_specificNL"] = row[4 * i + 4]
-        return_dict[f"{i}_specificStructured"] = row[4 * i + 5]
-        return_dict[f"{i}_generalNL"] = row[4 * i + 6]
-        return_dict[f"{i}_generalStructured"] = row[4 * i + 7]
-    return return_dict
-
-
-def test_dict_from_row(row):
-    return_dict = {
-        "experiment_id": "",
-        "story_id": row[0],
-        "worker_id": -1,
-        "worker_ids": row[3],
-        "submission_time_normalized": "",
-        "worker_quality_assessment": -1,
-        "selected_sentence_index": -1,
-        "story": row[1],
-        "selected_sentence": row[2],
-        "number_filled_in": -1,
-    }
-    for i in range(1, 11):
-        return_dict[f"{i}_specificNL"] = row[2 * i + 2]
-        return_dict[f"{i}_generalNL"] = row[2 * i + 3]
-        return_dict[f"{i}_specificStructured"] = ""
-        return_dict[f"{i}_generalStructured"] = ""
-    return return_dict
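For reference, the column layout the deleted script decoded: the training CSV stores each of the ten causal dimensions as four consecutive columns (specific/general × NL/structured) starting at column 8, with `number_filled_in` at column 48; the test CSV stores only the two NL variants per dimension starting at column 4. A hedged pandas sketch of the same train-side mapping (the file name comes from the script's `_split_generators`; this is an illustration, not an official replacement loader):

```python
# Hedged sketch reproducing the deleted script's train-side column
# mapping with pandas. The CSV file name is taken from the script's
# _split_generators; the indices mirror train_dict_from_row above.
import pandas as pd

df = pd.read_csv("GLUCOSE_training_data_final.csv", header=0)

def train_row_to_dict(row):
    d = {
        "experiment_id": row[0],
        "story_id": row[1],
        "worker_id": row[2],
        "submission_time_normalized": row[3],
        "worker_quality_assessment": row[4],
        "selected_sentence_index": row[5],
        "story": row[6],
        "selected_sentence": row[7],
        "number_filled_in": row[48],
    }
    # Each dimension i occupies four consecutive columns: 4*i+4 .. 4*i+7.
    for i in range(1, 11):
        d[f"{i}_specificNL"] = row[4 * i + 4]
        d[f"{i}_specificStructured"] = row[4 * i + 5]
        d[f"{i}_generalNL"] = row[4 * i + 6]
        d[f"{i}_generalStructured"] = row[4 * i + 7]
    return d

records = [train_row_to_dict(r) for r in df.itertuples(index=False)]
```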