salavina commited on
Commit
02af0df
1 Parent(s): 8baff30

Add config script

Browse files
Files changed (1) hide show
  1. OCW.py +233 -0
OCW.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Only Connect Wall (OCW) dataset"""
16
+
17
+ import json
18
+ import os
19
+
20
+ import datasets
21
+
22
+
23
+ _CITATION = """\
24
+ @article{Naeini2023LargeLM,
25
+ title = {Large Language Models are Fixated by Red Herrings: Exploring Creative Problem Solving and Einstellung Effect using the Only Connect Wall Dataset},
26
+ author = {Saeid Alavi Naeini and Raeid Saqur and Mozhgan Saeidi and John Giorgi and Babak Taati},
27
+ year = 2023,
28
+ journal = {ArXiv},
29
+ volume = {abs/2306.11167},
30
+ url = {https://api.semanticscholar.org/CorpusID:259203717}
31
+ }
32
+ """
33
+
34
+ _DESCRIPTION = """\
35
+ The Only Connect Wall (OCW) dataset contains 618 "Connecting Walls" from the Round 3: Connecting Wall segment of the Only Connect quiz show, collected from 15 seasons' worth of episodes. Each wall contains the ground-truth groups and connections as well as recorded human performance.
36
+ """
37
+
38
+ _HOMEPAGE_URL = "https://github.com/TaatiTeam/OCW/"
39
+
40
+ _LICENSE = "MIT"
41
+
42
+ _BASE_URL = "https://huggingface.co/datasets/TaatiTeam/OCW/resolve/main/"
43
+ _URLS = {
44
+ "ocw_train": _BASE_URL + "train.json",
45
+ "ocw_validation": _BASE_URL + "validation.json",
46
+ "ocw_test": _BASE_URL + "test.json",
47
+ "ocw_randomized_test": _BASE_URL + "easy_test_randomized.json",
48
+ "ocw_wordnet_train": _BASE_URL + "easy_train_wordnet.json",
49
+ "ocw_wordnet_validation": _BASE_URL + "easy_validation_wordnet.json",
50
+ "ocw_wordnet_test": _BASE_URL + "easy_test_wordnet.json"
51
+ }
52
+
53
+
54
class OCW(datasets.GeneratorBasedBuilder):
    """Builder for the Only Connect Wall (OCW) dataset.

    Each example is one "Connecting Wall": a 16-clue grid with four
    ground-truth groups (``group_1`` .. ``group_4``), each carrying its
    clue words, the connection label, and recorded human performance.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="ocw", version=VERSION,
                               description="main OCW dataset"),
        datasets.BuilderConfig(name="ocw_randomized", version=VERSION,
                               description="Easy OCW dataset with randomized groups in each wall"),
        datasets.BuilderConfig(name="ocw_wordnet", version=VERSION,
                               description="Easy OCW dataset with wordnet synonyms replaced with original clues"),
    ]

    DEFAULT_CONFIG_NAME = "ocw"

    @staticmethod
    def _group_features():
        """Feature schema shared by all four groups of a wall."""
        return {
            "group_id": datasets.Value("string"),
            "gt_words": datasets.Value("string"),
            "gt_connection": datasets.Value("string"),
            "human_performance": {
                "grouping": datasets.Value("int32"),
                "connection": datasets.Value("int32"),
            },
        }

    def _info(self):
        """Return the DatasetInfo (description, features, license, citation)."""
        features = datasets.Features(
            {
                "wall_id": datasets.Value("string"),
                "season": datasets.Value("int32"),
                "episode": datasets.Value("int32"),
                "words": datasets.Value("string"),
                "gt_connections": datasets.Value("string"),
                # The four groups of a wall share one schema; build it once
                # instead of repeating the nested dict four times.
                **{f"group_{i}": self._group_features() for i in range(1, 5)},
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE_URL,
            license=_LICENSE,
            citation=_CITATION,
            # No default supervised (input, label) pair for this dataset.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download the split files and wire them to train/validation/test.

        Only the test set exists in randomized form (used for ablation
        studies), so the ``ocw_randomized`` config reuses the main
        train/validation files.
        """
        test_url = _URLS[self.config.name + "_test"]
        if self.config.name == "ocw_randomized":
            train_url = _URLS["ocw_train"]
            validation_url = _URLS["ocw_validation"]
        else:
            train_url = _URLS[self.config.name + "_train"]
            validation_url = _URLS[self.config.name + "_validation"]

        train_path = dl_manager.download_and_extract(train_url)
        validation_path = dl_manager.download_and_extract(validation_url)
        test_path = dl_manager.download_and_extract(test_url)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": validation_path}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
        ]

    @staticmethod
    def _group_example(group):
        """Extract one group's fields from its raw JSON dict.

        Mirrors ``_group_features``: group id, clue words, connection
        label, and the recorded human performance scores.
        """
        human_performance = group["human_performance"]
        return {
            "group_id": group.get("group_id"),
            "gt_words": group.get("gt_words"),
            "gt_connection": group.get("gt_connection"),
            "human_performance": {
                "grouping": human_performance.get("grouping"),
                "connection": human_performance.get("connection"),
            },
        }

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one split's JSON file."""
        with open(filepath, encoding="utf-8") as f:
            ocw = json.load(f)
        for key, data in enumerate(ocw["dataset"]):
            example = {
                "wall_id": data.get("wall_id"),
                "season": data.get("season"),
                "episode": data.get("episode"),
                "words": data.get("words"),
                "gt_connections": data.get("gt_connections"),
            }
            # One extraction routine for all four groups instead of four
            # copy-pasted blocks.
            for i in range(1, 5):
                group_name = f"group_{i}"
                example[group_name] = self._group_example(data["groups"][group_name])
            yield key, example