salavina committed
Commit a24b380
1 Parent(s): 308b4ad

OCW Config file

Files changed (1): OCW.py +194 -0
OCW.py ADDED
@@ -0,0 +1,194 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Only Connect Wall (OCW) dataset"""
+
+ import json
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{Naeini2023LargeLM,
+     title = {Large Language Models are Fixated by Red Herrings: Exploring Creative Problem Solving and Einstellung Effect using the Only Connect Wall Dataset},
+     author = {Saeid Alavi Naeini and Raeid Saqur and Mozhgan Saeidi and John Giorgi and Babak Taati},
+     year = 2023,
+     journal = {ArXiv},
+     volume = {abs/2306.11167},
+     url = {https://api.semanticscholar.org/CorpusID:259203717}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The Only Connect Wall (OCW) dataset contains 618 "Connecting Walls" from the Round 3: Connecting Wall segment of the Only Connect quiz show, collected from 15 seasons' worth of episodes. Each wall contains the ground-truth groups and connections as well as recorded human performance.
+ """
+
+ _HOMEPAGE_URL = "https://github.com/TaatiTeam/OCW/"
+
+ _LICENSE = "MIT"
+
+ _BASE_URL = "https://huggingface.co/datasets/TaatiTeam/OCW/resolve/main/"
+ _URLS = {
+     "ocw_train": _BASE_URL + "train.json",
+     "ocw_validation": _BASE_URL + "validation.json",
+     "ocw_test": _BASE_URL + "test.json",
+     "ocw_randomized_test": _BASE_URL + "easy_test_randomized.json",
+     "ocw_wordnet_train": _BASE_URL + "easy_train_wordnet.json",
+     "ocw_wordnet_validation": _BASE_URL + "easy_validation_wordnet.json",
+     "ocw_wordnet_test": _BASE_URL + "easy_test_wordnet.json",
+ }
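+ # Split file keys follow the "<config name>_<split>" pattern that
+ # _split_generators uses below to look up download URLs.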
+
+
+ class OCW(datasets.GeneratorBasedBuilder):
+     """OCW dataset"""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="ocw", version=VERSION,
+                                description="main OCW dataset"),
+         datasets.BuilderConfig(name="ocw_randomized", version=VERSION,
+                                description="Easy OCW dataset with randomized groups in each wall"),
+         datasets.BuilderConfig(name="ocw_wordnet", version=VERSION,
+                                description="Easy OCW dataset with original clues replaced by WordNet synonyms"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "ocw"
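+     # DEFAULT_CONFIG_NAME must name one of the BUILDER_CONFIGS entries, so
+     # load_dataset calls without an explicit config resolve to the main "ocw" set.
+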
+     def _info(self):
+         # Features mirror the structure yielded by _generate_examples: one row
+         # per wall, with the four groups flattened into group_1 .. group_4.
+         group_features = {
+             "group_id": datasets.Value("string"),
+             "gt_words": datasets.features.Sequence(datasets.Value("string")),
+             "gt_connection": datasets.Value("string"),
+             "human_performance": {
+                 "grouping": datasets.Value("int32"),
+                 "connection": datasets.Value("int32"),
+             },
+         }
+         features = datasets.Features(
+             {
+                 "wall_id": datasets.Value("string"),
+                 "season": datasets.Value("int32"),
+                 "episode": datasets.Value("int32"),
+                 "words": datasets.features.Sequence(datasets.Value("string")),
+                 "gt_connections": datasets.features.Sequence(datasets.Value("string")),
+                 "group_1": group_features,
+                 "group_2": group_features,
+                 "group_3": group_features,
+                 "group_4": group_features,
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             features=features,
+             # Homepage of the dataset for documentation.
+             homepage=_HOMEPAGE_URL,
+             # License for the dataset if available.
+             license=_LICENSE,
+             # Citation for the dataset.
+             citation=_CITATION,
+         )
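+
+     # A generated example mirrors these features, roughly (values invented
+     # for illustration):
+     #   {"wall_id": "882a", "season": 1, "episode": 5,
+     #    "words": [<16 clue strings>], "gt_connections": [<4 strings>],
+     #    "group_1": {"group_id": "882a_1", "gt_words": [<4 strings>],
+     #                "gt_connection": "...", "human_performance": {"grouping": 1, "connection": 0}},
+     #    ...,
+     #    "group_4": {...}}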
+
+     def _split_generators(self, dl_manager):
+         test_url = _URLS[self.config.name + '_test']
+         # only the test set is randomized for ablation studies, so the
+         # randomized config falls back to the main train/validation splits
+         if self.config.name == 'ocw_randomized':
+             train_url = _URLS['ocw_train']
+             validation_url = _URLS['ocw_validation']
+         else:
+             train_url = _URLS[self.config.name + '_train']
+             validation_url = _URLS[self.config.name + '_validation']
+
+         train_path = dl_manager.download_and_extract(train_url)
+         validation_path = dl_manager.download_and_extract(validation_url)
+         test_path = dl_manager.download_and_extract(test_url)
+
+         # gen_kwargs keys must match the parameter names of _generate_examples.
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": validation_path}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
+         ]
+
+
+     def _generate_examples(self, filepath):
+         """This function returns the examples in the raw (text) form."""
+         key = 0
+         with open(filepath, encoding="utf-8") as f:
+             ocw = json.load(f)
+         for data in ocw["dataset"]:
+             example = {
+                 "wall_id": data.get("wall_id"),
+                 "season": data.get("season"),
+                 "episode": data.get("episode"),
+                 "words": data.get("words"),
+                 "gt_connections": data.get("gt_connections"),
+             }
+             # Flatten each wall's four groups into the group_1 .. group_4 columns.
+             for group_name in ("group_1", "group_2", "group_3", "group_4"):
+                 group = data["groups"][group_name]
+                 human_performance = group["human_performance"]
+                 example[group_name] = {
+                     "group_id": group.get("group_id"),
+                     "gt_words": group.get("gt_words"),
+                     "gt_connection": group.get("gt_connection"),
+                     "human_performance": {
+                         "grouping": human_performance.get("grouping"),
+                         "connection": human_performance.get("connection"),
+                     },
+                 }
+             yield key, example
+             key += 1
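
For quick sanity-checking, a minimal usage sketch follows. It assumes the datasets library is installed and that this script is served from the TaatiTeam/OCW Hub repo referenced by _BASE_URL; field access mirrors the features declared in _info.

from datasets import load_dataset

# Load the main config; "ocw_randomized" or "ocw_wordnet" select the easy variants.
ocw = load_dataset("TaatiTeam/OCW", "ocw")

# Inspect one wall from the test split.
wall = ocw["test"][0]
print(wall["wall_id"], wall["season"], wall["episode"])
print(wall["group_1"]["gt_words"], "->", wall["group_1"]["gt_connection"])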