salavina committed on
Commit
8baff30
1 Parent(s): 2230fe2

Delete OCW.py

Files changed (1)
  1. OCW.py +0 -186
OCW.py DELETED
@@ -1,186 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- """Only Connect Wall (OCW) dataset"""
-
- import json
- import os
-
- import datasets
-
-
- _CITATION = """\
- @article{Naeini2023LargeLM,
-     title = {Large Language Models are Fixated by Red Herrings: Exploring Creative Problem Solving and Einstellung Effect using the Only Connect Wall Dataset},
-     author = {Saeid Alavi Naeini and Raeid Saqur and Mozhgan Saeidi and John Giorgi and Babak Taati},
-     year = 2023,
-     journal = {ArXiv},
-     volume = {abs/2306.11167},
-     url = {https://api.semanticscholar.org/CorpusID:259203717}
- }
- """
-
- _DESCRIPTION = """\
- The Only Connect Wall (OCW) dataset contains 618 "Connecting Walls" from the Round 3: Connecting Wall segment of the Only Connect quiz show, collected from 15 seasons' worth of episodes. Each wall contains the ground-truth groups and connections as well as recorded human performance.
- """
-
- _HOMEPAGE_URL = "https://github.com/TaatiTeam/OCW/"
-
- _LICENSE = "MIT"
-
- _BASE_URL = "https://huggingface.co/datasets/TaatiTeam/OCW/resolve/main/"
- _URLS = {
-     "ocw_train": _BASE_URL + "train.json",
-     "ocw_validation": _BASE_URL + "validation.json",
-     "ocw_test": _BASE_URL + "test.json",
-     "ocw_randomized_test": _BASE_URL + "easy_test_randomized.json",
-     "ocw_wordnet_train": _BASE_URL + "easy_train_wordnet.json",
-     "ocw_wordnet_validation": _BASE_URL + "easy_validation_wordnet.json",
-     "ocw_wordnet_test": _BASE_URL + "easy_test_wordnet.json"
- }
-
-
- class OCW(datasets.GeneratorBasedBuilder):
-     """OCW dataset"""
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="ocw", version=VERSION,
-                                description="main OCW dataset"),
-         datasets.BuilderConfig(name="ocw_randomized", version=VERSION,
-                                description="Easy OCW dataset with randomized groups in each wall"),
-         datasets.BuilderConfig(name="ocw_wordnet", version=VERSION,
-                                description="Easy OCW dataset with wordnet synonyms replaced with original clues")
-     ]
-
-     # Must match the name of one of the BUILDER_CONFIGS above.
-     DEFAULT_CONFIG_NAME = "ocw"
-
-     def _info(self):
-         # Each example is a single wall: its clue words, the four ground-truth
-         # groups (words, connection, and recorded human performance), plus
-         # episode metadata. The features mirror the dict yielded in _generate_examples.
-         group_features = {
-             "id": datasets.Value("string"),
-             "gt_words": datasets.features.Sequence(datasets.Value("string")),
-             "gt_connection": datasets.Value("string"),
-             "human_performance": {
-                 "grouping": datasets.Value("int32"),
-                 "connection": datasets.Value("int32"),
-             },
-         }
-         features = datasets.Features(
-             {
-                 "wall_id": datasets.Value("string"),
-                 "season": datasets.Value("int32"),
-                 "episode": datasets.Value("int32"),
-                 "words": datasets.features.Sequence(datasets.Value("string")),
-                 "gt_connections": datasets.features.Sequence(datasets.Value("string")),
-                 "group_1": group_features,
-                 "group_2": group_features,
-                 "group_3": group_features,
-                 "group_4": group_features,
-             }
-         )
-
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types.
-             features=features,
-             # Homepage of the dataset for documentation.
-             homepage=_HOMEPAGE_URL,
-             # License for the dataset if available.
-             license=_LICENSE,
-             # Citation for the dataset.
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         test_url = _URLS[self.config.name + '_test']
-         # Only the test set is randomized for ablation studies, so the
-         # randomized config reuses the main train and validation splits.
-         if 'ocw_randomized' in self.config.name:
-             train_url = _URLS['ocw_train']
-             validation_url = _URLS['ocw_validation']
-         else:
-             train_url = _URLS[self.config.name + '_train']
-             validation_url = _URLS[self.config.name + '_validation']
-
-         train_path = dl_manager.download_and_extract(train_url)
-         validation_path = dl_manager.download_and_extract(validation_url)
-         test_path = dl_manager.download_and_extract(test_url)
-
-         # The gen_kwargs key must match the parameter name of _generate_examples.
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": validation_path}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
-         ]
-
-
-     def _generate_examples(self, filepath):
-         """Yields examples (one wall per example) from a single JSON split file."""
-         key = 0
-         with open(filepath, encoding="utf-8") as f:
-             ocw = json.load(f)
-             for data in ocw["dataset"]:
-                 wall_id = data.get("wall_id")
-                 season = data.get("season")
-                 episode = data.get("episode")
-                 words = data.get("words")
-                 gt_connections = data.get("gt_connections")
-                 group_1 = data['groups']['group_1']
-                 group_1_human_performance = data['groups']['group_1']['human_performance']
-                 group_2 = data['groups']['group_2']
-                 group_2_human_performance = data['groups']['group_2']['human_performance']
-                 group_3 = data['groups']['group_3']
-                 group_3_human_performance = data['groups']['group_3']['human_performance']
-                 group_4 = data['groups']['group_4']
-                 group_4_human_performance = data['groups']['group_4']['human_performance']
-                 yield key, {
-                     "wall_id": wall_id,
-                     "season": season,
-                     "episode": episode,
-                     "words": words,
-                     "gt_connections": gt_connections,
-                     "group_1": {
-                         "id": group_1.get("group_id"),
-                         "gt_words": group_1.get("gt_words"),
-                         "gt_connection": group_1.get("gt_connection"),
-                         "human_performance": {
-                             "grouping": group_1_human_performance.get("grouping"),
-                             "connection": group_1_human_performance.get("connection")
-                         }
-                     },
-                     "group_2": {
-                         "id": group_2.get("group_id"),
-                         "gt_words": group_2.get("gt_words"),
-                         "gt_connection": group_2.get("gt_connection"),
-                         "human_performance": {
-                             "grouping": group_2_human_performance.get("grouping"),
-                             "connection": group_2_human_performance.get("connection")
-                         }
-                     },
-                     "group_3": {
-                         "id": group_3.get("group_id"),
-                         "gt_words": group_3.get("gt_words"),
-                         "gt_connection": group_3.get("gt_connection"),
-                         "human_performance": {
-                             "grouping": group_3_human_performance.get("grouping"),
-                             "connection": group_3_human_performance.get("connection")
-                         }
-                     },
-                     "group_4": {
-                         "id": group_4.get("group_id"),
-                         "gt_words": group_4.get("gt_words"),
-                         "gt_connection": group_4.get("gt_connection"),
-                         "human_performance": {
-                             "grouping": group_4_human_performance.get("grouping"),
-                             "connection": group_4_human_performance.get("connection")
-                         }
-                     },
-                 }
-                 key += 1
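
For context, the deleted script exposed three configurations ("ocw", "ocw_randomized", "ocw_wordnet"), each with train, validation, and test splits. A minimal sketch of how the loader was typically invoked while the script was still present in the TaatiTeam/OCW repo (recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# Load the main configuration; "ocw_randomized" and "ocw_wordnet" select the easy variants.
ocw = load_dataset("TaatiTeam/OCW", "ocw")

# Each example is one wall: its clue words, the four ground-truth groups,
# and the recorded human performance for grouping and connection.
wall = ocw["test"][0]
print(wall["wall_id"], wall["season"], wall["episode"])
print(wall["group_1"]["gt_words"], "->", wall["group_1"]["gt_connection"])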