Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
albertvillanova committed
Commit f4e51d5 (1 parent: 79a946d)

Delete loading script

Files changed (1):
  selqa.py  +0 -300
selqa.py DELETED
@@ -1,300 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""SelQA: A New Benchmark for Selection-Based Question Answering"""
-
-
-import csv
-import json
-
-import datasets
-
-
-_CITATION = """\
-@InProceedings{7814688,
-  author={T. {Jurczyk} and M. {Zhai} and J. D. {Choi}},
-  booktitle={2016 IEEE 28th International Conference on Tools with Artificial Intelligence (ICTAI)},
-  title={SelQA: A New Benchmark for Selection-Based Question Answering},
-  year={2016},
-  volume={},
-  number={},
-  pages={820-827},
-  doi={10.1109/ICTAI.2016.0128}
-}
-"""
-
-_DESCRIPTION = """\
-The SelQA dataset provides crowdsourced annotation for two selection-based question answering tasks:
-answer sentence selection and answer triggering.
-"""
-
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = ""
-
-# TODO: Add the license for the dataset here if you can find it
-_LICENSE = ""
-
-# The Hugging Face datasets library doesn't host the datasets; it only points to the original files.
-# These two tables map config attributes to the directory names and file extensions used in the
-# official repository (see `_split_generators` below).
-types = {
-    "answer_selection": "ass",
-    "answer_triggering": "at",
-}
-
-modes = {"analysis": "json", "experiments": "tsv"}
-
-
-class SelqaConfig(datasets.BuilderConfig):
-    """BuilderConfig for the SelQA dataset."""
-
-    def __init__(self, mode, type_, **kwargs):
-        super(SelqaConfig, self).__init__(**kwargs)
-        self.mode = mode
-        self.type_ = type_
-
-
-class Selqa(datasets.GeneratorBasedBuilder):
-    """A New Benchmark for Selection-Based Question Answering."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    # This dataset has multiple configurations, so a custom BuilderConfig subclass
-    # is used to store the extra `mode` and `type_` attributes.
-    BUILDER_CONFIG_CLASS = SelqaConfig
-
-    # Any of the configurations below can be loaded with, e.g.:
-    # data = datasets.load_dataset('selqa', 'answer_selection_analysis')
-    BUILDER_CONFIGS = [
-        SelqaConfig(
-            name="answer_selection_analysis",
-            mode="analysis",
-            type_="answer_selection",
-            version=VERSION,
-            description="This part covers answer selection analysis",
-        ),
-        SelqaConfig(
-            name="answer_selection_experiments",
-            mode="experiments",
-            type_="answer_selection",
-            version=VERSION,
-            description="This part covers answer selection experiments",
-        ),
-        SelqaConfig(
-            name="answer_triggering_analysis",
-            mode="analysis",
-            type_="answer_triggering",
-            version=VERSION,
-            description="This part covers answer triggering analysis",
-        ),
-        SelqaConfig(
-            name="answer_triggering_experiments",
-            mode="experiments",
-            type_="answer_triggering",
-            version=VERSION,
-            description="This part covers answer triggering experiments",
-        ),
-    ]
-
-    # A default configuration is not mandatory; set one only if it makes sense.
-    DEFAULT_CONFIG_NAME = "answer_selection_analysis"
-
-    def _info(self):
-        # `mode` and `type_` come from the SelqaConfig selected in BUILDER_CONFIGS above.
-        if self.config.mode == "experiments":
-            # "experiments" configs are flat (question, candidate, label) rows.
-            features = datasets.Features(
-                {
-                    "question": datasets.Value("string"),
-                    "candidate": datasets.Value("string"),
-                    "label": datasets.ClassLabel(names=["0", "1"]),
-                }
-            )
-        else:
-            if self.config.type_ == "answer_selection":
-                features = datasets.Features(
-                    {
-                        "section": datasets.Value("string"),
-                        "question": datasets.Value("string"),
-                        "article": datasets.Value("string"),
-                        "is_paraphrase": datasets.Value("bool"),
-                        "topic": datasets.ClassLabel(
-                            names=[
-                                "MUSIC",
-                                "TV",
-                                "TRAVEL",
-                                "ART",
-                                "SPORT",
-                                "COUNTRY",
-                                "MOVIES",
-                                "HISTORICAL EVENTS",
-                                "SCIENCE",
-                                "FOOD",
-                            ]
-                        ),
-                        "answers": datasets.Sequence(datasets.Value("int32")),
-                        "candidates": datasets.Sequence(datasets.Value("string")),
-                        "q_types": datasets.Sequence(
-                            datasets.ClassLabel(names=["what", "why", "when", "who", "where", "how", ""])
-                        ),
-                    }
-                )
-            else:
-                features = datasets.Features(
-                    {
-                        "section": datasets.Value("string"),
-                        "question": datasets.Value("string"),
-                        "article": datasets.Value("string"),
-                        "is_paraphrase": datasets.Value("bool"),
-                        "topic": datasets.ClassLabel(
-                            names=[
-                                "MUSIC",
-                                "TV",
-                                "TRAVEL",
-                                "ART",
-                                "SPORT",
-                                "COUNTRY",
-                                "MOVIES",
-                                "HISTORICAL EVENTS",
-                                "SCIENCE",
-                                "FOOD",
-                            ]
-                        ),
-                        "q_types": datasets.Sequence(
-                            datasets.ClassLabel(names=["what", "why", "when", "who", "where", "how", ""])
-                        ),
-                        "candidate_list": datasets.Sequence(
-                            {
-                                "article": datasets.Value("string"),
-                                "section": datasets.Value("string"),
-                                "candidates": datasets.Sequence(datasets.Value("string")),
-                                "answers": datasets.Sequence(datasets.Value("int32")),
-                            }
-                        ),
-                    }
-                )
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types;
-            # the features are defined above because they differ between configurations.
-            features=features,
-            # If there were a common (input, target) tuple among the features, it would be
-            # specified here and used when as_supervised=True is passed to builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # This method downloads the data and defines the splits depending on the configuration
-        # selected by the user (available as self.config).
-        # dl_manager is a datasets.download.DownloadManager; it accepts any nested list/dict of URLs
-        # and returns the same structure with each URL replaced by a path to the local cached file
-        # (archives are extracted by default and the path to the extracted folder is returned).
-        urls = {
-            "train": f"https://raw.githubusercontent.com/emorynlp/selqa/master/{types[self.config.type_]}/selqa-{types[self.config.type_]}-train.{modes[self.config.mode]}",
-            "dev": f"https://raw.githubusercontent.com/emorynlp/selqa/master/{types[self.config.type_]}/selqa-{types[self.config.type_]}-dev.{modes[self.config.mode]}",
-            "test": f"https://raw.githubusercontent.com/emorynlp/selqa/master/{types[self.config.type_]}/selqa-{types[self.config.type_]}-test.{modes[self.config.mode]}",
-        }
-        data_dir = dl_manager.download_and_extract(urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_dir["train"],
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": data_dir["test"], "split": "test"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_dir["dev"],
-                    "split": "dev",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples."""
-        # This method receives the `gen_kwargs` defined in `_split_generators` above.
-        # It opens the given file and yields (key, example) tuples from the dataset.
-        # The key itself is not important; it exists mostly for legacy reasons (inherited from tfds).
-        with open(filepath, encoding="utf-8") as f:
-            if self.config.mode == "experiments":
-                csv_reader = csv.DictReader(
-                    f, delimiter="\t", quoting=csv.QUOTE_NONE, fieldnames=["question", "candidate", "label"]
-                )
-                for id_, row in enumerate(csv_reader):
-                    yield id_, row
-            else:
-                if self.config.type_ == "answer_selection":
-                    for row in f:
-                        data = json.loads(row)
-                        for id_, item in enumerate(data):
-                            yield id_, {
-                                "section": item["section"],
-                                "question": item["question"],
-                                "article": item["article"],
-                                "is_paraphrase": item["is_paraphrase"],
-                                "topic": item["topic"],
-                                "answers": item["answers"],
-                                "candidates": item["candidates"],
-                                "q_types": item["q_types"],
-                            }
-                else:
-                    for row in f:
-                        data = json.loads(row)
-                        for id_, item in enumerate(data):
-                            candidate_list = []
-                            for entity in item["candidate_list"]:
-                                candidate_list.append(
-                                    {
-                                        "article": entity["article"],
-                                        "section": entity["section"],
-                                        "answers": entity["answers"],
-                                        "candidates": entity["candidates"],
-                                    }
-                                )
-                            yield id_, {
-                                "section": item["section"],
-                                "question": item["question"],
-                                "article": item["article"],
-                                "is_paraphrase": item["is_paraphrase"],
-                                "topic": item["topic"],
-                                "q_types": item["q_types"],
-                                "candidate_list": candidate_list,
-                            }
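
For context, here is a minimal usage sketch of how a builder like the deleted script is consumed through the datasets API. The repository id "selqa" is assumed from this repo's name; the config, split, and feature names all come from the script above. After this commit, the same call would resolve to the repo's auto-converted parquet files (see the Formats tag) rather than executing the script.

import datasets

# Load one of the four configurations defined in BUILDER_CONFIGS above; this is
# the default config, so the second argument could also be omitted.
# (Repository id "selqa" is an assumption for illustration.)
ds = datasets.load_dataset("selqa", "answer_selection_analysis")

# The builder defines train/validation/test splits with the features from _info().
example = ds["train"][0]
print(example["question"])        # a crowdsourced question
print(example["candidates"][:3])  # first three candidate answer sentences
print(example["answers"])         # indices of the correct candidates

A single builder covers all four configs because they differ only along the two axes SelqaConfig captures: the task (answer selection vs. answer triggering) and the source format (json for analysis, tsv for experiments).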