Load from reformatted data (jsonl, at qasrl.org) by default - aligned question slots with qasrl-2018 data
Browse files
qanom.py
CHANGED
@@ -20,7 +20,9 @@ from typing import Optional, Tuple
|
|
20 |
import datasets
|
21 |
from pathlib import Path
|
22 |
import pandas as pd
|
23 |
-
|
|
|
|
|
24 |
|
25 |
_CITATION = """\
|
26 |
@inproceedings{klein2020qanom,
|
@@ -68,7 +70,8 @@ SOFTWARE."""
|
|
68 |
|
69 |
|
70 |
_URLs = {
|
71 |
-
"
|
|
|
72 |
}
|
73 |
|
74 |
SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
|
@@ -78,7 +81,7 @@ class QANomBuilderConfig(datasets.BuilderConfig):
|
|
78 |
""" Allow the loader to re-distribute the original dev and test splits between train, dev and test. """
|
79 |
redistribute_dev: Tuple[float, float, float] = (0., 1., 0.)
|
80 |
redistribute_test: Tuple[float, float, float] = (0., 0., 1.)
|
81 |
-
|
82 |
|
83 |
|
84 |
# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
|
@@ -90,7 +93,7 @@ class Qanom(datasets.GeneratorBasedBuilder):
|
|
90 |
are for candidate nominalizations which are judged to be non-predicates ("is_verbal"==False) or predicates with no QAs.
|
91 |
In these cases, the qa fields (question, answers, answer_ranges) would be empty lists. """
|
92 |
|
93 |
-
VERSION = datasets.Version("1.0
|
94 |
|
95 |
BUILDER_CONFIG_CLASS = QANomBuilderConfig
|
96 |
|
@@ -149,16 +152,25 @@ class Qanom(datasets.GeneratorBasedBuilder):
|
|
149 |
|
150 |
def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
|
151 |
"""Returns SplitGenerators."""
|
152 |
-
|
153 |
-
self.
|
154 |
|
155 |
-
self.corpus_base_path = Path(dl_manager.download_and_extract(_URLs["
|
156 |
-
|
157 |
-
|
158 |
-
self.
|
159 |
-
self.
|
160 |
-
|
161 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
162 |
|
163 |
# proportional segment (start,end) to take from every original split to returned SplitGenerator
|
164 |
orig_dev_segments = ((0, self.config.redistribute_dev[0]),
|
@@ -201,12 +213,93 @@ class Qanom(datasets.GeneratorBasedBuilder):
|
|
201 |
),
|
202 |
]
|
203 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
204 |
@classmethod
|
205 |
def span_from_str(cls, s:str):
|
206 |
start, end = s.split(":")
|
207 |
return [int(start), int(end)]
|
208 |
|
209 |
-
def
|
210 |
|
211 |
""" Yields examples from a 'annot.?.csv' file in QANom's format."""
|
212 |
|
|
|
20 |
import datasets
|
21 |
from pathlib import Path
|
22 |
import pandas as pd
|
23 |
+
import gzip
|
24 |
+
import json
|
25 |
+
import itertools
|
26 |
|
27 |
_CITATION = """\
|
28 |
@inproceedings{klein2020qanom,
|
|
|
70 |
|
71 |
|
72 |
_URLs = {
|
73 |
+
"qanom_csv": "https://github.com/kleinay/QANom/raw/master/qanom_dataset.zip",
|
74 |
+
"qanom_jsonl": "https://qasrl.org/data/qanom.tar"
|
75 |
}
|
76 |
|
77 |
SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
|
|
|
81 |
""" Allow the loader to re-distribute the original dev and test splits between train, dev and test. """
|
82 |
redistribute_dev: Tuple[float, float, float] = (0., 1., 0.)
|
83 |
redistribute_test: Tuple[float, float, float] = (0., 0., 1.)
|
84 |
+
load_from: str = "jsonl" # "csv" or "jsonl"
|
85 |
|
86 |
|
87 |
# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
|
|
|
93 |
are for candidate nominalizations which are judged to be non-predicates ("is_verbal"==False) or predicates with no QAs.
|
94 |
In these cases, the qa fields (question, answers, answer_ranges) would be empty lists. """
|
95 |
|
96 |
+
VERSION = datasets.Version("1.1.0")
|
97 |
|
98 |
BUILDER_CONFIG_CLASS = QANomBuilderConfig
|
99 |
|
|
|
152 |
|
153 |
def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
|
154 |
"""Returns SplitGenerators."""
|
155 |
+
|
156 |
+
assert self.config.load_from in ("csv", "jsonl")
|
157 |
|
158 |
+
self.corpus_base_path = Path(dl_manager.download_and_extract(_URLs[f"qanom_{self.config.load_from}"]))
|
159 |
+
if self.config.load_from == "csv":
|
160 |
+
# prepare wiktionary for verb inflections inside 'self.verb_inflections'
|
161 |
+
self._prepare_wiktionary_verb_inflections(dl_manager)
|
162 |
+
self.dataset_files = [
|
163 |
+
self.corpus_base_path / "annot.train.csv",
|
164 |
+
self.corpus_base_path / "annot.dev.csv",
|
165 |
+
self.corpus_base_path / "annot.test.csv"
|
166 |
+
]
|
167 |
+
elif self.config.load_from == "jsonl":
|
168 |
+
self.dataset_files = [
|
169 |
+
self.corpus_base_path / "qanom" / "train.jsonl.gz",
|
170 |
+
self.corpus_base_path / "qanom" / "dev.jsonl.gz",
|
171 |
+
self.corpus_base_path / "qanom" / "test.jsonl.gz"
|
172 |
+
]
|
173 |
+
|
174 |
|
175 |
# proportional segment (start,end) to take from every original split to returned SplitGenerator
|
176 |
orig_dev_segments = ((0, self.config.redistribute_dev[0]),
|
|
|
213 |
),
|
214 |
]
|
215 |
|
216 |
+
def _generate_examples(self, split_proportion):
|
217 |
+
if self.config.load_from == "csv":
|
218 |
+
return self._generate_examples_from_csv(split_proportion=split_proportion)
|
219 |
+
elif self.config.load_from == "jsonl":
|
220 |
+
return self._generate_examples_from_jsonl(split_proportion=split_proportion)
|
221 |
+
|
222 |
+
def _generate_examples_from_jsonl(self, split_proportion):
|
223 |
+
""" Yields examples from a jsonl.gz file, in same format as qasrl-v2."""
|
224 |
+
empty_to_underscore = lambda s: "_" if s=="" else s
|
225 |
+
def read_lines(filepath):
|
226 |
+
with gzip.open(filepath, "rt") as f:
|
227 |
+
return [line.strip() for line in f]
|
228 |
+
|
229 |
+
|
230 |
+
orig_splits_jsons = [read_lines(filepath)
|
231 |
+
for filepath in self.dataset_files] # train, dev, test
|
232 |
+
# Each json-line stands for a sentence with several predicates and QAs; we will redistribute
|
233 |
+
# the new proportions of the splits on the sentence level for convenience
|
234 |
+
lines_from_orig_splits = [jsonlines[int(len(jsonlines)*start) : int(len(jsonlines)*end)]
|
235 |
+
for jsonlines, (start,end) in zip(orig_splits_jsons, split_proportion)]
|
236 |
+
this_split_lines = list(itertools.chain(*lines_from_orig_splits))
|
237 |
+
qa_counter = 0
|
238 |
+
for line in this_split_lines:
|
239 |
+
sent_obj = json.loads(line.strip())
|
240 |
+
tokens = sent_obj['sentenceTokens']
|
241 |
+
sentence = ' '.join(tokens)
|
242 |
+
for predicate_idx, verb_obj in sent_obj['verbEntries'].items():
|
243 |
+
verb_forms = verb_obj['verbInflectedForms']
|
244 |
+
predicate = tokens[int(predicate_idx)]
|
245 |
+
for question_obj in verb_obj['questionLabels'].values():
|
246 |
+
question_slots = question_obj['questionSlots']
|
247 |
+
verb_form = question_slots['verb']
|
248 |
+
verb_surface = verb_forms[verb_form.split(" ")[-1]] # if verb_form in verb_forms else verb_forms['stem']
|
249 |
+
question_slots_in_order = [
|
250 |
+
question_slots["wh"],
|
251 |
+
question_slots["aux"],
|
252 |
+
question_slots["subj"],
|
253 |
+
verb_surface,
|
254 |
+
question_slots["obj"],
|
255 |
+
empty_to_underscore(question_slots["prep"]), # fix bug in data
|
256 |
+
question_slots["obj2"],
|
257 |
+
'?'
|
258 |
+
]
|
259 |
+
# retrieve answers
|
260 |
+
answer_spans = []
|
261 |
+
for ans in question_obj['answerJudgments']:
|
262 |
+
if ans['isValid']:
|
263 |
+
answer_spans.extend(ans['spans'])
|
264 |
+
answer_spans = list(set(tuple(a) for a in answer_spans))
|
265 |
+
# answer_spans = list(set(answer_spans))
|
266 |
+
answer_strs = [' '.join([tokens[i] for i in range(*span)])
|
267 |
+
for span in answer_spans]
|
268 |
+
|
269 |
+
yield qa_counter, {
|
270 |
+
"sentence": sentence,
|
271 |
+
"sent_id": sent_obj['sentenceId'],
|
272 |
+
"predicate_idx": predicate_idx,
|
273 |
+
"predicate": predicate,
|
274 |
+
"is_verbal": True,
|
275 |
+
"verb_form": verb_forms['stem'],
|
276 |
+
"question": question_slots_in_order,
|
277 |
+
"answers": answer_strs,
|
278 |
+
"answer_ranges": answer_spans
|
279 |
+
}
|
280 |
+
qa_counter += 1
|
281 |
+
# also return non-predicates with empty data
|
282 |
+
for non_predicate_idx, non_predicate in sent_obj["nonPredicates"].items():
|
283 |
+
yield qa_counter, {
|
284 |
+
"sentence": sentence,
|
285 |
+
"sent_id": sent_obj['sentenceId'],
|
286 |
+
"predicate_idx": int(non_predicate_idx),
|
287 |
+
"predicate": non_predicate,
|
288 |
+
"is_verbal": False,
|
289 |
+
"verb_form": "",
|
290 |
+
"question": [],
|
291 |
+
"answers": [],
|
292 |
+
"answer_ranges": []
|
293 |
+
}
|
294 |
+
qa_counter += 1
|
295 |
+
|
296 |
+
|
297 |
@classmethod
def span_from_str(cls, s: str):
    """Parse a 'start:end' string into a two-int [start, end] span."""
    bounds = s.split(":")
    first, second = bounds  # exactly two pieces expected, like the "s:e" format
    return [int(first), int(second)]
|
301 |
|
302 |
+
def _generate_examples_from_csv(self, split_proportion):
|
303 |
|
304 |
""" Yields examples from a 'annot.?.csv' file in QANom's format."""
|
305 |
|