|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""A Dataset loading script for the QANom dataset (klein et. al., COLING 2000).""" |
|
|
|
|
|
from dataclasses import dataclass |
|
from typing import Optional, Tuple, Union, Iterable, Set |
|
import datasets |
|
from pathlib import Path |
|
import pandas as pd |
|
import gzip |
|
import json |
|
import itertools |
|
|
|
_CITATION = """\
@inproceedings{klein2020qanom,
title={QANom: Question-Answer driven SRL for Nominalizations},
author={Klein, Ayal and Mamou, Jonathan and Pyatkin, Valentina and Stepanov, Daniela and He, Hangfeng and Roth, Dan and Zettlemoyer, Luke and Dagan, Ido},
booktitle={Proceedings of the 28th International Conference on Computational Linguistics},
pages={3069--3083},
year={2020}
}
"""

_DESCRIPTION = """\
The dataset contains question-answer pairs to model predicate-argument structure of deverbal nominalizations.
The questions start with wh-words (Who, What, Where, What, etc.) and contain the verbal form of a nominalization from the sentence;
the answers are phrases in the sentence.
See the paper for details: QANom: Question-Answer driven SRL for Nominalizations (Klein et. al., COLING 2020)
For previewing the QANom data along with the verbal annotations of QASRL, check out "https://browse.qasrl.org/".
This dataset was annotated by selected workers from Amazon Mechanical Turk.
"""

_HOMEPAGE = "https://github.com/kleinay/QANom"

_LICENSE = """MIT License

Copyright (c) 2020 Ayal Klein (kleinay)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""

# Download locations of the two equivalent releases of the corpus (CSV and jsonl.gz).
_URLs = {
    "qanom_csv": "https://github.com/kleinay/QANom/raw/master/qanom_dataset.zip",
    "qanom_jsonl": "https://qasrl.org/data/qanom.tar"
}

# An answer span is a [start, end) pair of token indices.
SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)

# Domains this loader can filter on (the middle field of a sentence id).
SUPPORTED_DOMAINS = {"wikinews", "wikipedia"}
# Deprecated misspelled alias, kept for backward compatibility with existing references.
SUPPOERTED_DOMAINS = SUPPORTED_DOMAINS
|
|
|
@dataclass
class QANomBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for QANom.

    Allow the loader to re-distribute the original dev and test splits between
    train, dev and test.
    """
    # Proportions (to-train, to-dev, to-test) by which the original dev file is
    # re-routed into the produced splits; default keeps all of dev in dev.
    redistribute_dev: Tuple[float, float, float] = (0., 1., 0.)
    # Same, for the original test file; default keeps all of test in test.
    redistribute_test: Tuple[float, float, float] = (0., 0., 1.)
    # Which release to read: "jsonl" (qasrl-v2-style jsonl.gz) or "csv" (annot.*.csv).
    load_from: str = "jsonl"
    # "all", a single domain name, or an iterable of domain names to keep.
    domains: Union[str, Iterable[str]] = "all"
|
|
|
|
|
|
|
|
|
class Qanom(datasets.GeneratorBasedBuilder):
    """QANom: Question-Answer driven SRL for Nominalizations corpus.

    Notice: This dataset generally follows the format of the `qa_srl` and `kleinay/qa_srl2018` datasets.
    However, it extends Features to include "is_verbal" and "verb_form" fields (required for nominalizations).
    In addition, and most critically, unlike these verbal qasrl datasets, in the qanom dataset some examples
    are for candidate nominalizations which are judged to be non-predicates ("is_verbal"==False) or predicates
    with no QAs. In these cases, the qa fields (question, answers, answer_ranges) would be empty lists."""

    VERSION = datasets.Version("1.2.0")

    BUILDER_CONFIG_CLASS = QANomBuilderConfig

    BUILDER_CONFIGS = [
        QANomBuilderConfig(
            name="default", version=VERSION, description="This provides the QANom dataset"
        ),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the dataset metadata (feature schema, description, homepage, license, citation)."""
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "sent_id": datasets.Value("string"),
                "predicate_idx": datasets.Value("int32"),
                "predicate": datasets.Value("string"),
                "is_verbal": datasets.Value("bool"),
                "verb_form": datasets.Value("string"),
                # A question is a sequence of surface slots (wh, aux, subj, verb, obj, prep, obj2, '?').
                "question": datasets.Sequence(datasets.Value("string")),
                "answers": datasets.Sequence(datasets.Value("string")),
                # Each answer range is a [start, end) token-index pair (see SpanFeatureType).
                "answer_ranges": datasets.Sequence(SpanFeatureType)
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _prepare_wiktionary_verb_inflections(self, dl_manager):
        """Download a Wiktionary-derived verb-inflection table and set `self.verb_inflections`,
        a dict mapping every inflected form to a dict of all five forms of its verb.

        Needed only for the CSV release, where the verb slot stores an inflection *name*
        rather than the inflected surface form (see `_generate_examples_from_csv`).
        """
        wiktionary_url = "https://raw.githubusercontent.com/nafitzgerald/nrl-qasrl/master/data/wiktionary/en_verb_inflections.txt"
        wiktionary_path = dl_manager.download(wiktionary_url)
        verb_map = {}
        with open(wiktionary_path, 'r', encoding="utf-8") as f:
            # Each line holds the 5 tab-separated forms of one verb.
            for line in f:
                inflections = line.strip().split('\t')
                stem, presentsingular3rd, presentparticiple, past, pastparticiple = inflections
                # Index the record under each of its forms, so any surface form can be looked up.
                for inf in inflections:
                    verb_map[inf] = {"Stem": stem, "PresentSingular3rd": presentsingular3rd, "PresentParticiple": presentparticiple, "Past": past, "PastParticiple": pastparticiple}
        self.verb_inflections = verb_map

    def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
        """Returns SplitGenerators.

        `config.redistribute_dev` / `config.redistribute_test` are (to-train, to-dev, to-test)
        proportions that re-route contiguous segments of the original dev/test files into the
        produced train/dev/test splits.
        """
        assert self.config.load_from in ("csv", "jsonl")

        # Normalize `config.domains` into a set of supported domain names.
        domains: Set[str] = set()
        if self.config.domains == "all":
            # Copy, so later mutation of `self.config.domains` cannot alter the module constant.
            domains = set(SUPPOERTED_DOMAINS)
        elif isinstance(self.config.domains, str):
            if self.config.domains in SUPPOERTED_DOMAINS:
                domains = {self.config.domains}
            else:
                raise ValueError(f"Unrecognized domain '{self.config.domains}'; only {SUPPOERTED_DOMAINS} are supported")
        else:
            domains = set(self.config.domains) & SUPPOERTED_DOMAINS
            if len(domains) == 0:
                raise ValueError(f"Unrecognized domains '{self.config.domains}'; only {SUPPOERTED_DOMAINS} are supported")
        self.config.domains = domains

        self.corpus_base_path = Path(dl_manager.download_and_extract(_URLs[f"qanom_{self.config.load_from}"]))
        if self.config.load_from == "csv":
            # The CSV release stores inflection names only; fetch surface forms from Wiktionary.
            self._prepare_wiktionary_verb_inflections(dl_manager)
            # Original train/dev/test files, in that order (indices align with `split_proportion`).
            self.dataset_files = [
                self.corpus_base_path / "annot.train.csv",
                self.corpus_base_path / "annot.dev.csv",
                self.corpus_base_path / "annot.test.csv"
            ]
        elif self.config.load_from == "jsonl":
            self.dataset_files = [
                self.corpus_base_path / "qanom" / "train.jsonl.gz",
                self.corpus_base_path / "qanom" / "dev.jsonl.gz",
                self.corpus_base_path / "qanom" / "test.jsonl.gz"
            ]

        # Cut the original dev/test files into three consecutive (start, end) fraction
        # segments - the parts destined for train, dev and test respectively.
        orig_dev_segments = ((0, self.config.redistribute_dev[0]),
                             (self.config.redistribute_dev[0], sum(self.config.redistribute_dev[:2])),
                             (sum(self.config.redistribute_dev[:2]), 1))
        orig_tst_segments = ((0, self.config.redistribute_test[0]),
                             (self.config.redistribute_test[0], sum(self.config.redistribute_test[:2])),
                             (sum(self.config.redistribute_test[:2]), 1))
        # Each *_proportion is a triple of (start, end) fractions - one per original file
        # (train, dev, test) - selecting which part of that file feeds the produced split.
        train_proportion = ((0, 1),
                            orig_dev_segments[0],
                            orig_tst_segments[0])
        dev_proportion = ((0, 0),
                          orig_dev_segments[1],
                          orig_tst_segments[1])
        test_proportion = ((0, 0),
                           orig_dev_segments[2],
                           orig_tst_segments[2])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split_proportion": train_proportion
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split_proportion": dev_proportion
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split_proportion": test_proportion
                },
            ),
        ]

    def _generate_examples(self, split_proportion):
        """Dispatch to the CSV or jsonl generator according to `config.load_from`."""
        if self.config.load_from == "csv":
            return self._generate_examples_from_csv(split_proportion=split_proportion)
        elif self.config.load_from == "jsonl":
            return self._generate_examples_from_jsonl(split_proportion=split_proportion)

    def _generate_examples_from_jsonl(self, split_proportion):
        """ Yields examples from a jsonl.gz file, in same format as qasrl-v2."""
        empty_to_underscore = lambda s: "_" if s == "" else s

        def read_lines(filepath):
            # One json-encoded sentence object per line.
            with gzip.open(filepath, "rt") as f:
                return [line.strip() for line in f]

        orig_splits_jsons = [read_lines(filepath)
                             for filepath in self.dataset_files]
        # Take the configured (start, end) fraction of each original file for this split.
        lines_from_orig_splits = [jsonlines[int(len(jsonlines)*start) : int(len(jsonlines)*end)]
                                  for jsonlines, (start, end) in zip(orig_splits_jsons, split_proportion)]
        this_split_lines = list(itertools.chain(*lines_from_orig_splits))
        qa_counter = 0  # running example id
        for line in this_split_lines:
            sent_obj = json.loads(line.strip())
            tokens = sent_obj['sentenceTokens']
            sentence = ' '.join(tokens)
            sent_id = sent_obj['sentenceId']
            # Sentence ids look like "<corpus>:<domain>:..."; keep only configured domains.
            sent_domain = sent_id.split(":")[1]
            if sent_domain not in self.config.domains:
                continue
            for predicate_idx, verb_obj in sent_obj['verbEntries'].items():
                verb_forms = verb_obj['verbInflectedForms']
                predicate = tokens[int(predicate_idx)]
                for question_obj in verb_obj['questionLabels'].values():
                    question_slots = question_obj['questionSlots']
                    verb_form = question_slots['verb']
                    # The verb slot's last word names the inflection to render
                    # (e.g. "be pastParticiple" -> verb_forms["pastParticiple"]).
                    verb_surface = verb_forms[verb_form.split(" ")[-1]]
                    question_slots_in_order = [
                        question_slots["wh"],
                        question_slots["aux"],
                        question_slots["subj"],
                        verb_surface,
                        question_slots["obj"],
                        empty_to_underscore(question_slots["prep"]),  # normalize "" to "_"
                        question_slots["obj2"],
                        '?'
                    ]
                    # Collect spans from all valid answer judgments, deduplicated.
                    answer_spans = []
                    for ans in question_obj['answerJudgments']:
                        if ans['isValid']:
                            answer_spans.extend(ans['spans'])
                    answer_spans = list(set(tuple(a) for a in answer_spans))
                    answer_strs = [' '.join([tokens[i] for i in range(*span)])
                                   for span in answer_spans]
                    yield qa_counter, {
                        "sentence": sentence,
                        "sent_id": sent_id,
                        # Cast the dict key to int, consistent with the csv path and the
                        # non-predicate examples below (feature type is int32).
                        "predicate_idx": int(predicate_idx),
                        "predicate": predicate,
                        "is_verbal": True,
                        "verb_form": verb_forms['stem'],
                        "question": question_slots_in_order,
                        "answers": answer_strs,
                        "answer_ranges": answer_spans
                    }
                    qa_counter += 1

            # Candidate nominalizations judged as non-predicates yield one QA-less example each.
            for non_predicate_idx, non_predicate in sent_obj["nonPredicates"].items():
                yield qa_counter, {
                    "sentence": sentence,
                    "sent_id": sent_obj['sentenceId'],
                    "predicate_idx": int(non_predicate_idx),
                    "predicate": non_predicate,
                    "is_verbal": False,
                    "verb_form": "",
                    "question": [],
                    "answers": [],
                    "answer_ranges": []
                }
                qa_counter += 1

    @classmethod
    def span_from_str(cls, s: str):
        """Parse a "start:end" string into a [start, end] pair of ints."""
        start, end = s.split(":")
        return [int(start), int(end)]

    def _generate_examples_from_csv(self, split_proportion):
        """ Yields examples from a 'annot.?.csv' file in QANom's format."""
        orig_splits_dfs = [pd.read_csv(filepath)
                           for filepath in self.dataset_files]
        # Take the configured (start, end) fraction of each original file for this split.
        segment_df_from_orig_splits = [df.iloc[int(len(df)*start) : int(len(df)*end)]
                                       for df, (start, end) in zip(orig_splits_dfs, split_proportion)]
        df = pd.concat(segment_df_from_orig_splits, ignore_index=True)
        for counter, row in df.iterrows():
            # Sentence ids look like "<corpus>:<domain>:..."; keep only configured domains.
            sent_domain = row.qasrl_id.split(":")[1]
            if sent_domain not in self.config.domains:
                continue

            # Absent (NaN) slots are rendered as "_", matching the jsonl format.
            na_to_underscore = lambda s: "_" if pd.isna(s) else str(s)
            question = [] if pd.isna(row.question) else list(map(na_to_underscore, [
                row.wh, row.aux, row.subj, row.verb_slot_inflection, row.obj, row.prep, row.obj2
            ])) + ['?']

            if question:
                # The CSV stores the inflection *name* in the verb slot (index 3); replace it
                # with the inflected surface form, via the Wiktionary table when possible.
                if row.verb_form in self.verb_inflections and not pd.isna(row.verb_slot_inflection):
                    verb_surface = self.verb_inflections[row.verb_form][row.verb_slot_inflection]
                else:
                    verb_surface = row.verb_form
                if not pd.isna(row.verb_prefix):
                    # "~!~" is the multi-value delimiter used throughout the CSV.
                    verb_surface = row.verb_prefix.replace("~!~", " ") + " " + verb_surface
                question[3] = verb_surface
            answers = [] if pd.isna(row.answer) else row.answer.split("~!~")
            answer_ranges = [] if pd.isna(row.answer_range) else [Qanom.span_from_str(s) for s in row.answer_range.split("~!~")]

            yield counter, {
                "sentence": row.sentence,
                "sent_id": row.qasrl_id,
                "predicate_idx": row.target_idx,
                "predicate": row.noun,
                "is_verbal": row.is_verbal,
                "verb_form": row.verb_form,
                "question": question,
                "answers": answers,
                "answer_ranges": answer_ranges
            }
|
|