|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""A Dataset loading script for the QANom dataset (klein et. al., COLING 2000).""" |
|
|
|
|
|
from dataclasses import dataclass |
|
from typing import Optional, Tuple |
|
import datasets |
|
from pathlib import Path |
|
import pandas as pd |
|
|
|
|
|
_CITATION = """\ |
|
@inproceedings{klein2020qanom, |
|
title={QANom: Question-Answer driven SRL for Nominalizations}, |
|
author={Klein, Ayal and Mamou, Jonathan and Pyatkin, Valentina and Stepanov, Daniela and He, Hangfeng and Roth, Dan and Zettlemoyer, Luke and Dagan, Ido}, |
|
booktitle={Proceedings of the 28th International Conference on Computational Linguistics}, |
|
pages={3069--3083}, |
|
year={2020} |
|
} |
|
""" |
|
|
|
|
|
_DESCRIPTION = """\ |
|
The dataset contains question-answer pairs to model predicate-argument structure of deverbal nominalizations. |
|
The questions start with wh-words (Who, What, Where, When, etc.) and contain the verbal form of a nominalization from the sentence;
|
the answers are phrases in the sentence. |
|
See the paper for details: QANom: Question-Answer driven SRL for Nominalizations (Klein et. al., COLING 2020) |
|
For previewing the QANom data along with the verbal annotations of QASRL, check out "https://browse.qasrl.org/". |
|
This dataset was annotated by selected workers from Amazon Mechanical Turk. |
|
""" |
|
|
|
_HOMEPAGE = "https://github.com/kleinay/QANom" |
|
|
|
_LICENSE = """MIT License |
|
|
|
Copyright (c) 2020 Ayal Klein (kleinay) |
|
|
|
Permission is hereby granted, free of charge, to any person obtaining a copy |
|
of this software and associated documentation files (the "Software"), to deal |
|
in the Software without restriction, including without limitation the rights |
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
|
copies of the Software, and to permit persons to whom the Software is |
|
furnished to do so, subject to the following conditions: |
|
|
|
The above copyright notice and this permission notice shall be included in all |
|
copies or substantial portions of the Software. |
|
|
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
|
SOFTWARE.""" |
|
|
|
|
|
# Download location of the full QANom dataset archive (contains the
# annot.{train,dev,test}.csv annotation files).
_URLs = {
    "qanom_zip": "https://github.com/kleinay/QANom/raw/master/qanom_dataset.zip"
}

# A token span encoded as a length-2 int sequence, parsed from "start:end"
# strings in the CSV (presumably end-exclusive token indices — TODO confirm
# against the QANom data format).
SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
|
|
|
@dataclass
class QANomBuilderConfig(datasets.BuilderConfig):
    """ Allow the loader to re-distribute the original dev and test splits between train, dev and test. """
    # Fractions of the ORIGINAL dev split routed to (train, dev, test); each
    # triple is expected to sum to 1.0. Default keeps all of dev in dev.
    redistribute_dev: Tuple[float, float, float] = (0., 1., 0.)
    # Fractions of the ORIGINAL test split routed to (train, dev, test).
    # Default keeps all of test in test.
    redistribute_test: Tuple[float, float, float] = (0., 0., 1.)
|
|
|
|
|
|
|
|
|
class Qanom(datasets.GeneratorBasedBuilder):
    """QANom: Question-Answer driven SRL for Nominalizations corpus.

    Notice: This dataset generally follows the format of the `qa_srl` and
    `kleinay/qa_srl2018` datasets. However, it extends Features to include
    "is_verbal" and "verb_form" fields (required for nominalizations).
    In addition, and most critically, unlike these verbal qasrl datasets, in the
    qanom dataset some examples are for candidate nominalizations which are judged
    to be non-predicates ("is_verbal"==False) or predicates with no QAs.
    In these cases, the qa fields (question, answers, answer_ranges) would be empty lists.
    """

    VERSION = datasets.Version("1.0.2")

    BUILDER_CONFIG_CLASS = QANomBuilderConfig

    BUILDER_CONFIGS = [
        QANomBuilderConfig(
            name="default", version=VERSION, description="This provides the QANom dataset"
        ),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the DatasetInfo (features schema, citation, license) for QANom."""
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "sent_id": datasets.Value("string"),
                "predicate_idx": datasets.Value("int32"),
                "predicate": datasets.Value("string"),
                "is_verbal": datasets.Value("bool"),
                "verb_form": datasets.Value("string"),
                # 7 question-template slots plus a trailing "?"; empty list when
                # the candidate predicate has no QAs.
                "question": datasets.Sequence(datasets.Value("string")),
                "answers": datasets.Sequence(datasets.Value("string")),
                "answer_ranges": datasets.Sequence(SpanFeatureType),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _prepare_wiktionary_verb_inflections(self, dl_manager):
        """Download the Wiktionary verb-inflection table and build
        `self.verb_inflections`: a dict mapping EVERY inflected surface form to
        the full 5-way inflection record of its verb.
        """
        wiktionary_url = "https://raw.githubusercontent.com/nafitzgerald/nrl-qasrl/master/data/wiktionary/en_verb_inflections.txt"
        wiktionary_path = dl_manager.download(wiktionary_url)
        verb_map = {}
        with open(wiktionary_path, 'r', encoding="utf-8") as f:
            # One verb per line: 5 tab-separated inflections.
            for line in f:
                inflections = line.strip().split('\t')
                stem, present_singular_3rd, present_participle, past, past_participle = inflections
                # Build the record once per verb and share it across all of the
                # verb's surface forms (it is only ever read).
                record = {
                    "Stem": stem,
                    "PresentSingular3rd": present_singular_3rd,
                    "PresentParticiple": present_participle,
                    "Past": past,
                    "PastParticiple": past_participle,
                }
                for inflection in inflections:
                    verb_map[inflection] = record
        self.verb_inflections = verb_map

    def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
        """Returns SplitGenerators.

        The original dev and test splits may be re-distributed between
        train/dev/test according to `self.config.redistribute_dev` and
        `self.config.redistribute_test` (each a (train, dev, test) triple of
        fractions, expected to sum to 1.0).
        """
        self._prepare_wiktionary_verb_inflections(dl_manager)

        self.corpus_base_path = Path(dl_manager.download_and_extract(_URLs["qanom_zip"]))
        self.dataset_files = [
            self.corpus_base_path / "annot.train.csv",
            self.corpus_base_path / "annot.dev.csv",
            self.corpus_base_path / "annot.test.csv",
        ]

        def _segments(fractions):
            # Cut an original split into three contiguous (start, end) fraction
            # segments, destined for train, dev and test respectively.
            first = fractions[0]
            second = fractions[0] + fractions[1]
            return ((0, first), (first, second), (second, 1))

        orig_dev_segments = _segments(self.config.redistribute_dev)
        orig_tst_segments = _segments(self.config.redistribute_test)
        # Each proportion is a triple of (start, end) fraction pairs — one per
        # original file (train, dev, test) — selecting the slice of that file
        # that belongs to the generated split. Original train always goes
        # entirely to train.
        train_proportion = ((0, 1), orig_dev_segments[0], orig_tst_segments[0])
        dev_proportion = ((0, 0), orig_dev_segments[1], orig_tst_segments[1])
        test_proportion = ((0, 0), orig_dev_segments[2], orig_tst_segments[2])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split_proportion": train_proportion},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split_proportion": dev_proportion},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split_proportion": test_proportion},
            ),
        ]

    @classmethod
    def span_from_str(cls, s: str):
        """Parse a "start:end" string into a [start, end] pair of ints."""
        start, end = s.split(":")
        return [int(start), int(end)]

    def _generate_examples(self, split_proportion=None):
        """ Yields examples from the 'annot.?.csv' files in QANom's format.

        `split_proportion` is a triple of (start, end) fraction pairs — one per
        original split file — selecting the contiguous slice of each file that
        belongs to the split being generated (see `_split_generators`).
        """
        orig_splits_dfs = [pd.read_csv(filepath) for filepath in self.dataset_files]
        segment_df_from_orig_splits = [
            df.iloc[int(len(df) * start): int(len(df) * end)]
            for df, (start, end) in zip(orig_splits_dfs, split_proportion)
        ]
        df = pd.concat(segment_df_from_orig_splits, ignore_index=True)

        def na_to_underscore(s):
            # QANom encodes an empty question-template slot as "_".
            return "_" if pd.isna(s) else str(s)

        for counter, row in df.iterrows():
            # Reconstruct the 7-slot question template plus a trailing "?";
            # empty list when the candidate has no QA (non-predicate, or a
            # predicate annotated with no questions).
            question = [] if pd.isna(row.question) else list(map(na_to_underscore, [
                row.wh, row.aux, row.subj, row.verb_slot_inflection, row.obj, row.prep, row.obj2
            ])) + ['?']

            if question:
                # Replace the verb slot (index 3) with the inflected surface
                # form looked up in the Wiktionary table.
                if row.verb_form in self.verb_inflections and not pd.isna(row.verb_slot_inflection):
                    verb_surface = self.verb_inflections[row.verb_form][row.verb_slot_inflection]
                else:
                    verb_surface = row.verb_form
                if not pd.isna(row.verb_prefix):
                    # "~!~" is QANom's multi-value separator; a prefix may be
                    # multi-word.
                    verb_surface = row.verb_prefix.replace("~!~", " ") + " " + verb_surface
                question[3] = verb_surface

            answers = [] if pd.isna(row.answer) else row.answer.split("~!~")
            answer_ranges = [] if pd.isna(row.answer_range) else [
                Qanom.span_from_str(s) for s in row.answer_range.split("~!~")
            ]

            yield counter, {
                "sentence": row.sentence,
                "sent_id": row.qasrl_id,
                "predicate_idx": row.target_idx,
                "predicate": row.noun,
                "is_verbal": row.is_verbal,
                "verb_form": row.verb_form,
                "question": question,
                "answers": answers,
                "answer_ranges": answer_ranges,
            }
|
|