# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" QA-SRL Bank v2 Dataset"""
import datasets
from dataclasses import dataclass
from typing import List, Tuple, Union, Set, Iterable
from pathlib import Path
import gzip
import json
_CITATION = """\
@inproceedings{fitzgerald2018large,
title={Large-Scale QA-SRL Parsing},
author={FitzGerald, Nicholas and Michael, Julian and He, Luheng and Zettlemoyer, Luke},
booktitle={Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
pages={2051--2060},
year={2018}
}
"""
_DESCRIPTION = """\
The dataset contains question-answer pairs that model verbal predicate-argument structure. Each question starts with a wh-word (Who, What, Where, When, etc.) and is built around a verb predicate in the sentence; the answers are phrases of the sentence.
This dataset, a.k.a "QASRL Bank", "QASRL-v2" or "QASRL-LS" (Large Scale), was constructed via crowdsourcing.
"""
_HOMEPAGE = "https://qasrl.org"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
_URLs = {
"qasrl_v2": "http://qasrl.org/data/qasrl-v2.tar",
"qasrl_v2_1": "https://qasrl.org/data/qasrl-v2_1.tar"
}
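# An answer span is a [start, end) pair of token indices; the end index is
# exclusive (see the use of range(*span) in _generate_examples below).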
SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
SUPPORTED_DOMAINS = {"wikinews", "wikipedia", "TQA"}
@dataclass
class QASRL2018BuilderConfig(datasets.BuilderConfig):
""" Allow the loader to provide a subset of acceptable domains. Acceptable domains are {"wikipedia", "wikinews", "TQA"}.
"""
dataset_version: str = "v2_1"
domains: Union[str, Iterable[str]] = "all" #
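

# A minimal usage sketch (assuming this script is passed to `datasets.load_dataset`,
# which forwards extra keyword arguments such as `domains` to the builder config):
#
#   dataset = datasets.load_dataset("qa_srl2018.py", "v2_1", domains={"wikipedia", "wikinews"})
#   print(dataset["train"][0])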
# Name of the builder class usually matches the script name, in CamelCase instead of snake_case
class QaSrl2018(datasets.GeneratorBasedBuilder):
"""QA-SRL2018: Large-Scale Question-Answer Driven Semantic Role Labeling corpus"""
VERSION = datasets.Version("1.2.0")
BUILDER_CONFIG_CLASS = QASRL2018BuilderConfig
BUILDER_CONFIGS = [
QASRL2018BuilderConfig(
name="v2", dataset_version="v2", version=VERSION,
description="This provides WIKIPEDIA dataset for qa_srl corpus (original version from Fitzgerald et. al., 2018)"
),
QASRL2018BuilderConfig(
name="v2_1", dataset_version="v2_1", version=VERSION,
description="This provides WIKIPEDIA dataset for qa_srl corpus (version 2.1)"
),
]
    DEFAULT_CONFIG_NAME = "v2_1"
def _info(self):
features = datasets.Features(
{
"sentence": datasets.Value("string"),
"sent_id": datasets.Value("string"),
"predicate_idx": datasets.Value("int32"),
"predicate": datasets.Value("string"),
"is_verbal": datasets.Value("bool"),
"verb_form": datasets.Value("string"),
"question": datasets.Sequence(datasets.Value("string")),
"answers": datasets.Sequence(datasets.Value("string")),
"answer_ranges": datasets.Sequence(SpanFeatureType)
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
            # This defines the columns of the dataset and their types
            features=features,
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
"""Returns SplitGenerators."""
        # download and extract the corpus tar archive; it unpacks to
        # qasrl-<version>/orig/{train,dev,test}.jsonl.gz
        qasrl_dataset_version = self.config.dataset_version
        corpus_base_path = Path(dl_manager.download_and_extract(_URLs[f"qasrl_{qasrl_dataset_version}"]))
        corpus_orig = corpus_base_path / f"qasrl-{qasrl_dataset_version}" / "orig"
# Handle domain selection
        domains: Set[str] = set()
        if self.config.domains == "all":
            domains = SUPPORTED_DOMAINS
        elif isinstance(self.config.domains, str):
            if self.config.domains in SUPPORTED_DOMAINS:
                domains = {self.config.domains}
            else:
                raise ValueError(f"Unrecognized domain '{self.config.domains}'; only {SUPPORTED_DOMAINS} are supported")
        else:
            domains = set(self.config.domains) & SUPPORTED_DOMAINS
            if len(domains) == 0:
                raise ValueError(f"Unrecognized domains '{self.config.domains}'; only {SUPPORTED_DOMAINS} are supported")
        self.config.domains = domains
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": corpus_orig / "train.jsonl.gz",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": corpus_orig / "dev.jsonl.gz",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": corpus_orig / "test.jsonl.gz",
},
),
]
def _generate_examples(self, filepath):
""" Yields examples from a '.jsonl.gz' file ."""
empty_to_underscore = lambda s: "_" if s=="" else s
with gzip.open(filepath, "rt") as f:
qa_counter = 0
for line in f:
sent_obj = json.loads(line.strip())
tokens = sent_obj['sentenceTokens']
sentence = ' '.join(tokens)
sent_id = sent_obj['sentenceId']
                # consider only selected domains; non-TQA sentence IDs encode the
                # domain in their second ":"-separated field
                sent_domain = "TQA" if sent_id.startswith("TQA") else sent_id.split(":")[1]
if sent_domain not in self.config.domains:
continue
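                # verbEntries maps each predicate's token index (as a string key)
                # to its inflected forms and its crowd-sourced question labels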
for predicate_idx, verb_obj in sent_obj['verbEntries'].items():
verb_forms = verb_obj['verbInflectedForms']
predicate = tokens[int(predicate_idx)]
for question_obj in verb_obj['questionLabels'].values():
question_slots = question_obj['questionSlots']
                        verb_form = question_slots['verb']
                        # the verb slot may include auxiliaries (e.g. "be pastParticiple");
                        # its last token is the key into verbInflectedForms
                        verb_surface = verb_forms[verb_form.split(" ")[-1]]
question_slots_in_order = [
question_slots["wh"],
question_slots["aux"],
question_slots["subj"],
verb_surface,
question_slots["obj"],
                            empty_to_underscore(question_slots["prep"]),  # some entries have "" where "_" is expected; normalize
question_slots["obj2"],
'?'
]
                        # retrieve answers: pool spans from all valid judgments
                        answer_spans = []
                        for ans in question_obj['answerJudgments']:
                            if ans['isValid']:
                                answer_spans.extend(ans['spans'])
                        # spans are [start, end) lists; make them tuples so they are hashable for deduplication
                        answer_spans = list(set(tuple(a) for a in answer_spans))
                        answer_strs = [' '.join([tokens[i] for i in range(*span)])
                                       for span in answer_spans]
yield qa_counter, {
"sentence": sentence,
"sent_id": sent_id,
"predicate_idx": predicate_idx,
"predicate": predicate,
"is_verbal": True,
"verb_form": verb_forms['stem'],
"question": question_slots_in_order,
"answers": answer_strs,
"answer_ranges": answer_spans
}
qa_counter += 1
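

if __name__ == "__main__":
    # A minimal smoke test (a sketch: it assumes this script file can be passed
    # to `datasets.load_dataset` and that the download URLs above are reachable)
    dataset = datasets.load_dataset(__file__, "v2_1", domains="wikipedia")
    print(dataset["validation"][0])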