"""Quasar: Datasets for Question Answering by Search and Reading""" |


import gzip
import json
from collections import defaultdict

import datasets
|
_CITATION = """\ |
|
@article{dhingra2017quasar, |
|
title={Quasar: Datasets for Question Answering by Search and Reading}, |
|
author={Dhingra, Bhuwan and Mazaitis, Kathryn and Cohen, William W}, |
|
journal={arXiv preprint arXiv:1707.03904}, |
|
year={2017} |
|
} |
|
""" |
|
_UNKNOWN_RELATION = "UNK_RELATION"
_UNKNOWN_ANS_TYPE = "UNK_ANS_TYPE"
_UNKNOWN_GENRE = "UNK_GENRE"
_QUASAR_S = "quasar-s"
_QUASAR_T = "quasar-t"
_QUASAR_T_NPS = "quasar-t-nps"
_WHITE_SPACE = " "
|
_DESCRIPTION = """\ |
|
We present two new large-scale datasets aimed at evaluating systems designed to comprehend a natural language query and extract its answer from a large corpus of text. The Quasar-S dataset consists of 37000 cloze-style (fill-in-the-gap) queries constructed from definitions of software entity tags on the popular website Stack Overflow. The posts and comments on the website serve as the background corpus for answering the cloze questions. The Quasar-T dataset consists of 43000 open-domain trivia questions and their answers obtained from various internet sources. ClueWeb09 serves as the background corpus for extracting these answers. We pose these datasets as a challenge for two related subtasks of factoid Question Answering: (1) searching for relevant pieces of text that include the correct answer to a query, and (2) reading the retrieved text to answer the query. |
|
""" |
|
|
|
_HOMEPAGE = "https://github.com/bdhingra/quasar" |
|
|
|
_DATA_URL = "http://curtis.ml.cmu.edu/datasets/quasar" |
|
|
|
QUASAR_S_DESC = """\
Quasar-S consists of cloze-style questions over software entities. The following information is provided.
uid: Unique id
question: Text of the question
answer: Text of the answer
context_short: List[{confidence: float, content: str}]
context_long: The same as context_short, but from a different data source. See the paper for more info.
relation: For some questions in Quasar-S, the relation type between the head entity of the cloze question and the
          answer entity is provided. For the other questions, this field takes the value "UNK_RELATION". For example,
          [question]: jarjar -- jar jar links http : code.google.com p @placeholder is a utility that
                      makes it easy to repackage java libraries and embed them into your own distribution .,
          [answer]: jarjar,
          [relation]: synonym
"""
|
|
|
QUASAR_T_DESC = """\
Quasar-T consists of trivia questions. The following information is provided.
uid: Unique id
question: Text of the question
answer: Text of the answer
context_short: List[{confidence: float, content: str}]
context_long: The same as context_short, but from a different data source. See the paper for more info.
answer_type: Whether the answer is a date/time or number. This is known for some answers; for the others, this field
             takes the value "UNK_ANS_TYPE"
genre: Whether the question is from the genre of arts or math/science. This is known for some questions; for the
       others, this field takes the value "UNK_GENRE"
"""
|
|
|
QUASAR_T_NPS_DESC = """\
Quasar-T consists of trivia questions. The following information is provided.
uid: Unique id
question: Text of the question
answer: Text of the answer
context_short:
    List[
        {
            confidence: float,
            content: str,
            content_tokens: List[str],
            nps: List[{'content': str, 'start_token_id': int}]
        }
    ]
Here, content_tokens is a whitespace tokenization of content. `nps` are contiguous chunks of NN*-tagged tokens from
the context, provided as candidate answers.
context_long: The same as context_short, but from a different data source. See the paper for more info.
answer_type: Whether the answer is a date/time or number. This is known for some answers; for the others, this field
             takes the value "UNK_ANS_TYPE"
genre: Whether the question is from the genre of arts or math/science. This is known for some questions; for the
       others, this field takes the value "UNK_GENRE"
"""
|
|
|
|
|
class Quasar(datasets.GeneratorBasedBuilder):
    """Quasar: Datasets for Question Answering by Search and Reading: https://github.com/bdhingra/quasar"""

    VERSION = datasets.Version("1.0.0")
|
|
|
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=_QUASAR_S,
            version=VERSION,
            description=QUASAR_S_DESC,
        ),
        datasets.BuilderConfig(
            name=_QUASAR_T,
            version=VERSION,
            description=QUASAR_T_DESC,
        ),
        datasets.BuilderConfig(
            name=_QUASAR_T_NPS,
            version=VERSION,
            description=QUASAR_T_NPS_DESC,
        ),
    ]
|
|
|
DEFAULT_CONFIG_NAME = _QUASAR_S |
|
|
|
    def _info(self):
        features = datasets.Features(
            {
                "uid": datasets.Value("string"),
                "question": datasets.Value("string"),
                "context_short": datasets.Sequence(
                    {
                        "confidence": datasets.Value("float"),
                        "content": datasets.Value("string"),
                    }
                ),
                "context_long": datasets.Sequence(
                    {
                        "confidence": datasets.Value("float"),
                        "content": datasets.Value("string"),
                    }
                ),
                "tags": datasets.Sequence(datasets.Value("string")),
                "answer": datasets.Value("string"),
            }
        )

        if self.config.name == _QUASAR_S:
            features.update({"relation": datasets.Value("string")})
        elif self.config.name.startswith(_QUASAR_T):
            features.update(
                {
                    "answer_type": datasets.Value("string"),
                    "genre": datasets.Value("string"),
                }
            )

        if self.config.name == _QUASAR_T_NPS:
            # Quasar-T-NPS extends both context fields with token lists and
            # noun-phrase candidate answers.
            for _type in ["short", "long"]:
                features[f"context_{_type}"] = datasets.Sequence(
                    {
                        "confidence": datasets.Value("float"),
                        "content": datasets.Value("string"),
                        "content_tokens": datasets.Sequence(datasets.Value("string")),
                        "nps": datasets.Sequence(
                            {
                                "content": datasets.Value("string"),
                                "start_token_id": datasets.Value("int32"),
                            }
                        ),
                    }
                )
|
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
|
|
|
    def _split_generators(self, dl_manager):
        paths = {}
        phases = ["train", "dev", "test"]
        if self.config.name == _QUASAR_S:
            data_path = f"{_DATA_URL}/{_QUASAR_S}"
            for phase in phases:
                paths[phase] = {
                    "qa": dl_manager.download(f"{data_path}/questions/{phase}_questions.json.gz"),
                    "contexts_long": dl_manager.download(f"{data_path}/contexts/long/{phase}_contexts.json.gz"),
                    "contexts_short": dl_manager.download(f"{data_path}/contexts/short/{phase}_contexts.json.gz"),
                }
            paths["relations"] = dl_manager.download(f"{data_path}/relation_annotations.json")
        elif self.config.name.startswith(_QUASAR_T):
            # Both quasar-t and quasar-t-nps share the Quasar-T questions,
            # contexts, and annotation files.
            data_path = f"{_DATA_URL}/{_QUASAR_T}"
            for phase in phases:
                paths[phase] = {
                    "qa": dl_manager.download(f"{data_path}/questions/{phase}_questions.json.gz"),
                    "contexts_long": dl_manager.download(f"{data_path}/contexts/long/{phase}_contexts.json.gz"),
                    "contexts_short": dl_manager.download(f"{data_path}/contexts/short/{phase}_contexts.json.gz"),
                }
            paths["answer_types"] = dl_manager.download(f"{data_path}/answer_annotations.json")
            paths["genres"] = dl_manager.download(f"{data_path}/genre_annotations.json")
            if self.config.name == _QUASAR_T_NPS:
                for phase in phases:
                    paths[phase].update(
                        {
                            "nps_long": dl_manager.download(f"{data_path}/contexts/long/{phase}_nps.json.gz"),
                            "nps_short": dl_manager.download(f"{data_path}/contexts/short/{phase}_nps.json.gz"),
                        }
                    )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": paths, "phase": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": paths, "phase": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": paths, "phase": "test"},
            ),
        ]
|
|
|
    @staticmethod
    def _read_file(path):
        """Read a gzipped JSON-lines file.

        :param path: path to a ``.json.gz`` file containing one JSON record per line
        :return: generator yielding one parsed record (dict) per line
        """
        with gzip.open(path, "rt", encoding="utf-8") as rf:
            for line in rf:
                # The records are plain JSON, so use json.loads rather than
                # eval, which would execute arbitrary expressions from the data.
                yield json.loads(line)
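
    # Illustrative record shapes (hypothetical values, inferred from how the
    # records are consumed in _get_nps, _get_base_datum, and _generate_examples):
    #     question file: {"uid": "...", "question": "...", "answer": "...", "tags": ["..."]}
    #     context file:  {"uid": "...", "contexts": [[confidence, content], ...]}
    #     nps file:      {"uid": "...", "nps": [[candidate, context_id, token_id], ...]}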
|
|
|
    @staticmethod
    def _invert_dict(_dict):
        """Invert a Dict[str, List[str]] into a Dict[str, str].

        Each element of each value list becomes a key in the new dict, mapped
        to the key it was listed under in the original dict.

        :param _dict: mapping from a label to a list of uids
        :return: mapping from each uid to its label
        """
        _d = {}
        for k, v in _dict.items():
            for _v in v:
                _d[_v] = k
        return _d
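
    # For example (hypothetical annotation data; the real files map a label to
    # the list of question uids that carry it):
    #     _invert_dict({"synonym": ["uid1", "uid2"], "component": ["uid3"]})
    #     == {"uid1": "synonym", "uid2": "synonym", "uid3": "component"}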
|
|
|
    @staticmethod
    def _get_nps(nps, context_sentences):
        """Attach whitespace tokens and noun-phrase candidates to each context sentence."""
        # Group the (candidate, token_id) pairs by the sentence they belong to.
        np_sentence_dict = defaultdict(list)
        for candidate, context_id, token_id in nps:
            np_sentence_dict[context_id].append((candidate, token_id))
        return [
            {
                "confidence": context_sentence["confidence"],
                "content": context_sentence["content"],
                "content_tokens": context_sentence["content"].split(_WHITE_SPACE),
                "nps": [{"content": np[0], "start_token_id": np[1]} for np in np_sentence_dict[index]],
            }
            for index, context_sentence in enumerate(context_sentences)
        ]
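
    # Sketch with hypothetical values: given
    #     nps = [["the eiffel tower", 0, 3], ["paris", 1, 7]]
    # sentence 0 gains nps=[{"content": "the eiffel tower", "start_token_id": 3}]
    # and sentence 1 gains nps=[{"content": "paris", "start_token_id": 7}]; every
    # sentence also gains content_tokens = content.split(" ").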
|
|
|
    @staticmethod
    def _get_base_datum(qa, context_long, context_short):
        uid = qa["uid"]
        assert context_long["uid"] == uid
        assert context_short["uid"] == uid
        # Each raw context entry is a [confidence, content] pair.
        context_long = [{"confidence": context[0], "content": context[1]} for context in context_long["contexts"]]
        context_short = [{"confidence": context[0], "content": context[1]} for context in context_short["contexts"]]
        return {
            "uid": uid,
            "question": qa["question"],
            "context_short": context_short,
            "context_long": context_long,
            "tags": qa["tags"],
            "answer": qa["answer"],
        }
|
|
|
    def _generate_examples(self, filepath, phase):
        qas = self._read_file(filepath[phase]["qa"])
        contexts_long = self._read_file(filepath[phase]["contexts_long"])
        contexts_short = self._read_file(filepath[phase]["contexts_short"])
        if self.config.name == _QUASAR_S:
            with open(filepath["relations"], encoding="utf-8") as f:
                relations = self._invert_dict(json.load(f))
            for qa, context_long, context_short in zip(qas, contexts_long, contexts_short):
                datum = self._get_base_datum(qa, context_long, context_short)
                datum["relation"] = relations.get(qa["uid"], _UNKNOWN_RELATION)
                yield qa["uid"], datum
        elif self.config.name == _QUASAR_T:
            with open(filepath["answer_types"], encoding="utf-8") as f:
                answer_types = self._invert_dict(json.load(f))
            with open(filepath["genres"], encoding="utf-8") as f:
                genres = self._invert_dict(json.load(f))
            for qa, context_long, context_short in zip(qas, contexts_long, contexts_short):
                datum = self._get_base_datum(qa, context_long, context_short)
                datum["answer_type"] = answer_types.get(qa["uid"], _UNKNOWN_ANS_TYPE)
                datum["genre"] = genres.get(qa["uid"], _UNKNOWN_GENRE)
                yield qa["uid"], datum
        elif self.config.name == _QUASAR_T_NPS:
            with open(filepath["answer_types"], encoding="utf-8") as f:
                answer_types = self._invert_dict(json.load(f))
            with open(filepath["genres"], encoding="utf-8") as f:
                genres = self._invert_dict(json.load(f))
            nps_long = self._read_file(filepath[phase]["nps_long"])
            nps_short = self._read_file(filepath[phase]["nps_short"])
            for qa, context_long, context_short, np_long, np_short in zip(
                qas, contexts_long, contexts_short, nps_long, nps_short
            ):
                datum = self._get_base_datum(qa, context_long, context_short)
                assert np_long["uid"] == qa["uid"]
                assert np_short["uid"] == qa["uid"]
                datum["answer_type"] = answer_types.get(qa["uid"], _UNKNOWN_ANS_TYPE)
                datum["genre"] = genres.get(qa["uid"], _UNKNOWN_GENRE)
                datum["context_long"] = self._get_nps(np_long["nps"], datum["context_long"])
                datum["context_short"] = self._get_nps(np_short["nps"], datum["context_short"])
                yield qa["uid"], datum
|
|