"""Passage, query, answers and answer classification with explanations.""" |
|
|
|
|
|
import json |
|
import os |
|
|
|
import datasets |
|
|
_CITATION = """ |
|
@unpublished{eraser2019, |
|
title = {ERASER: A Benchmark to Evaluate Rationalized NLP Models}, |
|
author = {Jay DeYoung and Sarthak Jain and Nazneen Fatema Rajani and Eric Lehman and Caiming Xiong and Richard Socher and Byron C. Wallace} |
|
} |
|
@inproceedings{MultiRC2018, |
|
author = {Daniel Khashabi and Snigdha Chaturvedi and Michael Roth and Shyam Upadhyay and Dan Roth}, |
|
title = {Looking Beyond the Surface:A Challenge Set for Reading Comprehension over Multiple Sentences}, |
|
booktitle = {NAACL}, |
|
year = {2018} |
|
} |
|
""" |
|
|
_DESCRIPTION = """ |
|
Eraser Multi RC is a dataset for queries over multi-line passages, along with |
|
answers and a rationalte. Each example in this dataset has the following 5 parts |
|
1. A Mutli-line Passage |
|
2. A Query about the passage |
|
3. An Answer to the query |
|
4. A Classification as to whether the answer is right or wrong |
|
5. An Explanation justifying the classification |
|
""" |
|
|
_DOWNLOAD_URL = "http://www.eraserbenchmark.com/zipped/multirc.tar.gz" |
|
|
class EraserMultiRc(datasets.GeneratorBasedBuilder):
    """Multi Sentence Reasoning with Explanations (Eraser Benchmark)."""

    VERSION = datasets.Version("0.1.1")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "passage": datasets.Value("string"),
                    "query_and_answer": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=["False", "True"]),
                    "evidences": datasets.features.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="https://cogcomp.seas.upenn.edu/multirc/",
            citation=_CITATION,
        )
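
    # Note: `ClassLabel(names=["False", "True"])` stores labels as integers,
    # mapping "False" to 0 and "True" to 1.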
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)
        data_dir = os.path.join(dl_dir, "multirc")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "train.jsonl")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "val.jsonl")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_dir": data_dir, "filepath": os.path.join(data_dir, "test.jsonl")},
            ),
        ]
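
    # Archive layout assumed by the paths above and by `_generate_examples`:
    #   multirc/
    #     train.jsonl, val.jsonl, test.jsonl   # one annotation per line
    #     docs/                                # one plain-text file per docid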
|
    def _generate_examples(self, data_dir, filepath):
        """Yields examples."""
        multirc_dir = os.path.join(data_dir, "docs")
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)

                # Collect the rationale sentences. All evidence spans of one
                # annotation point into the same source document, so the docid
                # seen in the loop also identifies the passage file to load.
                evidences = []
                for evidence in row["evidences"][0]:
                    docid = evidence["docid"]
                    evidences.append(evidence["text"])

                # Read the full passage text from the docs/ directory.
                passage_file = os.path.join(multirc_dir, docid)
                with open(passage_file, encoding="utf-8") as f1:
                    passage_text = f1.read()

                yield row["annotation_id"], {
                    "passage": passage_text,
                    "query_and_answer": row["query"],
                    "label": row["classification"],
                    "evidences": evidences,
                }
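

# Minimal usage sketch (assumes the `datasets` library can resolve this script,
# e.g. via its Hub name "eraser_multi_rc" or a local path to this file):
#
#   import datasets
#   ds = datasets.load_dataset("eraser_multi_rc", split="train")
#   print(ds[0]["query_and_answer"], ds[0]["label"])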