# scifact_entailment / scifact_entailment.py
# Author: David Wadden
# Commit fd02f3e — "Fix the entailment script."
# (Hugging Face Hub page chrome retained as a comment so the file parses:
#  raw / history / blame / contribute / delete; "No virus"; 5.67 kB)
"""Scientific fact-checking dataset. Verifies claims based on citation sentences
using evidence from the cited abstracts. Formatted as a paragraph-level entailment task."""
import datasets
import json
# BibTeX entry for the SciFact paper (Wadden et al., EMNLP 2020).
_CITATION = """\
@inproceedings{Wadden2020FactOF,
title={Fact or Fiction: Verifying Scientific Claims},
author={David Wadden and Shanchuan Lin and Kyle Lo and Lucy Lu Wang and Madeleine van Zuylen and Arman Cohan and Hannaneh Hajishirzi},
booktitle={EMNLP},
year={2020},
}
"""
# Short dataset summary surfaced on the dataset page.
_DESCRIPTION = """\
SciFact, a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts, and annotated with labels and rationales.
"""
# Tarball holding the corpus plus the train/dev claim files.
_URL = "https://scifact.s3-us-west-2.amazonaws.com/release/latest/data.tar.gz"
def flatten(xss):
    """Concatenate an iterable of lists into one flat list."""
    flattened = []
    for inner in xss:
        flattened.extend(inner)
    return flattened
class ScifactEntailmentConfig(datasets.BuilderConfig):
    """BuilderConfig for Scifact"""

    def __init__(self, **kwargs):
        """
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        # Pin the config version; everything else passes straight through.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
class ScifactEntailment(datasets.GeneratorBasedBuilder):
    """Paragraph-level entailment formulation of the SciFact dataset: each
    example pairs one claim with one cited abstract plus a verdict and
    rationale-sentence indices."""

    # TODO(scifact): Set up version.
    VERSION = datasets.Version("0.1.0")

    def _info(self):
        # TODO(scifact): Specifies the datasets.DatasetInfo object
        feature_spec = datasets.Features(
            {
                "claim_id": datasets.Value("int32"),
                "claim": datasets.Value("string"),
                "abstract_id": datasets.Value("int32"),
                "title": datasets.Value("string"),
                # Abstract is stored sentence-by-sentence.
                "abstract": datasets.features.Sequence(datasets.Value("string")),
                "verdict": datasets.Value("string"),
                # Indices of the rationale sentences within the abstract.
                "evidence": datasets.features.Sequence(datasets.Value("int32")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=feature_spec,
            # No canonical (input, target) pair for as_supervised=True.
            supervised_keys=None,
            homepage="https://scifact.apps.allenai.org/",
            citation=_CITATION,
        )

    @staticmethod
    def _read_tar_file(f):
        # Archive members are JSON-lines; decode each raw byte line and parse.
        return [json.loads(raw.decode("utf-8")) for raw in f]

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download the release tarball and pull out the corpus and the
        # train/dev claim files while streaming through the archive.
        archive = dl_manager.download(_URL)
        for member_name, handle in dl_manager.iter_archive(archive):
            if member_name == "data/corpus.jsonl":
                # Index abstracts by document id for O(1) lookup later.
                corpus = {doc["doc_id"]: doc for doc in self._read_tar_file(handle)}
            elif member_name == "data/claims_train.jsonl":
                claims_train = self._read_tar_file(handle)
            elif member_name == "data/claims_dev.jsonl":
                claims_validation = self._read_tar_file(handle)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Passed through to _generate_examples.
                gen_kwargs={
                    "claims": claims_train,
                    "corpus": corpus,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # Passed through to _generate_examples.
                gen_kwargs={
                    "claims": claims_validation,
                    "corpus": corpus,
                    "split": "validation",
                },
            ),
        ]

    def _generate_examples(self, claims, corpus, split):
        """Yields examples."""
        # Emit one example per (claim, cited abstract) pair.
        example_id = -1  # Incremented to 0 before the first yield.
        for claim in claims:
            # Raw JSON keys evidence by doc id as a string; normalize to int.
            evidence_by_doc = {
                int(doc_id): entries for doc_id, entries in claim["evidence"].items()
            }
            for cited_doc_id in claim["cited_doc_ids"]:
                cited_doc = corpus[cited_doc_id]
                abstract_sents = [sent.strip() for sent in cited_doc["abstract"]]
                doc_evidence = evidence_by_doc.get(cited_doc_id)
                if doc_evidence is not None:
                    # All evidence entries for a doc carry the same label, so
                    # the first one suffices.
                    verdict = doc_evidence[0]["label"]
                    evidence_sents = flatten(
                        [entry["sentences"] for entry in doc_evidence]
                    )
                else:
                    # Cited but unannotated abstract: "not enough info".
                    verdict = "NEI"
                    evidence_sents = []
                example_id += 1
                yield example_id, {
                    "claim_id": claim["id"],
                    "claim": claim["claim"],
                    "abstract_id": cited_doc_id,
                    "title": cited_doc["title"],
                    "abstract": abstract_sents,
                    "verdict": verdict,
                    "evidence": evidence_sents,
                }