"""Scientific fact-checking dataset. Verifies claims based on citation sentences
using evidence from the cited abstracts. Formatted as a paragraph-level entailment task."""
import datasets
import json
_CITATION = """\
@article{Saakyan2021COVIDFactFE,
title={COVID-Fact: Fact Extraction and Verification of Real-World Claims on COVID-19 Pandemic},
author={Arkadiy Saakyan and Tuhin Chakrabarty and Smaranda Muresan},
journal={ArXiv},
year={2021},
volume={abs/2106.03794},
url={https://api.semanticscholar.org/CorpusID:235364036}
}
"""
_DESCRIPTION = """\
COVID-FACT is a dataset of claims about COVID-19. For this version of the dataset, we follow the preprocessing from the MultiVerS modeling paper https://github.com/dwadden/multivers, verifying claims against abstracts of scientific research articles. Entailment labels and rationales are included.
"""
_URL = "https://scifact.s3.us-west-2.amazonaws.com/longchecker/latest/data.tar.gz"


def flatten(xss):
    """Flatten a list of lists into a single flat list."""
    return [x for xs in xss for x in xs]
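
# Illustrative example: flatten([[0, 1], [3]]) -> [0, 1, 3]. _generate_examples
# uses this to merge rationale sentence indices across evidence entries.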


class CovidFactEntailmentConfig(datasets.BuilderConfig):
    """BuilderConfig for the COVID-Fact entailment dataset."""

    def __init__(self, **kwargs):
        """
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)


class CovidFactEntailment(datasets.GeneratorBasedBuilder):
    """COVID-Fact claims paired with cited abstracts for paragraph-level entailment."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = {
            "claim_id": datasets.Value("int32"),
            "claim": datasets.Value("string"),
            "abstract_id": datasets.Value("int32"),
            "title": datasets.Value("string"),
            "abstract": datasets.features.Sequence(datasets.Value("string")),
            "verdict": datasets.Value("string"),
            "evidence": datasets.features.Sequence(datasets.Value("int32")),
        }
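
        # An illustrative record under this schema (values are hypothetical):
        #   {"claim_id": 7, "claim": "...", "abstract_id": 42, "title": "...",
        #    "abstract": ["Sentence 0.", "Sentence 1."], "verdict": "SUPPORT",
        #    "evidence": [0]}
        # "evidence" holds sentence indices into "abstract"; it is empty and the
        # verdict is "NEI" when the abstract contains no rationale for the claim.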
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            # There is no canonical (input, target) pair; used with as_supervised=False.
            supervised_keys=None,
            homepage="https://scifact.apps.allenai.org/",
            citation=_CITATION,
        )

    @staticmethod
    def _read_tar_file(f):
        """Parse one JSONL file from the tar archive into a list of dicts."""
        res = []
        for row in f:
            res.append(json.loads(row.decode("utf-8")))
        return res

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager used to download
        # and iterate over the data archive.
        archive = dl_manager.download(_URL)
        for path, f in dl_manager.iter_archive(archive):
            # The claims are too similar to the paper titles, so use the corpus
            # version without titles.
            if path == "data/covidfact/corpus_without_titles.jsonl":
                corpus = self._read_tar_file(f)
                corpus = {x["doc_id"]: x for x in corpus}
            elif path == "data/covidfact/claims_train.jsonl":
                claims_train = self._read_tar_file(f)
            elif path == "data/covidfact/claims_test.jsonl":
                claims_test = self._read_tar_file(f)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "claims": claims_train,
                    "corpus": corpus,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "claims": claims_test,
                    "corpus": corpus,
                    "split": "test",
                },
            ),
        ]
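
    # Shape of the JSONL inputs, inferred from the fields read below (a sketch,
    # not an authoritative schema; values are hypothetical):
    #   corpus row: {"doc_id": 42, "title": "...", "abstract": ["Sent 0.", "Sent 1."]}
    #   claim row:  {"id": 7, "claim": "...", "doc_ids": [42],
    #                "evidence": {"42": [{"label": "SUPPORT", "sentences": [0]}]}}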

    def _generate_examples(self, claims, corpus, split):
        """Yields examples."""
        # Loop over claims and pair each claim with the abstracts it cites.
        id_ = -1  # Will increment to 0 on first iteration.
        for claim in claims:
            evidence = {int(k): v for k, v in claim["evidence"].items()}
            for cited_doc_id in claim["doc_ids"]:
                cited_doc = corpus[cited_doc_id]
                abstract_sents = [sent.strip() for sent in cited_doc["abstract"]]
                if cited_doc_id in evidence:
                    this_evidence = evidence[cited_doc_id]
                    # All evidence entries for a document share the same label,
                    # so take the verdict from the first.
                    verdict = this_evidence[0]["label"]
                    evidence_sents = flatten(
                        [entry["sentences"] for entry in this_evidence]
                    )
                else:
                    verdict = "NEI"
                    evidence_sents = []
                instance = {
                    "claim_id": claim["id"],
                    "claim": claim["claim"],
                    "abstract_id": cited_doc_id,
                    "title": cited_doc["title"],
                    "abstract": abstract_sents,
                    "verdict": verdict,
                    "evidence": evidence_sents,
                }
                id_ += 1
                yield id_, instance
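
# Minimal usage sketch (assumes this script is saved locally as
# "covidfact_entailment.py"; recent `datasets` releases may additionally
# require trust_remote_code=True to run a local loading script):
#
#     import datasets
#     ds = datasets.load_dataset("covidfact_entailment.py")
#     example = ds["train"][0]
#     print(example["claim"], example["verdict"], example["evidence"])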