Datasets:
Tasks:
Text Classification
Languages:
English
Multilinguality:
monolingual
Size Categories:
100K<n<1M
Language Creators:
found
Annotations Creators:
crowdsourced
Source Datasets:
extended|wikipedia
Tags:
knowledge-verification
License:
# Modified by Nora Belrose of EleutherAI (2023)
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""FEVER dataset."""

import json
import os
import textwrap

import datasets


class FeverConfig(datasets.BuilderConfig):
    """BuilderConfig for FEVER."""

    def __init__(self, homepage: str = None, citation: str = None, base_url: str = None, urls: dict = None, **kwargs):
        """BuilderConfig for FEVER.

        Args:
            homepage (`str`): Homepage.
            citation (`str`): Citation reference.
            base_url (`str`): Base URL that precedes all data URLs.
            urls (`dict`): Data URLs (each URL will be preceded by `base_url`).
            **kwargs: Keyword arguments forwarded to the superclass.
        """
        super().__init__(**kwargs)
        self.homepage = homepage
        self.citation = citation
        self.base_url = base_url
        # Guard against the `urls=None` default so the comprehension cannot crash.
        self.urls = {key: f"{base_url}/{url}" for key, url in (urls or {}).items()}


class Fever(datasets.GeneratorBasedBuilder):
    """Fact Extraction and VERification Dataset."""

    BUILDER_CONFIGS = [
        FeverConfig(
            name="v1.0",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                "FEVER v1.0\n"
                "FEVER (Fact Extraction and VERification) consists of 185,445 claims generated by altering sentences "
                "extracted from Wikipedia and subsequently verified without knowledge of the sentence they were "
                "derived from. The claims are classified as Supported, Refuted or NotEnoughInfo. For the first two "
                "classes, the annotators also recorded the sentence(s) forming the necessary evidence for their "
                "judgment."
            ),
            homepage="https://fever.ai/dataset/fever.html",
            citation=textwrap.dedent(
                """\
                @inproceedings{Thorne18Fever,
                    author = {Thorne, James and Vlachos, Andreas and Christodoulopoulos, Christos and Mittal, Arpit},
                    title = {{FEVER}: a Large-scale Dataset for Fact Extraction and {VERification}},
                    booktitle = {NAACL-HLT},
                    year = {2018}
                }"""
            ),
            base_url="https://fever.ai/download/fever",
            urls={
                datasets.Split.TRAIN: "train.jsonl",
                "dev": "shared_task_dev.jsonl",
                "paper_dev": "paper_dev.jsonl",
                "paper_test": "paper_test.jsonl",
            },
        ),
        FeverConfig(
            name="v2.0",
            version=datasets.Version("2.0.0"),
            description=textwrap.dedent(
                "FEVER v2.0:\n"
"The FEVER 2.0 Dataset consists of 1174 claims created by the submissions of participants in the " | |
"Breaker phase of the 2019 shared task. Participants (Breakers) were tasked with generating " | |
"adversarial examples that induce classification errors for the existing systems. Breakers submitted " | |
"a dataset of up to 1000 instances with equal number of instances for each of the three classes " | |
"(Supported, Refuted NotEnoughInfo). Only novel claims (i.e. not contained in the original FEVER " | |
"dataset) were considered as valid entries to the shared task. The submissions were then manually " | |
"evaluated for Correctness (grammatical, appropriately labeled and meet the FEVER annotation " | |
"guidelines requirements)." | |
            ),
            homepage="https://fever.ai/dataset/adversarial.html",
            citation=textwrap.dedent(
                """\
                @inproceedings{Thorne19FEVER2,
                    author = {Thorne, James and Vlachos, Andreas and Cocarascu, Oana and Christodoulopoulos, Christos and Mittal, Arpit},
                    title = {The {FEVER2.0} Shared Task},
                    booktitle = {Proceedings of the Second Workshop on {Fact Extraction and VERification (FEVER)}},
                    year = {2019}
}""" | |
), | |
base_url="https://fever.ai/download/fever2.0", | |
urls={ | |
datasets.Split.VALIDATION: "fever2-fixers-dev.jsonl", | |
}, | |
), | |
FeverConfig( | |
name="wiki_pages", | |
version=datasets.Version("1.0.0"), | |
description=textwrap.dedent( | |
"Wikipedia pages for FEVER v1.0:\n" | |
"FEVER (Fact Extraction and VERification) consists of 185,445 claims generated by altering sentences " | |
"extracted from Wikipedia and subsequently verified without knowledge of the sentence they were " | |
"derived from. The claims are classified as Supported, Refuted or NotEnoughInfo. For the first two " | |
"classes, the annotators also recorded the sentence(s) forming the necessary evidence for their " | |
"judgment." | |
), | |
homepage="https://fever.ai/dataset/fever.html", | |
citation=textwrap.dedent( | |
"""\ | |
@inproceedings{Thorne18Fever, | |
author = {Thorne, James and Vlachos, Andreas and Christodoulopoulos, Christos and Mittal, Arpit}, | |
title = {{FEVER}: a Large-scale Dataset for Fact Extraction and {VERification}}, | |
booktitle = {NAACL-HLT}, | |
year = {2018} | |
}""" | |
), | |
base_url="https://fever.ai/download/fever", | |
urls={ | |
"wikipedia_pages": "wiki-pages.zip", | |
}, | |
), | |
] | |
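
    # A hedged usage sketch for loading these three configs appears in the
    # __main__ block at the bottom of this file.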

    def _info(self):
        if self.config.name == "wiki_pages":
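            # Note: in the FEVER wiki-pages dump, "lines" holds a page's
            # sentences as a single string, one sentence per line, each
            # prefixed with its tab-separated sentence index.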
            features = {
                "id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "lines": datasets.Value("string"),
            }
        elif self.config.name in ("v1.0", "v2.0"):
            features = {
                "id": datasets.Value("int32"),
                "label": datasets.ClassLabel(names=["REFUTES", "SUPPORTS"]),
                "claim": datasets.Value("string"),
"evidence_annotation_id": datasets.Value("int32"), | |
"evidence_id": datasets.Value("int32"), | |
"evidence_wiki_url": datasets.Value("string"), | |
"evidence_sentence_id": datasets.Value("int32"), | |
} | |
return datasets.DatasetInfo( | |
description=self.config.description, | |
features=datasets.Features(features), | |
homepage=self.config.homepage, | |
citation=self.config.citation, | |
) | |
def _split_generators(self, dl_manager): | |
"""Returns SplitGenerators.""" | |
        dl_paths = dl_manager.download_and_extract(self.config.urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": dl_paths[split]
                    if self.config.name != "wiki_pages"
                    else dl_manager.iter_files(os.path.join(dl_paths[split], "wiki-pages")),
                },
            )
            for split in dl_paths
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        if self.config.name in ("v1.0", "v2.0"):
            with open(filepath, encoding="utf-8") as f:
                for row_id, row in enumerate(f):
                    data = json.loads(row)
                    id_ = data["id"]
                    label = data.get("label", "")
                    # Drop the examples with label "NOT ENOUGH INFO"
                    if label not in ("REFUTES", "SUPPORTS"):
                        continue
                    claim = data["claim"]
                    evidences = data.get("evidence", [])
                    if evidences:
                        for i, evidence_set in enumerate(evidences):
                            for j, evidence in enumerate(evidence_set):
                                annot_id = evidence[0] if evidence[0] else -1
                                evidence_id = evidence[1] if evidence[1] else -1
                                wiki_url = evidence[2] if evidence[2] else ""
                                sent_id = evidence[3] if evidence[3] else -1
                                yield f"{row_id}_{i}_{j}", {
                                    "id": id_,
                                    "label": label,
                                    "claim": claim,
                                    "evidence_annotation_id": annot_id,
                                    "evidence_id": evidence_id,
                                    "evidence_wiki_url": wiki_url,
                                    "evidence_sentence_id": sent_id,
                                }
                    else:
                        yield row_id, {
                            "id": id_,
                            "label": label,
                            "claim": claim,
                            "evidence_annotation_id": -1,
                            "evidence_id": -1,
                            "evidence_wiki_url": "",
                            "evidence_sentence_id": -1,
                        }
        elif self.config.name == "wiki_pages":
            for file_id, file in enumerate(filepath):
                with open(file, encoding="utf-8") as f:
                    for row_id, row in enumerate(f):
                        data = json.loads(row)
                        yield f"{file_id}_{row_id}", data