Datasets: scifact

Sub-tasks: fact-checking
Languages: English
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: expert-generated
Source Datasets: original
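Loaded through the standard `datasets` API, the two configs defined by the loading script below ("corpus" and "claims") are fetched separately. A minimal usage sketch (recent library versions may additionally require `trust_remote_code=True` for script-based datasets):

```python
from datasets import load_dataset

# "corpus" ships a single train split of evidence abstracts;
# "claims" ships train/validation/test splits of annotated claims.
corpus = load_dataset("scifact", "corpus", split="train")
claims = load_dataset("scifact", "claims")

print(corpus[0]["title"])
print(claims["train"][0]["claim"])
```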
Commit 521533d (initial commit; 0 parents), committed by system (HF staff)

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
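Each `.gitattributes` line above maps a glob pattern to the Git LFS filter, so matching files are stored as LFS pointers rather than raw blobs. A rough illustration of the matching logic (`lfs_patterns` and `is_lfs_tracked` are hypothetical helpers; `fnmatch` only approximates gitattributes glob semantics):

```python
from fnmatch import fnmatch

def lfs_patterns(gitattributes_text):
    """Collect glob patterns that .gitattributes routes through Git LFS."""
    patterns = []
    for line in gitattributes_text.splitlines():
        parts = line.split()
        if len(parts) > 1 and "filter=lfs" in parts[1:]:
            patterns.append(parts[0])
    return patterns

def is_lfs_tracked(path, patterns):
    """Approximate check: match the full path or its basename against each pattern."""
    basename = path.rsplit("/", 1)[-1]
    return any(fnmatch(path, pat) or fnmatch(basename, pat) for pat in patterns)

attrs = "*.zip filter=lfs diff=lfs merge=lfs -text"
print(is_lfs_tracked("dummy/claims/1.0.0/dummy_data.zip", lfs_patterns(attrs)))  # True
```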
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"corpus": {"description": "SciFact, a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts, and annotated with labels and rationales\n", "citation": "@inproceedings{scifact2020\n title={ Fact or Fiction: Verifying Scientific Claims},\n author={David, Wadden and Kyle, Lo and Lucy Lu, Wang and Shanchuan, Lin and Madeleine van, Zuylen and Arman, Cohan and Hannaneh, Hajishirzi},\n booktitle={2011 AAAI Spring Symposium Series},\n year={2020},\n}\n", "homepage": "https://scifact.apps.allenai.org/", "license": "", "features": {"doc_id": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "abstract": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "structured": {"dtype": "bool", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "scifact", "config_name": "corpus", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8002556, "num_examples": 5183, "dataset_name": "scifact"}}, "download_checksums": {"https://ai2-s2-scifact.s3-us-west-2.amazonaws.com/release/2020-05-01/data.tar.gz": {"num_bytes": 2848693, "checksum": "108c19d1a6e2522f20055fe503450c6af8c9c12a095fd727f21894cc44eb47aa"}}, "download_size": 2848693, "dataset_size": 8002556, "size_in_bytes": 10851249}, "claims": {"description": "SciFact, a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts, and annotated with labels and rationales\n", "citation": "@inproceedings{scifact2020\n title={ Fact or Fiction: Verifying Scientific Claims},\n author={David, Wadden and Kyle, Lo and Lucy Lu, Wang and Shanchuan, Lin and Madeleine van, Zuylen and Arman, Cohan and Hannaneh, Hajishirzi},\n booktitle={2011 AAAI Spring Symposium Series},\n year={2020},\n}\n", "homepage": "https://scifact.apps.allenai.org/", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "claim": {"dtype": "string", "id": null, "_type": "Value"}, "evidence_doc_id": {"dtype": "string", "id": null, "_type": "Value"}, "evidence_label": {"dtype": "string", "id": null, "_type": "Value"}, "evidence_sentences": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "cited_doc_ids": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "scifact", "config_name": "claims", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 170070, "num_examples": 1261, "dataset_name": "scifact"}, "test": {"name": "test", "num_bytes": 33899, "num_examples": 300, "dataset_name": "scifact"}, "validation": {"name": "validation", "num_bytes": 60882, "num_examples": 450, "dataset_name": "scifact"}}, "download_checksums": {"https://ai2-s2-scifact.s3-us-west-2.amazonaws.com/release/2020-05-01/data.tar.gz": {"num_bytes": 2848693, "checksum": "108c19d1a6e2522f20055fe503450c6af8c9c12a095fd727f21894cc44eb47aa"}}, "download_size": 2848693, "dataset_size": 264851, "size_in_bytes": 3113544}}
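The JSON above records builder metadata for both configs: features, split sizes, and download checksums. In recent versions of the library it can be inspected without downloading the archive, e.g.:

```python
from datasets import load_dataset_builder

# Reads the recorded metadata only; no data download happens here.
builder = load_dataset_builder("scifact", "claims")
print(builder.info.features)
print({name: split.num_examples for name, split in builder.info.splits.items()})
# {'train': 1261, 'test': 300, 'validation': 450}
```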
dummy/claims/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43fc6639f7aa4c97cf8d77ffdffcae90df9c5231547af3be7b5bd382a7ff9189
+ size 1659
dummy/corpus/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0854510f54a4589c062b8f02f2afc868fd310caaa622a452385792b2e940c52d
+ size 2197
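The two files above are Git LFS pointers: each records the LFS spec version plus the SHA-256 `oid` and byte `size` of the real zip stored out-of-band. A minimal sketch of checking such a pointer against a downloaded file (`verify_lfs_pointer` and its arguments are hypothetical):

```python
import hashlib
import os

def verify_lfs_pointer(pointer_text, local_path):
    """Compare a file's size and SHA-256 digest against a Git LFS pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].split(":", 1)[1]  # drop the "sha256:" prefix
    if os.path.getsize(local_path) != int(fields["size"]):
        return False
    digest = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 16), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid
```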
scifact.py ADDED
@@ -0,0 +1,167 @@
+ """SciFact: a dataset for verifying scientific claims against paper abstracts."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{scifact2020,
+     title={Fact or Fiction: Verifying Scientific Claims},
+     author={Wadden, David and Lo, Kyle and Wang, Lucy Lu and Lin, Shanchuan and van Zuylen, Madeleine and Cohan, Arman and Hajishirzi, Hannaneh},
+     booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
+     year={2020},
+ }
+ """
+
+ _DESCRIPTION = """\
+ SciFact, a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts, annotated with labels and rationales.
+ """
+
+ _URL = "https://ai2-s2-scifact.s3-us-west-2.amazonaws.com/release/2020-05-01/data.tar.gz"
+
+
+ class ScifactConfig(datasets.BuilderConfig):
+     """BuilderConfig for SciFact."""
+
+     def __init__(self, **kwargs):
+         """
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(ScifactConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+
+
+ class Scifact(datasets.GeneratorBasedBuilder):
+     """SciFact: expert-written scientific claims paired with evidence abstracts."""
+
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIGS = [
+         ScifactConfig(name="corpus", description="The corpus of evidence documents"),
+         ScifactConfig(name="claims", description="The claims, split into train, test, dev"),
+     ]
+
+     def _info(self):
+         if self.config.name == "corpus":
+             features = {
+                 "doc_id": datasets.Value("int32"),  # The document's S2ORC ID.
+                 "title": datasets.Value("string"),  # The title.
+                 "abstract": datasets.features.Sequence(
+                     datasets.Value("string")
+                 ),  # The abstract, written as a list of sentences.
+                 "structured": datasets.Value("bool"),  # Indicator for whether this is a structured abstract.
+             }
+         else:
+             features = {
+                 "id": datasets.Value("int32"),  # An integer claim ID.
+                 "claim": datasets.Value("string"),  # The text of the claim.
+                 "evidence_doc_id": datasets.Value("string"),  # ID of the evidence document.
+                 "evidence_label": datasets.Value("string"),  # Label for the rationale.
+                 "evidence_sentences": datasets.features.Sequence(datasets.Value("int32")),  # Rationale sentences.
+                 "cited_doc_ids": datasets.features.Sequence(datasets.Value("int32")),  # The claim's "cited documents".
+             }
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             features=datasets.Features(features),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation.
+             homepage="https://scifact.apps.allenai.org/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs.
+         dl_dir = dl_manager.download_and_extract(_URL)
+
+         if self.config.name == "corpus":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples.
+                     gen_kwargs={"filepath": os.path.join(dl_dir, "data", "corpus.jsonl"), "split": "train"},
+                 ),
+             ]
+         else:
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples.
+                     gen_kwargs={"filepath": os.path.join(dl_dir, "data", "claims_train.jsonl"), "split": "train"},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     # These kwargs will be passed to _generate_examples.
+                     gen_kwargs={"filepath": os.path.join(dl_dir, "data", "claims_test.jsonl"), "split": "test"},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     # These kwargs will be passed to _generate_examples.
+                     gen_kwargs={"filepath": os.path.join(dl_dir, "data", "claims_dev.jsonl"), "split": "dev"},
+                 ),
+             ]
+
+     def _generate_examples(self, filepath, split):
+         """Yields (key, example) tuples from a claims or corpus JSONL file."""
+         with open(filepath, encoding="utf-8") as f:
+             for id_, row in enumerate(f):
+                 data = json.loads(row)
+                 if self.config.name == "corpus":
+                     yield id_, {
+                         "doc_id": int(data["doc_id"]),
+                         "title": data["title"],
+                         "abstract": data["abstract"],
+                         "structured": data["structured"],
+                     }
+                 else:
+                     if split == "test":
+                         # The test split ships without evidence annotations.
+                         yield id_, {
+                             "id": data["id"],
+                             "claim": data["claim"],
+                             "evidence_doc_id": "",
+                             "evidence_label": "",
+                             "evidence_sentences": [],
+                             "cited_doc_ids": [],
+                         }
+                     else:
+                         evidences = data["evidence"]
+                         if evidences:
+                             # Emit one example per (claim, evidence document, rationale).
+                             for id1, doc_id in enumerate(evidences):
+                                 for id2, evidence in enumerate(evidences[doc_id]):
+                                     yield str(id_) + "_" + str(id1) + "_" + str(id2), {
+                                         "id": data["id"],
+                                         "claim": data["claim"],
+                                         "evidence_doc_id": doc_id,
+                                         "evidence_label": evidence["label"],
+                                         "evidence_sentences": evidence["sentences"],
+                                         "cited_doc_ids": data.get("cited_doc_ids", []),
+                                     }
+                         else:
+                             # Claims without evidence yield a single example with empty evidence fields.
+                             yield id_, {
+                                 "id": data["id"],
+                                 "claim": data["claim"],
+                                 "evidence_doc_id": "",
+                                 "evidence_label": "",
+                                 "evidence_sentences": [],
+                                 "cited_doc_ids": data.get("cited_doc_ids", []),
+                             }
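As `_generate_examples` shows, claims examples reference evidence documents only by ID, so resolving rationale sentences requires joining against the corpus config. A sketch of that join (field semantics as defined in `_info` above):

```python
from datasets import load_dataset

corpus = load_dataset("scifact", "corpus", split="train")
claims = load_dataset("scifact", "claims", split="train")

# Map doc_id -> abstract sentences so a claim's evidence can be resolved.
abstracts = {doc["doc_id"]: doc["abstract"] for doc in corpus}

example = claims[0]
if example["evidence_doc_id"]:  # empty for claims without evidence
    sentences = abstracts[int(example["evidence_doc_id"])]
    rationale = [sentences[i] for i in example["evidence_sentences"]]
    print(example["claim"], "->", example["evidence_label"], rationale)
```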