Commit 02b9d33 (0 parents)
Committed by system (HF staff)

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
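
These rules tell Git to store matching binary artifacts (archives, Arrow/Parquet data, model weights) in Git LFS rather than in the repository itself. As a rough illustration only, and setting aside Git's full pattern grammar (e.g. the saved_model/**/* path rule), the simple patterns behave like shell globs; a minimal Python sketch, where is_lfs_tracked is a hypothetical helper, not part of this commit:

from fnmatch import fnmatch

# A few of the LFS patterns from the .gitattributes above.
LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.zip", "*tfevents*"]

def is_lfs_tracked(name):
    """Return True if the file name matches any LFS-tracked glob pattern."""
    return any(fnmatch(name, pattern) for pattern in LFS_PATTERNS)

print(is_lfs_tracked("dummy_data.zip"))  # True
print(is_lfs_tracked("fever.py"))        # False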
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"v1.0": {"description": "\nWith billions of individual pages on the web providing information on almost every conceivable topic, we should have the ability to collect facts that answer almost every conceivable question. However, only a small fraction of this information is contained in structured sources (Wikidata, Freebase, etc.) \u2013 we are therefore limited by our ability to transform free-form text to structured knowledge. There is, however, another problem that has become the focus of a lot of recent research and media coverage: false information coming from unreliable sources. [1] [2]\n\nThe FEVER workshops are a venue for work in verifiable knowledge extraction and to stimulate progress in this direction.\n\nFEVER V1.0", "citation": "\n@inproceedings{Thorne18Fever,\n author = {Thorne, James and Vlachos, Andreas and Christodoulopoulos, Christos and Mittal, Arpit},\n title = {{FEVER}: a Large-scale Dataset for Fact Extraction and VERification},\n booktitle = {NAACL-HLT},\n year = {2018}\n}\n", "homepage": "https://fever.ai/", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}, "claim": {"dtype": "string", "id": null, "_type": "Value"}, "evidence_annotation_id": {"dtype": "int32", "id": null, "_type": "Value"}, "evidence_id": {"dtype": "int32", "id": null, "_type": "Value"}, "evidence_wiki_url": {"dtype": "string", "id": null, "_type": "Value"}, "evidence_sentence_id": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "fever", "config_name": "v1.0", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29747512, "num_examples": 311431, "dataset_name": "fever"}, "unlabelled_test": {"name": "unlabelled_test", "num_bytes": 1627026, "num_examples": 19998, "dataset_name": "fever"}, "unlabelled_dev": {"name": "unlabelled_dev", "num_bytes": 1558989, "num_examples": 19998, "dataset_name": "fever"}, "labelled_dev": {"name": "labelled_dev", "num_bytes": 3661989, "num_examples": 37566, "dataset_name": "fever"}, "paper_dev": {"name": "paper_dev", "num_bytes": 1831013, "num_examples": 18999, "dataset_name": "fever"}, "paper_test": {"name": "paper_test", "num_bytes": 1830976, "num_examples": 18567, "dataset_name": "fever"}}, "download_checksums": {"https://s3-eu-west-1.amazonaws.com/fever.public/train.jsonl": {"num_bytes": 33024303, "checksum": "eba7e8f87076753f8494718b9a857827af7bf73e76c9e4b75420207d26e588b6"}, "https://s3-eu-west-1.amazonaws.com/fever.public/shared_task_dev.jsonl": {"num_bytes": 4349935, "checksum": "e89865bfe1b4dd054e03dd57d7241a6fde24862905f31117cf0cd719f7c78df7"}, "https://s3-eu-west-1.amazonaws.com/fever.public/shared_task_dev_public.jsonl": {"num_bytes": 1530640, "checksum": "acda01ae5ee7e75c73909a665f465cec20704ea26e9d676cd7423ff2c8ab0e8b"}, "https://s3-eu-west-1.amazonaws.com/fever.public/shared_task_test.jsonl": {"num_bytes": 1599159, "checksum": "76dd0872d8fa1f49efe1194fe8a88b7dd4c715c77d87a142b615d4be583e1e51"}, "https://s3-eu-west-1.amazonaws.com/fever.public/paper_dev.jsonl": {"num_bytes": 2168767, "checksum": "41158707810008747946bf23471e82df53e77a513524b9e3ec1c2e674ef5ef8c"}, "https://s3-eu-west-1.amazonaws.com/fever.public/paper_test.jsonl": {"num_bytes": 2181168, "checksum": "fb7b0280a0adc2302bbb29bfb7af37274fa585de3171bcf908f180642d11d88e"}}, "download_size": 44853972, "dataset_size": 40257505, "size_in_bytes": 85111477}, "v2.0": {"description": "\nWith billions of individual pages on the web providing information on almost every conceivable topic, we should have the ability to collect facts that answer almost every conceivable question. However, only a small fraction of this information is contained in structured sources (Wikidata, Freebase, etc.) \u2013 we are therefore limited by our ability to transform free-form text to structured knowledge. There is, however, another problem that has become the focus of a lot of recent research and media coverage: false information coming from unreliable sources. [1] [2]\n\nThe FEVER workshops are a venue for work in verifiable knowledge extraction and to stimulate progress in this direction.\n\nFEVER V2.0", "citation": "\n@inproceedings{Thorne18Fever,\n author = {Thorne, James and Vlachos, Andreas and Christodoulopoulos, Christos and Mittal, Arpit},\n title = {{FEVER}: a Large-scale Dataset for Fact Extraction and VERification},\n booktitle = {NAACL-HLT},\n year = {2018}\n}\n", "homepage": "https://fever.ai/", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}, "claim": {"dtype": "string", "id": null, "_type": "Value"}, "evidence_annotation_id": {"dtype": "int32", "id": null, "_type": "Value"}, "evidence_id": {"dtype": "int32", "id": null, "_type": "Value"}, "evidence_wiki_url": {"dtype": "string", "id": null, "_type": "Value"}, "evidence_sentence_id": {"dtype": "int32", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "fever", "config_name": "v2.0", "version": {"version_str": "2.0.0", "description": "", "datasets_version_to_prepare": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 307447, "num_examples": 2384, "dataset_name": "fever"}}, "download_checksums": {"https://s3-eu-west-1.amazonaws.com/fever.public/fever2-fixers-dev.jsonl": {"num_bytes": 392466, "checksum": "43c3df77cf9bf6022b9356ed1d66df6d8a9a0126c4e4b8d155742e3a9988c814"}}, "download_size": 392466, "dataset_size": 307447, "size_in_bytes": 699913}, "wiki_pages": {"description": "\nWith billions of individual pages on the web providing information on almost every conceivable topic, we should have the ability to collect facts that answer almost every conceivable question. However, only a small fraction of this information is contained in structured sources (Wikidata, Freebase, etc.) \u2013 we are therefore limited by our ability to transform free-form text to structured knowledge. There is, however, another problem that has become the focus of a lot of recent research and media coverage: false information coming from unreliable sources. [1] [2]\n\nThe FEVER workshops are a venue for work in verifiable knowledge extraction and to stimulate progress in this direction.\n\nWikipedia pages", "citation": "\n@inproceedings{Thorne18Fever,\n author = {Thorne, James and Vlachos, Andreas and Christodoulopoulos, Christos and Mittal, Arpit},\n title = {{FEVER}: a Large-scale Dataset for Fact Extraction and VERification},\n booktitle = {NAACL-HLT},\n year = {2018}\n}\n", "homepage": "https://fever.ai/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "lines": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "fever", "config_name": "wiki_pages", "version": "0.0.0", "splits": {"wikipedia_pages": {"name": "wikipedia_pages", "num_bytes": 7256829814, "num_examples": 5416537, "dataset_name": "fever"}}, "download_checksums": {"https://s3-eu-west-1.amazonaws.com/fever.public/wiki-pages.zip": {"num_bytes": 1713485474, "checksum": "4b06d95da6adf7fe02d2796176c670dacccb21348da89cba4c50676ab99665f2"}}, "download_size": 1713485474, "dataset_size": 7256829814, "size_in_bytes": 8970315288}}
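
The split sizes recorded above can be read back for a quick sanity check; a minimal sketch, assuming dataset_infos.json has been fetched to the working directory:

import json

# Load the metadata file added in this commit (local path assumed).
with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

# Print the example count per split for each config.
for config_name, info in infos.items():
    counts = {split: meta["num_examples"] for split, meta in info["splits"].items()}
    print(config_name, counts)
# v1.0 {'train': 311431, 'unlabelled_test': 19998, 'unlabelled_dev': 19998,
#       'labelled_dev': 37566, 'paper_dev': 18999, 'paper_test': 18567}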
dummy/v1.0/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f4c4fb756db54f88cea8ad36d8b06be05359f654893e66ba4e33871c0cd08c40
+ size 2348
dummy/v2.0/2.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a20775f79daca18b3149bee34a550fc03882264f0d44deabeeb641c55b759479
+ size 614
dummy/wiki_pages/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a48ce54757528bd89207b474c897afad5c99fbaf0eeb70cd73b481e4a92095a7
+ size 1297
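
Each dummy_data.zip above is stored as a Git LFS pointer file: a spec version line, a sha256 object id, and the payload size in bytes. A minimal parsing sketch (parse_lfs_pointer is an illustrative helper, not part of this repository):

def parse_lfs_pointer(text):
    """Parse the 'key value' lines of a Git LFS pointer file into a dict."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer_text = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:f4c4fb756db54f88cea8ad36d8b06be05359f654893e66ba4e33871c0cd08c40\n"
    "size 2348\n"
)
print(parse_lfs_pointer(pointer_text)["size"])  # 2348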
fever.py ADDED
@@ -0,0 +1,220 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """FEVER dataset."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ @inproceedings{Thorne18Fever,
+     author = {Thorne, James and Vlachos, Andreas and Christodoulopoulos, Christos and Mittal, Arpit},
+     title = {{FEVER}: a Large-scale Dataset for Fact Extraction and VERification},
+     booktitle = {NAACL-HLT},
+     year = {2018}
+ }
+ """
+
+ _DESCRIPTION = """
+ With billions of individual pages on the web providing information on almost every conceivable topic, we should have the ability to collect facts that answer almost every conceivable question. However, only a small fraction of this information is contained in structured sources (Wikidata, Freebase, etc.) – we are therefore limited by our ability to transform free-form text to structured knowledge. There is, however, another problem that has become the focus of a lot of recent research and media coverage: false information coming from unreliable sources. [1] [2]
+
+ The FEVER workshops are a venue for work in verifiable knowledge extraction and to stimulate progress in this direction.
+ """
+
+
+ class FeverConfig(datasets.BuilderConfig):
+     """BuilderConfig for FEVER."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for FEVER
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(FeverConfig, self).__init__(**kwargs)
+
+
+ class Fever(datasets.GeneratorBasedBuilder):
+     """Fact Extraction and VERification Dataset."""
+
+     BUILDER_CONFIGS = [
+         FeverConfig(
+             name="v1.0",
+             description="FEVER V1.0",
+             version=datasets.Version("1.0.0", ""),
+         ),
+         FeverConfig(
+             name="v2.0",
+             description="FEVER V2.0",
+             version=datasets.Version("2.0.0", ""),
+         ),
+         FeverConfig(
+             name="wiki_pages",
+             description="Wikipedia pages",
+             version=datasets.Version("1.0.0", ""),
+         ),
+     ]
+
+     def _info(self):
+
+         if self.config.name == "wiki_pages":
+             features = {
+                 "id": datasets.Value("string"),
+                 "text": datasets.Value("string"),
+                 "lines": datasets.Value("string"),
+             }
+         else:
+             features = {
+                 "id": datasets.Value("int32"),
+                 "label": datasets.Value("string"),
+                 "claim": datasets.Value("string"),
+                 "evidence_annotation_id": datasets.Value("int32"),
+                 "evidence_id": datasets.Value("int32"),
+                 "evidence_wiki_url": datasets.Value("string"),
+                 "evidence_sentence_id": datasets.Value("int32"),
+             }
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION + "\n" + self.config.description,
+             features=datasets.Features(features),
+             homepage="https://fever.ai/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         if self.config.name == "v2.0":
+             urls = "https://s3-eu-west-1.amazonaws.com/fever.public/fever2-fixers-dev.jsonl"
+             dl_path = dl_manager.download_and_extract(urls)
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "filepath": dl_path,
+                     },
+                 )
+             ]
+         elif self.config.name == "v1.0":
+             urls = {
+                 "train": "https://s3-eu-west-1.amazonaws.com/fever.public/train.jsonl",
+                 "labelled_dev": "https://s3-eu-west-1.amazonaws.com/fever.public/shared_task_dev.jsonl",
+                 "unlabelled_dev": "https://s3-eu-west-1.amazonaws.com/fever.public/shared_task_dev_public.jsonl",
+                 "unlabelled_test": "https://s3-eu-west-1.amazonaws.com/fever.public/shared_task_test.jsonl",
+                 "paper_dev": "https://s3-eu-west-1.amazonaws.com/fever.public/paper_dev.jsonl",
+                 "paper_test": "https://s3-eu-west-1.amazonaws.com/fever.public/paper_test.jsonl",
+             }
+             dl_path = dl_manager.download_and_extract(urls)
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "filepath": dl_path["train"],
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name="unlabelled_test",
+                     gen_kwargs={
+                         "filepath": dl_path["unlabelled_test"],
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name="unlabelled_dev",
+                     gen_kwargs={
+                         "filepath": dl_path["unlabelled_dev"],
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name="labelled_dev",
+                     gen_kwargs={
+                         "filepath": dl_path["labelled_dev"],
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name="paper_dev",
+                     gen_kwargs={
+                         "filepath": dl_path["paper_dev"],
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name="paper_test",
+                     gen_kwargs={
+                         "filepath": dl_path["paper_test"],
+                     },
+                 ),
+             ]
+         elif self.config.name == "wiki_pages":
+             urls = "https://s3-eu-west-1.amazonaws.com/fever.public/wiki-pages.zip"
+             dl_path = dl_manager.download_and_extract(urls)
+             files = sorted(os.listdir(os.path.join(dl_path, "wiki-pages")))
+             file_paths = [os.path.join(dl_path, "wiki-pages", file) for file in files]
+             return [
+                 datasets.SplitGenerator(
+                     name="wikipedia_pages",
+                     gen_kwargs={
+                         "filepath": file_paths,
+                     },
+                 ),
+             ]
+         else:
+             raise ValueError("config name not found")
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         if self.config.name == "v1.0" or self.config.name == "v2.0":
+             with open(filepath, encoding="utf-8") as f:
+                 for row_id, row in enumerate(f):
+                     data = json.loads(row)
+                     id_ = data["id"]
+                     label = data.get("label", "")
+                     claim = data["claim"]
+                     evidences = data.get("evidence", [])
+                     if len(evidences) > 0:
+                         for i in range(len(evidences)):
+                             for j in range(len(evidences[i])):
+                                 # Explicit None checks: a legitimate id of 0 must not be coerced to the -1 sentinel.
+                                 annot_id = evidences[i][j][0] if evidences[i][j][0] is not None else -1
+                                 evidence_id = evidences[i][j][1] if evidences[i][j][1] is not None else -1
+                                 wiki_url = evidences[i][j][2] if evidences[i][j][2] is not None else ""
+                                 sent_id = evidences[i][j][3] if evidences[i][j][3] is not None else -1
+                                 yield str(row_id) + "_" + str(i) + "_" + str(j), {
+                                     "id": id_,
+                                     "label": label,
+                                     "claim": claim,
+                                     "evidence_annotation_id": annot_id,
+                                     "evidence_id": evidence_id,
+                                     "evidence_wiki_url": wiki_url,
+                                     "evidence_sentence_id": sent_id,
+                                 }
+                     else:
+                         yield row_id, {
+                             "id": id_,
+                             "label": label,
+                             "claim": claim,
+                             "evidence_annotation_id": -1,
+                             "evidence_id": -1,
+                             "evidence_wiki_url": "",
+                             "evidence_sentence_id": -1,
+                         }
+         elif self.config.name == "wiki_pages":
+             for file in filepath:
+                 with open(file, encoding="utf-8") as f:
+                     for id_, row in enumerate(f):
+                         data = json.loads(row)
+                         yield id_, data
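
With the script in place, the dataset loads through the standard datasets API; a typical usage sketch (the wiki_pages config downloads a ~1.7 GB archive, so this sticks to v1.0):

from datasets import load_dataset

# Download and prepare the v1.0 configuration; split names match the
# SplitGenerators defined in _split_generators above.
fever = load_dataset("fever", "v1.0")

example = fever["labelled_dev"][0]
print(example["claim"])
print(example["label"], example["evidence_wiki_url"])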