system HF staff committed on
Commit
4630c14
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
1
+ {"2019-08-01": {"description": "KILT-Wikipedia: Wikipedia pre-processed for KILT.\n", "citation": "@inproceedings{fb_kilt,\n author = {Fabio Petroni and\n Aleksandra Piktus and\n Angela Fan and\n Patrick Lewis and\n Majid Yazdani and\n Nicola De Cao and\n James Thorne and\n Yacine Jernite and\n Vassilis Plachouras and\n Tim Rockt\"aschel and\n Sebastian Riedel},\n title = {{KILT:} a {B}enchmark for {K}nowledge {I}ntensive {L}anguage {T}asks},\n journal = {CoRR},\n archivePrefix = {arXiv},\n year = {2020},\n", "homepage": "https://github.com/facebookresearch/KILT", "license": "", "features": {"kilt_id": {"dtype": "string", "id": null, "_type": "Value"}, "wikipedia_id": {"dtype": "string", "id": null, "_type": "Value"}, "wikipedia_title": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"feature": {"paragraph": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "anchors": {"feature": {"paragraph_id": {"dtype": "int32", "id": null, "_type": "Value"}, "start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "href": {"dtype": "string", "id": null, "_type": "Value"}, "wikipedia_title": {"dtype": "string", "id": null, "_type": "Value"}, "wikipedia_id": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "categories": {"dtype": "string", "id": null, "_type": "Value"}, "wikidata_info": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "enwikiquote_title": {"dtype": "string", "id": null, "_type": "Value"}, "wikidata_id": {"dtype": "string", "id": null, "_type": "Value"}, "wikidata_label": {"dtype": "string", "id": null, "_type": "Value"}, "wikipedia_title": {"dtype": "string", "id": null, "_type": "Value"}, "aliases": {"feature": {"alias": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": 
"Sequence"}}, "history": {"pageid": {"dtype": "int32", "id": null, "_type": "Value"}, "parentid": {"dtype": "int32", "id": null, "_type": "Value"}, "revid": {"dtype": "int32", "id": null, "_type": "Value"}, "pre_dump": {"dtype": "bool", "id": null, "_type": "Value"}, "timestamp": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": {"features": null, "resources_checksums": {"full": {}}}, "supervised_keys": null, "builder_name": "kilt_wikipedia", "config_name": "2019-08-01", "version": {"version_str": "1.0.0", "description": "Wikipedia pre-processed for KILT", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"full": {"name": "full", "num_bytes": 29372535718, "num_examples": 5903530, "dataset_name": "kilt_wikipedia"}}, "download_checksums": {"http://dl.fbaipublicfiles.com/KILT/kilt_knowledgesource.json": {"num_bytes": 37318876722, "checksum": "f966d6f09c4ff91656db5c56c384f136b0c495c7083c043586b8cb1033c389a5"}}, "download_size": 37318876722, "post_processing_size": 0, "dataset_size": 29372535718, "size_in_bytes": 66691412440}}
dummy/2019-08-01/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e1965aa0b6df1805c977b789ccebbf9f1057a41346cdcac751822c22f1620b87
3
+ size 335
kilt_wikipedia.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """Wikipedia knowledge source for KILT"""
18
+
19
+ from __future__ import absolute_import, division, print_function
20
+
21
+ import json
22
+ import logging
23
+
24
+ import datasets
25
+
26
+
27
+ _CITATION = """\
28
+ @inproceedings{fb_kilt,
29
+ author = {Fabio Petroni and
30
+ Aleksandra Piktus and
31
+ Angela Fan and
32
+ Patrick Lewis and
33
+ Majid Yazdani and
34
+ Nicola De Cao and
35
+ James Thorne and
36
+ Yacine Jernite and
37
+ Vassilis Plachouras and
38
+ Tim Rockt\"aschel and
39
+ Sebastian Riedel},
40
+ title = {{KILT:} a {B}enchmark for {K}nowledge {I}ntensive {L}anguage {T}asks},
41
+ journal = {CoRR},
42
+ archivePrefix = {arXiv},
43
+ year = {2020},
44
+ """
45
+
46
+ _DESCRIPTION = """\
47
+ KILT-Wikipedia: Wikipedia pre-processed for KILT.
48
+ """
49
+
50
+
51
class KILTWikipediaConfig(datasets.BuilderConfig):
    """BuilderConfig for the KILT Wikipedia knowledge source."""

    def __init__(self, **kwargs):
        """Create a config pinned to dataset version 1.0.0.

        Args:
            **kwargs: keyword arguments forwarded to the parent BuilderConfig
                (e.g. ``name`` and ``description``).
        """
        kilt_version = datasets.Version("1.0.0", "Wikipedia pre-processed for KILT")
        super(KILTWikipediaConfig, self).__init__(version=kilt_version, **kwargs)
64
+
65
+
66
class KILTWikipedia(datasets.GeneratorBasedBuilder):
    """KILTWikipedia: Wikipedia pre-processed for KILT. Version 1.0.

    Loads the single-file KILT knowledge source (one JSON article per line)
    and normalizes it into a fixed schema, filling in defaults for the
    optional ``wikidata_info`` and ``history`` sub-records.
    """

    BUILDER_CONFIGS = [
        KILTWikipediaConfig(
            name="2019-08-01",
            description="Wikipedia pre-processed for KILT from 2019/08/01 dump",
        ),
    ]

    def _info(self):
        """Return the DatasetInfo (feature schema, homepage, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "kilt_id": datasets.Value("string"),
                    "wikipedia_id": datasets.Value("string"),
                    "wikipedia_title": datasets.Value("string"),
                    # Article body: one sequence entry per paragraph.
                    "text": datasets.features.Sequence({"paragraph": datasets.Value("string")}),
                    # Hyperlink anchors found in the article text.
                    "anchors": datasets.features.Sequence(
                        {
                            "paragraph_id": datasets.Value("int32"),
                            "start": datasets.Value("int32"),
                            "end": datasets.Value("int32"),
                            "text": datasets.Value("string"),
                            "href": datasets.Value("string"),
                            "wikipedia_title": datasets.Value("string"),
                            "wikipedia_id": datasets.Value("string"),
                        }
                    ),
                    "categories": datasets.Value("string"),
                    # Wikidata metadata attached to the article; missing fields
                    # are emitted as empty strings by _generate_examples.
                    "wikidata_info": datasets.Features(
                        {
                            "description": datasets.Value("string"),
                            "enwikiquote_title": datasets.Value("string"),
                            "wikidata_id": datasets.Value("string"),
                            "wikidata_label": datasets.Value("string"),
                            "wikipedia_title": datasets.Value("string"),
                            "aliases": datasets.features.Sequence({"alias": datasets.Value("string")}),
                        }
                    ),
                    # Revision-history metadata for the dumped page; missing
                    # int fields default to -1, strings to "" (see generator).
                    "history": datasets.Features(
                        {
                            "pageid": datasets.Value("int32"),
                            "parentid": datasets.Value("int32"),
                            "revid": datasets.Value("int32"),
                            "pre_dump": datasets.Value("bool"),
                            "timestamp": datasets.Value("string"),
                            "url": datasets.Value("string"),
                        }
                    ),
                }
            ),
            # No supervised_keys: this is a knowledge source, not a
            # supervised (input, label) task.
            supervised_keys=None,
            homepage="https://github.com/facebookresearch/KILT",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the full KILT knowledge source and define the single
        "full" split.

        Args:
            dl_manager: a datasets.DownloadManager.
        """
        downloaded_path = dl_manager.download_and_extract(
            "http://dl.fbaipublicfiles.com/KILT/kilt_knowledgesource.json"
        )

        return [
            datasets.SplitGenerator(name="full", gen_kwargs={"filepath": downloaded_path}),
        ]

    def _generate_examples(self, filepath):
        """Generate Wikipedia articles for KILT.

        Args:
            filepath: path to the JSON-lines knowledge source file.

        Yields:
            (idx, article) pairs where `article` matches the schema in _info;
            missing optional fields are normalized to "" / -1 / False.
        """
        logging.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                pre_article = json.loads(line.strip())
                # Mandatory top-level fields (raise KeyError on malformed rows).
                article = {k: pre_article[k] for k in ["wikipedia_id", "wikipedia_title", "categories"]}
                # wikidata: optional sub-record; fill missing scalars with ""
                # and missing aliases with an empty list.
                article["wikidata_info"] = {}
                pre_article["wikidata_info"] = pre_article.get("wikidata_info", {})
                if pre_article["wikidata_info"].get("aliases", None) is None:
                    pre_article["wikidata_info"]["aliases"] = []
                for k in ["description", "enwikiquote_title", "wikidata_id", "wikidata_label", "wikipedia_title"]:
                    val = pre_article["wikidata_info"].get(k, None)
                    article["wikidata_info"][k] = "" if val is None else val
                article["wikidata_info"]["aliases"] = {"alias": pre_article["wikidata_info"]["aliases"]}
                # history: optional sub-record; ints default to -1,
                # pre_dump to False, strings to "".
                article["history"] = {}
                pre_article["history"] = pre_article.get("history", {})
                pre_dump = pre_article["history"].get("pre_dump", None)
                article["history"]["pre_dump"] = False if pre_dump is None else pre_dump
                for k in ["pageid", "parentid", "revid"]:
                    val = pre_article["history"].get(k, None)
                    article["history"][k] = -1 if val is None else val
                for k in ["timestamp", "url"]:
                    val = pre_article["history"].get(k, None)
                    article["history"][k] = "" if val is None else val
                # everything else: id, paragraphs, and column-wise anchors
                # (each anchor field becomes a parallel list).
                article["kilt_id"] = pre_article["_id"]
                article["text"] = {"paragraph": pre_article["text"]}
                article["anchors"] = {}
                for k in ["paragraph_id", "start", "end", "text", "href", "wikipedia_title", "wikipedia_id"]:
                    article["anchors"][k] = [anchor.get(k, "") for anchor in pre_article["anchors"]]
                yield idx, article