LeandraFichtel
committed on
Commit
•
a38439e
1
Parent(s):
8ea0beb
Upload kamel.py
Browse files
kamel.py
ADDED
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""The LAMA Dataset"""
|
16 |
+
|
17 |
+
|
18 |
+
import json
|
19 |
+
from fnmatch import fnmatch
|
20 |
+
|
21 |
+
import datasets
|
22 |
+
import os
|
23 |
+
|
24 |
+
|
25 |
+
_CITATION = """
|
26 |
+
@inproceedings{kalo2022kamel,
|
27 |
+
title={KAMEL: Knowledge Analysis with Multitoken Entities in Language Models},
|
28 |
+
author={Kalo, Jan-Christoph and Fichtel, Leandra},
|
29 |
+
booktitle={Automated Knowledge Base Construction},
|
30 |
+
year={2022}
|
31 |
+
}
|
32 |
+
"""
|
33 |
+
|
34 |
+
|
35 |
+
_DESCRIPTION = """This dataset provides the data for KAMEL, a probing dataset for language models that contains factual knowledge
|
36 |
+
from Wikidata and Wikipedia..
|
37 |
+
"""
|
38 |
+
|
39 |
+
_HOMEPAGE = "https://github.com/JanCKalo/KAMEL"
|
40 |
+
|
41 |
+
_LICENSE = "The Creative Commons Attribution-Noncommercial 4.0 International License."
|
42 |
+
|
43 |
+
|
44 |
+
_DATA_URL = ""
|
45 |
+
|
46 |
+
_RELATIONS = ['P136', 'P1376', 'P131', 'P1971', 'P138', 'P366', 'P361', 'P3018', 'P162', 'P196', 'P1346', 'P101', 'P750', 'P106', 'P108', 'P1142', 'P137', 'P197', 'P190', 'P509', 'P2597', 'P155', 'P703', 'P1923', 'P488', 'P19', 'P26', 'P676', 'P2416', 'P17', 'P2044', '.DS_Store', 'P6087', 'P647', 'P277', 'P88', 'P1001', 'P81', 'P410', 'P279', 'P86', 'P241', 'P7153', 'P641', 'P7959', 'P20', 'P1408', 'P27', 'P87', 'P1038', 'P427', 'P840', 'P276', 'P58', 'P69', 'P291', 'P4908', 'P1441', 'P606', 'config.json', 'P467', 'P1082', 'P1412', 'P800', 'P451', 'P3373', 'P2094', 'P664', 'P200', 'P607', 'P50', 'P57', 'P264', 'P61', 'P2032', 'P59', 'P403', 'P201', 'P206', 'P466', 'P461', 'P495', 'P1619', 'P974', 'P183', 'P177', 'P1308', 'P170', 'P184', 'P945', 'P179', 'P2789', 'P710', 'P141', 'P585', 'P115', 'P571', 'P576', 'P112', 'P582', 'P5353', 'P2975', 'P1532', 'P123', 'P729', 'P140', 'P3450', 'P178', 'P1132', 'P185', 'P171', 'P149', 'P344', 'P176', 'P1103', 'P1365', 'P921', 'P541', 'P1192', 'P577', 'P113', 'P2522', 'P570', 'P706', 'P150', 'P737', 'P161', 'P195', 'P708', 'P1113', 'P166', 'P991', 'P159', 'P569', 'P931', 'P551', 'P135', 'P103', 'P1344', 'P355', 'P2936', 'P1327', 'P156', 'P364', 'P6', 'P105', 'P559', 'P102', 'P937', 'P306', 'P765', 'P40', 'P287', 'P610', 'P3764', 'P47', 'P1830', 'P414', 'P619', 'P7937', 'P413', 'P1056', 'P22', 'P1435', 'P25', 'P449', 'P412', 'P4743', 'P415', 'P84', 'P611', 'P272', 'P286', 'P6886', 'P1066', 'P1050', 'P3999', 'P674', 'P1433', 'P1411', 'P669', 'P1416', 'P463', 'P39', 'P30', 'P1249', 'P452', 'P2632', 'P37', 'P1427', 'P97', 'P36', 'P31', 'P2868', 'P2437', 'P1877', 'P802', 'P54', 'P1444', 'P4552', 'P98', 'P1027', 'P53', 'P400', 'P65', 'P2031', 'P407', 'P575', 'P740', 'P6379', 'P915', 'P3602', 'P749', 'P127', 'P118', 'P180', 'P1101', 'P1598', 'P8875', 'P126', 'P119', 'P2341', 'P1366', 'P1350', 'P2348', 'P580', 'P1191', 'P4884', 'P1303', 'P144', 'P371', 'P527', 'P175', 'P4647', 'P186', 'P172']
|
47 |
+
|
48 |
+
|
49 |
+
class KAMELConfig(datasets.BuilderConfig):
    """BuilderConfig for the KAMEL dataset.

    Args:
        relations: list of Wikidata relation ids to load. When None, the
            full module-level _RELATIONS list is used.
        **kwargs: forwarded verbatim to datasets.BuilderConfig
            (name, version, description, ...).
    """

    def __init__(self, relations=None, **kwargs):
        super().__init__(**kwargs)
        # Fall back to the complete relation list when none are requested.
        self.relations = _RELATIONS if relations is None else relations
|
56 |
+
|
57 |
+
|
58 |
+
|
59 |
+
class Kamel(datasets.GeneratorBasedBuilder):
    """KAMEL Dataset: factual-knowledge probing data from Wikidata/Wikipedia."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = KAMELConfig
    BUILDER_CONFIGS = [
        KAMELConfig(
            name="all",
            relations=None,  # None -> KAMELConfig falls back to all relations
            version=VERSION,
            description="Import of KAMEL.",
        )
    ]

    def _info(self):
        """Return the dataset metadata and the per-example feature schema."""
        features = datasets.Features(
            {
                "index": datasets.Value("string"),
                "obj_uri": datasets.Value("string"),
                "obj_label": datasets.Value("string"),
                "sub_label": datasets.Value("string"),
                "predicate_id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the data and define the train/val/test splits.

        NOTE(review): _DATA_URL is currently an empty string, so
        download_and_extract will fail until a real URL is configured --
        TODO confirm the data location.
        """
        dl_dir = dl_manager.download_and_extract(_DATA_URL)

        # Fix for the original "generator class is not working" TODO:
        # _generate_examples takes a parameter named `filepath` (the
        # extracted root directory -- it joins templates.jsonl and
        # <relation>/<split>.jsonl itself) and a split *file stem*
        # ("val", not "validation"). The original passed a single file
        # path under the mismatched key "data_file", which raised a
        # TypeError, and a Split object whose string form does not match
        # the on-disk file names for the validation split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": dl_dir, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": dl_dir, "split": "val"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": dl_dir, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples from the KAMEL dataset.

        Args:
            filepath: root directory of the extracted data, containing
                templates.jsonl and one sub-directory per relation.
            split: split file stem ("train", "val" or "test").

        Yields:
            (id, triple) pairs, where triple is the JSON object parsed
            from one line of <relation>/<split>.jsonl.
        """
        # Question templates, keyed by relation id. NOTE(review): loaded
        # but never used in the yielded examples -- presumably intended
        # to be attached to each triple; confirm before removing.
        templates = {}
        with open(os.path.join(filepath, "templates.jsonl"), encoding="utf-8") as fp:
            for raw in fp:
                record = json.loads(raw)
                templates[record["relation"]] = record["template"]

        id_ = -1
        for relation in self.config.relations:
            # Load the triples for this relation, one JSON object per line.
            with open(os.path.join(filepath, f"{relation}/{split}.jsonl"), encoding="utf-8") as fp:
                for raw in fp:
                    triple = json.loads(raw)
                    id_ += 1
                    yield id_, triple