# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The KAMEL Dataset"""

import json
import os

import datasets


_CITATION = """
@inproceedings{kalo2022kamel,
    title={KAMEL: Knowledge Analysis with Multitoken Entities in Language Models},
    author={Kalo, Jan-Christoph and Fichtel, Leandra},
    booktitle={Automated Knowledge Base Construction},
    year={2022}
}
"""

_DESCRIPTION = """This dataset provides the data for KAMEL, a probing dataset for language models
that contains factual knowledge from Wikidata and Wikipedia.
"""

_HOMEPAGE = "https://github.com/JanKalo/KAMEL"

_LICENSE = "The Creative Commons Attribution-Noncommercial 4.0 International License."

_DATA_URL = "https://github.com/JanKalo/KAMEL/blob/master/data/kamel.zip?raw=true"

# Wikidata property IDs of the relations covered by KAMEL. Each relation has its own
# directory of train/dev/test jsonl files in the downloaded archive.
_RELATIONS = [
    'P136', 'P1376', 'P131', 'P1971', 'P138', 'P366', 'P361', 'P3018', 'P162', 'P196', 'P1346', 'P101', 'P750',
    'P106', 'P108', 'P1142', 'P137', 'P197', 'P190', 'P509', 'P2597', 'P155', 'P703', 'P1923', 'P488', 'P19',
    'P26', 'P676', 'P2416', 'P17', 'P2044', 'P6087', 'P647', 'P277', 'P88', 'P1001', 'P81', 'P410', 'P279',
    'P86', 'P241', 'P7153', 'P641', 'P7959', 'P20', 'P1408', 'P27', 'P87', 'P1038', 'P427', 'P840', 'P276',
    'P58', 'P69', 'P291', 'P4908', 'P1441', 'P606', 'P467', 'P1082', 'P1412', 'P800', 'P451', 'P3373', 'P2094',
    'P664', 'P200', 'P607', 'P50', 'P57', 'P264', 'P61', 'P2032', 'P59', 'P403', 'P201', 'P206', 'P466',
    'P461', 'P495', 'P1619', 'P974', 'P183', 'P177', 'P1308', 'P170', 'P184', 'P945', 'P179', 'P2789', 'P710',
    'P141', 'P585', 'P115', 'P571', 'P576', 'P112', 'P582', 'P5353', 'P2975', 'P1532', 'P123', 'P729', 'P140',
    'P3450', 'P178', 'P1132', 'P185', 'P171', 'P149', 'P344', 'P176', 'P1103', 'P1365', 'P921', 'P541', 'P1192',
    'P577', 'P113', 'P2522', 'P570', 'P706', 'P150', 'P737', 'P161', 'P195', 'P708', 'P1113', 'P166', 'P991',
    'P159', 'P569', 'P931', 'P551', 'P135', 'P103', 'P1344', 'P355', 'P2936', 'P1327', 'P156', 'P364', 'P6',
    'P105', 'P559', 'P102', 'P937', 'P306', 'P765', 'P40', 'P287', 'P610', 'P3764', 'P47', 'P1830', 'P414',
    'P619', 'P7937', 'P413', 'P1056', 'P22', 'P1435', 'P25', 'P449', 'P412', 'P4743', 'P415', 'P84', 'P611',
    'P272', 'P286', 'P6886', 'P1066', 'P1050', 'P3999', 'P674', 'P1433', 'P1411', 'P669', 'P1416', 'P463', 'P39',
    'P30', 'P1249', 'P452', 'P2632', 'P37', 'P1427', 'P97', 'P36', 'P31', 'P2868', 'P2437', 'P1877', 'P802',
    'P54', 'P1444', 'P4552', 'P98', 'P1027', 'P53', 'P400', 'P65', 'P2031', 'P407', 'P575', 'P740', 'P6379',
    'P915', 'P3602', 'P749', 'P127', 'P118', 'P180', 'P1101', 'P1598', 'P8875', 'P126', 'P119', 'P2341', 'P1366',
    'P1350', 'P2348', 'P580', 'P1191', 'P4884', 'P1303', 'P144', 'P371', 'P527', 'P175', 'P4647', 'P186', 'P172',
]


class KAMELConfig(datasets.BuilderConfig):
    """BuilderConfig for KAMEL."""

    def __init__(self, relations=None, **kwargs):
        """BuilderConfig for KAMEL.

        Args:
            relations: list of Wikidata property IDs to load; defaults to all of _RELATIONS.
            **kwargs: keyword arguments forwarded to the base BuilderConfig.
        """
        super(KAMELConfig, self).__init__(**kwargs)
        self.relations = relations if relations is not None else _RELATIONS
class Kamel(datasets.GeneratorBasedBuilder):
    """KAMEL Dataset"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = KAMELConfig
    BUILDER_CONFIGS = [
        KAMELConfig(
            name="all",
            relations=None,
            version=VERSION,
            description="Import of KAMEL.",
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                "index": datasets.Value("string"),
                "relation_id": datasets.Value("string"),
                "obj_uri": datasets.Value("string"),
                "obj_label": datasets.Value("string"),
                "sub_label": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    # TODO The generator class is not working!
    def _split_generators(self, dl_manager):
        # Download the zip archive and extract it; the extracted directory is expected to
        # contain one sub-directory per relation, each holding train/dev/test jsonl files.
        dl_dir = dl_manager.download_and_extract(_DATA_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": dl_dir, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": dl_dir, "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": dl_dir, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples from the KAMEL dataset."""
        id_ = -1
        for relation in self.config.relations:
            # Load the triples for this relation and split; one JSON object per line.
            with open(os.path.join(filepath, relation, f"{split}.jsonl"), encoding="utf-8") as fp:
                for line in fp:
                    triple = json.loads(line)
                    triple["relation_id"] = relation
                    id_ += 1
                    yield id_, triple
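

# A minimal usage sketch, not part of the builder API above: it exercises the Kamel
# builder via datasets.load_dataset, loading this script directly from its local path.
# Restricting `relations` to two Wikidata properties taken from _RELATIONS ("P19": place
# of birth, "P106": occupation) only keeps the example small; whether `trust_remote_code`
# is needed, or script-based loading is supported at all, depends on the installed
# `datasets` version.
if __name__ == "__main__":
    kamel = datasets.load_dataset(
        __file__,                     # load this file as a local dataset script
        "all",                        # the single config defined in BUILDER_CONFIGS
        relations=["P19", "P106"],    # override KAMELConfig.relations for a quick test
        trust_remote_code=True,       # required by recent `datasets` releases for scripts
    )
    print(kamel["train"][0])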