"""Hugging Face `datasets` loading script for the OpenEA D-W-15K-V1 benchmark.

Exposes three builder configs:
  * ``source`` — attribute/relation triples of the first KG (DBpedia side).
  * ``target`` — attribute/relation triples of the second KG (Wikidata side).
  * ``pairs``  — gold entity-alignment links split into train/valid/test.
"""

import datasets
import os
import pickle  # NOTE(review): unused in this file — kept for compatibility, confirm before removing
import json    # NOTE(review): unused in this file — kept for compatibility, confirm before removing

# Single zip archive containing all triple and link files for this dataset.
_DATA_URL = "https://huggingface.co/datasets/matchbench/openea-d-w-15k-v1/resolve/main/openea-d-w-15k-v1.zip"


class OpenEADW15kV1Config(datasets.BuilderConfig):
    """BuilderConfig for OpenEA D-W-15K-V1.

    Args:
        features: list of column names the config's examples expose.
        data_url: URL of the zip archive with the raw files.
        citation: citation string for the dataset (currently a TODO upstream).
        url: homepage URL (currently a TODO upstream).
        label_classes: label vocabulary; unused by the generators but kept
            for interface compatibility.
        **kwargs: forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, features, data_url, citation, url,
                 label_classes=("False", "True"), **kwargs):
        super(OpenEADW15kV1Config, self).__init__(
            version=datasets.Version("0.0.1"), **kwargs
        )
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url


class OpenEADW15kV1(datasets.GeneratorBasedBuilder):
    """Builder yielding KG triples (``source``/``target``) or alignment links (``pairs``)."""

    BUILDER_CONFIGS = [
        OpenEADW15kV1Config(
            name="source",
            features=["column1", "column2", "column3"],
            citation="TODO",
            url="TODO",
            data_url=_DATA_URL,
        ),
        OpenEADW15kV1Config(
            name="target",
            features=["column1", "column2", "column3"],
            citation="TODO",
            url="TODO",
            data_url=_DATA_URL,
        ),
        OpenEADW15kV1Config(
            name="pairs",
            features=["left_id", "right_id"],
            citation="TODO",
            url="TODO",
            data_url=_DATA_URL,
        ),
    ]

    def _info(self):
        """Return dataset metadata: every declared feature is a plain string.

        The original code had three identical branches (one per config name);
        all configs map each feature name to ``datasets.Value("string")``, so
        a single expression covers them all.
        """
        features = {
            feature: datasets.Value("string") for feature in self.config.features
        }
        return datasets.DatasetInfo(features=datasets.Features(features))

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare splits per config.

        ``source``/``target`` expose ``attr_triples`` and ``rel_triples``
        splits (files suffixed ``_1`` and ``_2`` respectively); ``pairs``
        exposes ``train``/``valid``/``test`` link files.
        """
        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""

        def _split(split_name, file_name):
            # Small helper: one SplitGenerator pointing at a file in the archive.
            return datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, file_name),
                    "split": split_name,
                },
            )

        if self.config.name == "source":
            return [
                _split("attr_triples", "attr_triples_1"),
                _split("rel_triples", "rel_triples_1"),
            ]
        elif self.config.name == "target":
            return [
                _split("attr_triples", "attr_triples_2"),
                _split("rel_triples", "rel_triples_2"),
            ]
        elif self.config.name == "pairs":
            return [
                _split("train", "train_links"),
                _split("valid", "valid_links"),
                _split("test", "test_links"),
            ]

    def _generate_examples(self, data_file, split):
        """Yield ``(index, example)`` pairs from a tab-separated file.

        Fix vs. original: the file handle was opened with ``open()`` and never
        closed (resource leak); it is now managed by a ``with`` block, and
        iteration uses ``enumerate`` instead of ``range(len(...))``.

        Line formats (tab-separated):
          * ``ent_ids``/``rel_ids`` splits: 2 columns (``column3`` is None).
          * ``rel_triples*`` / ``attr_triples`` splits: 3 columns.
          * ``pairs`` config: 2 columns (``left_id``, ``right_id``).
        """
        with open(data_file, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                if self.config.name in ["source", "target"]:
                    if split in ["ent_ids", "rel_ids"]:
                        row = line.strip("\n").split("\t")
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": None,
                        }
                    elif split in ["rel_triples", "rel_triples_whole"]:
                        row = line.strip("\n").split("\t")
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": row[2],
                        }
                    elif split in ["attr_triples"]:
                        # rstrip (not strip) preserves any leading newline
                        # inside multi-line attribute values — matches original.
                        row = line.rstrip("\n").split("\t")
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": row[2],
                        }
                if self.config.name == "pairs":
                    row = line.strip("\n").split("\t")
                    yield i, {
                        "left_id": row[0],
                        "right_id": row[1],
                    }