HumorRP committed on
Commit
2a62b4b
1 Parent(s): c4d359b

Create selfkg-dwy100k-dbpwd.py

Browse files
Files changed (1) hide show
  1. selfkg-dwy100k-dbpwd.py +194 -0
selfkg-dwy100k-dbpwd.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import pickle
4
+
5
+ import datasets
6
+
7
+ logger = datasets.logging.get_logger(__name__)
8
+
9
+ # _SUBFIELD = "yg"
10
+
11
+ # _VERSION = "1.0.0"
12
+
13
+
14
+ # _CITATION = """\
15
+ # @inproceedings{sun2018bootstrapping,
16
+ # title={Bootstrapping Entity Alignment with Knowledge Graph Embedding.},
17
+ # author={Sun, Zequn and Hu, Wei and Zhang, Qingheng and Qu, Yuzhong},
18
+ # booktitle={IJCAI},
19
+ # volume={18},
20
+ # pages={4396--4402},
21
+ # year={2018}
22
+ # }
23
+ # """
24
+
25
+ # _URL = "https://dl.acm.org/doi/10.1145/3485447.3511945"
26
+
27
+ # _PREFIX = "https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpwd"
28
+
29
+
30
+ class SelfkgDwy100kwdConfig(datasets.BuilderConfig):
31
+ """BuilderConfig for Selfkg-DWY100k."""
32
+
33
+ def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
34
+ """
35
+ Args:
36
+ **kwargs: keyword arguments forwarded to super.
37
+ """
38
+ super(SelfkgDwy100kwdConfig, self).__init__(**kwargs)
39
+ self.features = features
40
+ self.label_classes = label_classes
41
+ self.data_url = data_url
42
+ self.citation = citation
43
+ self.url = url
44
+
45
+ class DWY100kWd(datasets.GeneratorBasedBuilder):
46
+ """DWY100k-wd: A Entity Alignment Dataset."""
47
+
48
+ BUILDER_CONFIGS = [
49
+ SelfkgDwy100kwdConfig(
50
+ name="source",
51
+ features=["column1", "column2", "column3"],
52
+ citation="TODO",
53
+ url="TODO",
54
+ data_url="https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpwd/resolve/main/selfkg-dwy100k-dbpwd.zip"
55
+ ),
56
+ SelfkgDwy100kwdConfig(
57
+ name="target",
58
+ features=["column1", "column2", "column3"],
59
+ citation="TODO",
60
+ url="TODO",
61
+ data_url="https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpwd/resolve/main/selfkg-dwy100k-dbpwd.zip"
62
+ ),
63
+ SelfkgDwy100kwdConfig(
64
+ name="pairs",
65
+ features=["left_id","right_id"],
66
+ citation="TODO",
67
+ url="TODO",
68
+ data_url="https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpwd/resolve/main/selfkg-dwy100k-dbpwd.zip"
69
+ ),
70
+ ]
71
+
72
+ def _info(self) -> datasets.DatasetInfo:
73
+ if self.config.name=="source":
74
+ features = {feature: datasets.Value("string") for feature in self.config.features}
75
+ elif self.config.name=="target":
76
+ features = {feature: datasets.Value("string") for feature in self.config.features}
77
+ elif self.config.name=="pairs":
78
+ features = {feature: datasets.Value("int32") for feature in self.config.features}
79
+
80
+ return datasets.DatasetInfo(features = datasets.Features(features))
81
+
82
+
83
+ def _split_generators(self, dl_manager):
84
+
85
+ dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
86
+ if self.config.name == "source":
87
+ return [
88
+ datasets.SplitGenerator(
89
+ name="ent_ids",
90
+ gen_kwargs={
91
+ "data_file": os.path.join(dl_dir, "id_ent_1"),
92
+ "split": "ent_ids",
93
+ },
94
+ ),
95
+ datasets.SplitGenerator(
96
+ name="rel_triples_id",
97
+ gen_kwargs={
98
+ "data_file": os.path.join(dl_dir, "triples_1"),
99
+ "split": "rel_triples_id",
100
+ },
101
+ ),
102
+ datasets.SplitGenerator(
103
+ name="LaBSE_emb",
104
+ gen_kwargs={
105
+ "data_file": os.path.join(dl_dir, "raw_LaBSE_emb_1.pkl"),
106
+ "split": "LaBSE_emb",
107
+ },
108
+ ),
109
+ ]
110
+ elif self.config.name == "target":
111
+ return [
112
+ datasets.SplitGenerator(
113
+ name="ent_ids",
114
+ gen_kwargs={
115
+ "data_file": os.path.join(dl_dir, "id_ent_2"),
116
+ "split": "ent_ids",
117
+ },
118
+ ),
119
+ datasets.SplitGenerator(
120
+ name="rel_triples_id",
121
+ gen_kwargs={
122
+ "data_file": os.path.join(dl_dir, "triples_2"),
123
+ "split": "rel_triples_id",
124
+ },
125
+ ),
126
+ datasets.SplitGenerator(
127
+ name="LaBSE_emb",
128
+ gen_kwargs={
129
+ "data_file": os.path.join(dl_dir, "raw_LaBSE_emb_2.pkl"),
130
+ "split": "LaBSE_emb",
131
+ },
132
+ ),
133
+ ]
134
+ elif self.config.name == "pairs":
135
+ return [
136
+ datasets.SplitGenerator(
137
+ name="train",
138
+ gen_kwargs={
139
+ "data_file": os.path.join(dl_dir, "ref_ent_ids"),
140
+ "split": "train",
141
+ },
142
+ ),
143
+ datasets.SplitGenerator(
144
+ name="valid",
145
+ gen_kwargs={
146
+ "data_file": os.path.join(dl_dir, "valid.ref"),
147
+ "split": "valid",
148
+ },
149
+ ),
150
+ datasets.SplitGenerator(
151
+ name="test",
152
+ gen_kwargs={
153
+ "data_file": os.path.join(dl_dir, "ref_ent_ids"),
154
+ "split": "test",
155
+ },
156
+ ),
157
+ ]
158
+
159
+ def _generate_examples(self, data_file, split):
160
+ if split in ["LaBSE_emb"]:
161
+ des = pickle.load(open(data_file,"rb"))
162
+ i = -1
163
+ for ent_ids,ori_emb in des.items():
164
+ i += 1
165
+ yield i, {
166
+ "column1": ent_ids,
167
+ "column2": ori_emb,
168
+ "column3": None
169
+ }
170
+ else:
171
+ f = open(data_file,"r", encoding='utf-8')
172
+ data = f.readlines()
173
+ for i in range(len(data)):
174
+ if self.config.name in ["source", "target"]:
175
+ if split in ["ent_ids"]:
176
+ row = data[i].strip('\n').split('\t')
177
+ yield i, {
178
+ "column1": row[0],
179
+ "column2": row[1],
180
+ "column3": None
181
+ }
182
+ elif split in ["rel_triples_id"]:
183
+ row = data[i].strip('\n').split('\t')
184
+ yield i, {
185
+ "column1": row[0],
186
+ "column2": row[1],
187
+ "column3": row[2]
188
+ }
189
+ if self.config.name == "pairs":
190
+ row = data[i].strip('\n').split('\t')
191
+ yield i, {
192
+ "left_id": row[0],
193
+ "right_id": row[1]
194
+ }