HumorRP committed on
Commit
d8ab718
1 Parent(s): e0995c9

Upload selfkgdwy100kdbpyg.py

Browse files
Files changed (1) hide show
  1. selfkgdwy100kdbpyg.py +203 -0
selfkgdwy100kdbpyg.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json  # NOTE(review): not used in the visible code — confirm before removing.
import os
import pickle

import datasets

logger = datasets.logging.get_logger(__name__)

# Direction suffix of the benchmark: "yg" = DBpedia -> YAGO3.
_SUBFIELD = "yg"

# NOTE(review): _VERSION is defined but never passed to the builder — confirm intent.
_VERSION = "1.0.0"

_DESCRIPTION = """\
DWY100k-yg is a large-scale monolingual dataset extracted from DBpedia and YAGO3. The suffix yg means DBpedia
to YAGO3. And DWY100k-yg has 100,000 reference entity alignments.
"""

# BibTeX entry for the paper that introduced the DWY100k benchmark (IJCAI 2018).
_CITATION = """\
@inproceedings{sun2018bootstrapping,
title={Bootstrapping Entity Alignment with Knowledge Graph Embedding.},
author={Sun, Zequn and Hu, Wei and Zhang, Qingheng and Qu, Yuzhong},
booktitle={IJCAI},
volume={18},
pages={4396--4402},
year={2018}
}
"""

# Reference paper DOI used as the dataset URL.
_URL = "https://dl.acm.org/doi/10.1145/3485447.3511945"

# Hugging Face dataset repository that hosts the raw archives.
_PREFIX = "https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpyg"

# Per-config archive URLs.
# NOTE(review): _URLS is not referenced by the visible builder code (the
# configs below hard-code their data_url instead) — confirm whether needed.
_URLS = {
    "source": f"{_PREFIX}/resolve/main/selfkg-dwy100k-dbp{_SUBFIELD}-src.zip",
    "target": f"{_PREFIX}/resolve/main/selfkg-dwy100k-dbp{_SUBFIELD}-tgt.zip",
    "pairs": f"{_PREFIX}/resolve/main/selfkg-dwy100k-dbp{_SUBFIELD}-pairs.zip",
}
39
class SelfkgDwy100kygConfig(datasets.BuilderConfig):
    """BuilderConfig for Selfkg-DWY100k.

    Args:
        features: list of feature/column names this config exposes.
        data_url: URL of the zip archive with the raw data files.
        citation: citation text for the underlying dataset.
        url: homepage URL for the dataset.
        label_classes: tuple of label names; kept for API compatibility
            (not read by the visible builder code).
        **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
        # Modern zero-argument super() — same behavior as the old
        # super(SelfkgDwy100kygConfig, self) spelling.
        super().__init__(**kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url
class DWY100kYg(datasets.GeneratorBasedBuilder):
    """DWY100k-yg: an entity-alignment dataset, DBpedia to YAGO3.

    Three configs are exposed:
      * ``source`` / ``target``: entity-id maps, relation triples and pickled
        LaBSE embeddings for each knowledge graph.
      * ``pairs``: reference entity-alignment pairs (train/valid/test).
    """

    BUILDER_CONFIGS = [
        SelfkgDwy100kygConfig(
            name="source",
            features=["column1", "column2", "column3"],
            citation="TODO",
            url="TODO",
            data_url="https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpyg/resolve/main/selfkg-dwy100k-dbpyg.zip",
        ),
        SelfkgDwy100kygConfig(
            name="target",
            features=["column1", "column2", "column3"],
            citation="TODO",
            url="TODO",
            data_url="https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpyg/resolve/main/selfkg-dwy100k-dbpyg.zip",
        ),
        SelfkgDwy100kygConfig(
            name="pairs",
            features=["left_id", "right_id"],
            citation="TODO",
            url="TODO",
            data_url="https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpyg/resolve/main/selfkg-dwy100k-dbpyg.zip",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Declare the schema: string columns for the graphs, int32 ids for pairs."""
        if self.config.name in ("source", "target"):
            features = {feature: datasets.Value("string") for feature in self.config.features}
        elif self.config.name == "pairs":
            features = {feature: datasets.Value("int32") for feature in self.config.features}
        else:
            # Defensive: only the three configs declared above are valid.
            raise ValueError(f"Unknown config name: {self.config.name}")
        # Wire in the module-level citation/homepage constants (previously unused).
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            citation=_CITATION,
            homepage=_URL,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and map each config to its raw files."""
        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
        if self.config.name == "source":
            return [
                datasets.SplitGenerator(
                    name="ent_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "id_ent_1"),
                        "split": "ent_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_triples_id",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "triples_1"),
                        "split": "rel_triples",
                    },
                ),
                datasets.SplitGenerator(
                    name="LaBSE_emb",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "raw_LaBSE_emb_1.pkl"),
                        "split": "embedding",
                    },
                ),
            ]
        elif self.config.name == "target":
            return [
                datasets.SplitGenerator(
                    name="ent_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "id_ent_2"),
                        "split": "ent_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_triples_id",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "triples_2"),
                        "split": "rel_triples",
                    },
                ),
                datasets.SplitGenerator(
                    name="LaBSE_emb",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "raw_LaBSE_emb_2.pkl"),
                        "split": "embedding",
                    },
                ),
            ]
        elif self.config.name == "pairs":
            # NOTE(review): "train" and "test" both read ref_ent_ids while
            # "valid" reads valid.ref — confirm this overlap is intentional.
            return [
                datasets.SplitGenerator(
                    name="train",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ref_ent_ids"),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name="valid",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "valid.ref"),
                        "split": "valid",
                    },
                ),
                datasets.SplitGenerator(
                    name="test",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ref_ent_ids"),
                        "split": "test",
                    },
                ),
            ]

    def _generate_examples(self, data_file, split):
        """Yield (key, example) pairs for one split.

        ``split`` values arrive via gen_kwargs above: "embedding" for the
        pickled LaBSE files, "ent_ids"/"rel_triples" for the tab-separated
        graph files, and "train"/"valid"/"test" for alignment pairs.
        """
        # BUG FIX: the original tested `split in ["LaBSE_emb"]` and
        # `split in ["rel_triples_id"]`, but gen_kwargs pass "embedding" and
        # "rel_triples" — so the pickle branch was unreachable (embedding
        # files were opened as UTF-8 text) and the triples splits yielded no
        # examples. Both spellings are accepted here for safety.
        if split in ("embedding", "LaBSE_emb"):
            # SECURITY NOTE: pickle.load executes arbitrary code on load;
            # only safe because the file comes from the trusted dataset repo.
            with open(data_file, "rb") as handle:
                embeddings = pickle.load(handle)
            for idx, (ent, ori_emb) in enumerate(embeddings.items()):
                yield idx, {
                    "column1": ent,
                    "column2": ori_emb,
                    "column3": None,
                }
        else:
            # Context manager fixes the original's leaked file handle.
            with open(data_file, "r", encoding="utf-8") as handle:
                lines = handle.readlines()
            for idx, line in enumerate(lines):
                row = line.strip("\n").split("\t")
                if self.config.name in ("source", "target"):
                    if split in ("ent_ids", "rel_ids"):
                        # NOTE(review): yielded keys ("id", "data") do not match
                        # the declared features column1..column3 — confirm
                        # against downstream consumers.
                        yield idx, {
                            "id": row[0],
                            "data": row[1],
                        }
                    elif split in ("rel_triples", "rel_triples_id"):
                        yield idx, {
                            "head_ent": row[0],
                            "relation": row[1],
                            "tail_ent": row[2],
                        }
                if self.config.name == "pairs":
                    yield idx, {
                        "left_id": row[0],
                        "right_id": row[1],
                    }