Create geo-heter.py
Browse files- geo-heter.py +93 -0
geo-heter.py
ADDED
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import datasets
|
3 |
+
import pandas as pd
|
4 |
+
|
5 |
+
class geo_heterConfig(datasets.BuilderConfig):
    """BuilderConfig for the matchbench geo-heter dataset.

    Carries the feature schema and the download URL for one view of the
    dataset ("pairs", "source", or "target").
    """

    def __init__(self, features, data_url, **kwargs):
        """
        Args:
            features: dict mapping column name -> ``datasets.Value`` type,
                used to build the ``datasets.Features`` schema in ``_info``.
            data_url: base URL (for "pairs") or full CSV URL (for
                "source"/"target") on the Hugging Face Hub.
            **kwargs: forwarded to ``datasets.BuilderConfig`` (e.g. ``name``).
        """
        # Python 3 zero-argument super() replaces the legacy
        # super(geo_heterConfig, self) form.
        super().__init__(**kwargs)
        self.features = features
        self.data_url = data_url
|
10 |
+
|
11 |
+
class geo_heter(datasets.GeneratorBasedBuilder):
    """Dataset builder for the matchbench geo-heter entity-matching corpus.

    Three configs are exposed:
      * ``pairs``  — candidate match pairs ``(ltable_id, rtable_id, label)``
                     with train/valid/test splits.
      * ``source`` — left-table records (tableA.csv).
      * ``target`` — right-table records (tableB.csv).

    All columns are declared as strings; values are yielded as read from the
    CSV files without conversion.
    """

    BUILDER_CONFIGS = [
        geo_heterConfig(
            name="pairs",
            features={
                "ltable_id": datasets.Value("string"),
                "rtable_id": datasets.Value("string"),
                "label": datasets.Value("string"),
            },
            # Trailing "/" is required: split file names are appended directly.
            data_url="https://huggingface.co/datasets/matchbench/geo-heter/resolve/main/",
        ),
        geo_heterConfig(
            name="source",
            features={
                "name": datasets.Value("string"),
                "latitude": datasets.Value("string"),
                "longitude": datasets.Value("string"),
                "address": datasets.Value("string"),
                "postalCode": datasets.Value("string"),
            },
            data_url="https://huggingface.co/datasets/matchbench/geo-heter/resolve/main/tableA.csv",
        ),
        geo_heterConfig(
            name="target",
            features={
                "name": datasets.Value("string"),
                "position": datasets.Value("string"),
                "address": datasets.Value("string"),
                "postalCode": datasets.Value("string"),
            },
            data_url="https://huggingface.co/datasets/matchbench/geo-heter/resolve/main/tableB.csv",
        ),
    ]

    def _info(self):
        """Return dataset metadata holding the active config's feature schema."""
        return datasets.DatasetInfo(
            features=datasets.Features(self.config.features)
        )

    def _split_generators(self, dl_manager):
        """Download the CSV file(s) for the active config and declare splits.

        Args:
            dl_manager: ``datasets.DownloadManager`` used to fetch files.

        Returns:
            list of ``datasets.SplitGenerator`` — three splits for "pairs",
            a single split for "source"/"target".

        Raises:
            ValueError: if the config name is not one of the known configs
                (the original fell through and returned None here).
        """
        if self.config.name == "pairs":
            return [
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        # Build the URL by string concatenation, NOT
                        # os.path.join: os.path.join would insert "\" on
                        # Windows and is the wrong tool for URLs.
                        "path_file": dl_manager.download_and_extract(
                            self.config.data_url + f"{split}.csv"
                        ),
                        "split": split,
                    },
                )
                for split in ["train", "valid", "test"]
            ]
        if self.config.name in ("source", "target"):
            # "source" and "target" differ only in name and URL; one branch
            # covers both (data_url is already the full CSV URL).
            return [
                datasets.SplitGenerator(
                    name=self.config.name,
                    gen_kwargs={
                        "path_file": dl_manager.download_and_extract(self.config.data_url),
                        "split": self.config.name,
                    },
                )
            ]
        raise ValueError(f"Unknown config name: {self.config.name!r}")

    def _generate_examples(self, path_file, split):
        """Yield ``(index, example)`` pairs from a downloaded CSV.

        Args:
            path_file: local path of the downloaded CSV file.
            split: split name; "source"/"target" select the table schemas,
                anything else ("train"/"valid"/"test") selects the pairs schema.
        """
        table = pd.read_csv(path_file)
        for idx, row in table.iterrows():
            if split not in ("source", "target"):
                # Pairs splits: train / valid / test.
                yield idx, {
                    "ltable_id": row["ltable_id"],
                    "rtable_id": row["rtable_id"],
                    "label": row["label"],
                }
            elif split == "source":
                yield idx, {
                    "name": row["name"],
                    "latitude": row["latitude"],
                    "longitude": row["longitude"],
                    "address": row["address"],
                    "postalCode": row["postalCode"],
                }
            else:
                yield idx, {
                    "name": row["name"],
                    "position": row["position"],
                    "address": row["address"],
                    "postalCode": row["postalCode"],
                }
|