parquet-converter committed on
Commit
915125c
1 Parent(s): 2e15e70

Update parquet files

dwy100k-d-y.py DELETED
@@ -1,176 +0,0 @@
- import datasets
- import os
- import pickle
- import json
-
- class Dwy100kDYConfig(datasets.BuilderConfig):
-
-     def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
-         super(Dwy100kDYConfig, self).__init__(version=datasets.Version("0.0.1"), **kwargs)
-         self.features = features
-         self.label_classes = label_classes
-         self.data_url = data_url
-         self.citation = citation
-         self.url = url
-
-
- class Dwy100kDY(datasets.GeneratorBasedBuilder):
-
-     BUILDER_CONFIGS = [
-         Dwy100kDYConfig(
-             name="source",
-             features=["column1", "column2", "column3"],
-             citation="TODO",
-             url="TODO",
-             data_url="https://huggingface.co/datasets/matchbench/dwy100k-d-y/resolve/main/dwy-dbp-yg-100k.zip"
-         ),
-         Dwy100kDYConfig(
-             name="target",
-             features=["column1", "column2", "column3"],
-             citation="TODO",
-             url="TODO",
-             data_url="https://huggingface.co/datasets/matchbench/dwy100k-d-y/resolve/main/dwy-dbp-yg-100k.zip"
-         ),
-         Dwy100kDYConfig(
-             name="pairs",
-             features=["left_id", "right_id"],
-             citation="TODO",
-             url="TODO",
-             data_url="https://huggingface.co/datasets/matchbench/dwy100k-d-y/resolve/main/dwy-dbp-yg-100k.zip"
-         ),
-     ]
-
-     def _info(self):
-         if self.config.name=="source":
-             features = {feature: datasets.Value("string") for feature in self.config.features}
-         elif self.config.name=="target":
-             features = {feature: datasets.Value("string") for feature in self.config.features}
-         elif self.config.name=="pairs":
-             features = {feature: datasets.Value("string") for feature in self.config.features}
-
-         return datasets.DatasetInfo(
-             features=datasets.Features(features)
-         )
-
-     def _split_generators(self, dl_manager):
-         dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
-         #task_name = _get_task_name_from_data_url(self.config.data_url)
-         #dl_dir = os.path.join(dl_dir, task_name)
-         if self.config.name == "source":
-             return [
-                 datasets.SplitGenerator(
-                     name="ent_ids",
-                     gen_kwargs={
-                         "data_file": os.path.join(dl_dir, "ent_ids_1"),
-                         "split": "ent_ids",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="rel_triples_id",
-                     gen_kwargs={
-                         "data_file": os.path.join(dl_dir, "triples_1"),
-                         "split": "rel_triples_id",
-                     },
-                 ),
-             ]
-         elif self.config.name == "target":
-             return [
-                 datasets.SplitGenerator(
-                     name="ent_ids",
-                     gen_kwargs={
-                         "data_file": os.path.join(dl_dir, "ent_ids_2"),
-                         "split": "ent_ids",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="rel_triples_id",
-                     gen_kwargs={
-                         "data_file": os.path.join(dl_dir, "triples_2"),
-                         "split": "rel_triples_id",
-                     },
-                 )
-             ]
-         elif self.config.name == "pairs":
-             return [
-                 datasets.SplitGenerator(
-                     name="train",
-                     gen_kwargs={
-                         "data_file": os.path.join(dl_dir, "train_ent_ids"),
-                         "split": "train",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="valid",
-                     gen_kwargs={
-                         "data_file": os.path.join(dl_dir, "valid_ent_ids"),
-                         "split": "valid",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="test",
-                     gen_kwargs={
-                         "data_file": os.path.join(dl_dir, "ref_ent_ids"),
-                         "split": "test",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="sup",
-                     gen_kwargs={
-                         "data_file": os.path.join(dl_dir, "sup_ent_ids"),
-                         "split": "sup",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name="ref",
-                     gen_kwargs={
-                         "data_file": os.path.join(dl_dir, "ref_ent_ids"),
-                         "split": "ref",
-                     },
-                 ),
-             ]
-
-
-     def _generate_examples(self, data_file, split):
-         if split in ["translated_name"]:
-             trans = json.load(open(data_file,"r"))
-             #i = -1
-             for i in range(len(trans)):
-                 yield i, {
-                     "column1": str(trans[i][0]),
-                     "column2": str(trans[i][1]),
-                     "column3": None
-                 }
-         else:
-             f = open(data_file,"r",encoding='utf-8')
-             data = f.readlines()
-             for i in range(len(data)):
-                 #print(row)
-                 if self.config.name in ["source", "target"]:
-                     if split in ["ent_ids","rel_ids"]:
-                         row = data[i].strip('\n').split('\t')
-                         yield i, {
-                             "column1": row[0],
-                             "column2": row[1],
-                             "column3": None
-                         }
-                     elif split in ["rel_triples_id","rel_triples_whole","rel_triples_name"]:
-                         row = data[i].strip('\n').split('\t')
-                         yield i, {
-                             "column1": row[0],
-                             "column2": row[1],
-                             "column3": row[2]
-                         }
-                     elif split in ["attr_triples"]:
-                         row = data[i].rstrip('\n').split('\t')
-                         yield i, {
-                             "column1": row[0],
-                             "column2": row[1],
-                             "column3": row[2]
-                         }
-
-                 if self.config.name == "pairs":
-                     row = data[i].strip('\n').split('\t')
-                     yield i, {
-                         "left_id": row[0],
-                         "right_id": row[1]
-                     }
dwy-dbp-yg-100k.zip → pairs/dwy100k-d-y-ref.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a31b0cd3e9158830704efa3044e5635b9210908edb717f2b076b56861939c51d
- size 9878439
+ oid sha256:a006837a616d148fbb615bc1cc94c7c13cc5f4da1b5fe4a31cef301b6cbad6fd
+ size 771855
pairs/dwy100k-d-y-sup.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e23766d4331c08016c38558cb6597b7f347d2e8f0c609162286cf673ca767bd0
+ size 423845
pairs/dwy100k-d-y-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a006837a616d148fbb615bc1cc94c7c13cc5f4da1b5fe4a31cef301b6cbad6fd
+ size 771855
pairs/dwy100k-d-y-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a567678be5d3a02bdf014b8e8834af19999575c9bfbee007f740e8a34033014
+ size 282795
pairs/dwy100k-d-y-valid.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c74bc8444172830772968fb0d0e4e4ee04da444e4e7913f15b7134e8c51c6dd
+ size 141914
source/dwy100k-d-y-ent_ids.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01ca02529f91a6bd95f40a6c50b4220bb431430c4e1927a1b9ef3fb03c0c7f9f
+ size 2509625
source/dwy100k-d-y-rel_triples_id.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fe04dd3796a008c6c726922186117be3644d2dee47f433ba9164ffb35b66ca6
+ size 6633346
target/dwy100k-d-y-ent_ids.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d02b409e4f084ed72876694b99cf4320e7bb17ab16d9ae1cd711b67d8c99cbbc
+ size 2401655
target/dwy100k-d-y-rel_triples_id.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82e374533f2f3929f33527c27df2e2c86164c60d379571aa1233334a768b9e65
+ size 7352917
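
With the loading script deleted, the converted splits can be read straight from the parquet files added above. A minimal sketch using the generic parquet builder of the datasets library; the repository path and file layout come from this diff, while the choice of the "pairs" train split and the printed field names are illustrative assumptions taken from the old script:

from datasets import load_dataset

# Base URL of the repository this commit belongs to.
base = "https://huggingface.co/datasets/matchbench/dwy100k-d-y/resolve/main"

# Load one converted split directly; pairs/dwy100k-d-y-train.parquet is the file
# added in this commit for the train split of the "pairs" config.
pairs_train = load_dataset(
    "parquet",
    data_files={"train": f"{base}/pairs/dwy100k-d-y-train.parquet"},
    split="train",
)

# The old script defined the "pairs" features as left_id / right_id.
print(pairs_train[0])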