fixed docbank.py and README.md
Browse files- README.md +5 -5
- docbank.py +39 -37
README.md
CHANGED
@@ -17,14 +17,14 @@ dataset_info:
|
|
17 |
dtype: string
|
18 |
splits:
|
19 |
- name: train
|
20 |
-
num_bytes:
|
21 |
num_examples: 400000
|
22 |
- name: validation
|
23 |
-
num_bytes:
|
24 |
num_examples: 50000
|
25 |
- name: test
|
26 |
-
num_bytes:
|
27 |
num_examples: 50000
|
28 |
-
download_size:
|
29 |
-
dataset_size:
|
30 |
---
|
|
|
17 |
dtype: string
|
18 |
splits:
|
19 |
- name: train
|
20 |
+
num_bytes: 80004043
|
21 |
num_examples: 400000
|
22 |
- name: validation
|
23 |
+
num_bytes: 9995812
|
24 |
num_examples: 50000
|
25 |
- name: test
|
26 |
+
num_bytes: 9995812
|
27 |
num_examples: 50000
|
28 |
+
download_size: 0
|
29 |
+
dataset_size: 99995667
|
30 |
---
|
docbank.py
CHANGED
@@ -93,13 +93,14 @@ class DocBank(datasets.GeneratorBasedBuilder):
|
|
93 |
# data_dir = dl_manager.download_and_extract(urls)
|
94 |
|
95 |
self.data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
|
|
|
96 |
|
97 |
return [
|
98 |
datasets.SplitGenerator(
|
99 |
name=datasets.Split.TRAIN,
|
100 |
# These kwargs will be passed to _generate_examples
|
101 |
gen_kwargs={
|
102 |
-
"filepath": os.path.join("train.jsonl"),
|
103 |
"split": "train",
|
104 |
},
|
105 |
),
|
@@ -107,7 +108,7 @@ class DocBank(datasets.GeneratorBasedBuilder):
|
|
107 |
name=datasets.Split.VALIDATION,
|
108 |
# These kwargs will be passed to _generate_examples
|
109 |
gen_kwargs={
|
110 |
-
"filepath": os.path.join("dev.jsonl"),
|
111 |
"split": "dev",
|
112 |
},
|
113 |
),
|
@@ -115,7 +116,7 @@ class DocBank(datasets.GeneratorBasedBuilder):
|
|
115 |
name=datasets.Split.TEST,
|
116 |
# These kwargs will be passed to _generate_examples
|
117 |
gen_kwargs={
|
118 |
-
"filepath": os.path.join("test.jsonl"),
|
119 |
"split": "test"
|
120 |
},
|
121 |
),
|
@@ -124,44 +125,45 @@ class DocBank(datasets.GeneratorBasedBuilder):
|
|
124 |
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
|
125 |
def _generate_examples(self, filepath, split):
|
126 |
|
127 |
-
print(os.getcwd())
|
128 |
-
print(os.path.dirname(os.path.abspath(__file__)))
|
129 |
-
|
130 |
with open(filepath,'rt') as fp:
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
rgbs = []
|
139 |
-
fontnames = []
|
140 |
-
structures = []
|
141 |
-
|
142 |
-
with open(txt_file, 'r', encoding='utf8') as fp:
|
143 |
-
for line in fp.readlines():
|
144 |
-
tts = line.split('\t')
|
145 |
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
fontname = tts[8]
|
152 |
-
structure = tts[9].strip()
|
153 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
154 |
words.append(word)
|
155 |
bboxes.append(bbox)
|
156 |
rgbs.append(rgb)
|
157 |
fontnames.append(fontname)
|
158 |
-
structures.append(structure)
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
|
|
|
|
|
93 |
# data_dir = dl_manager.download_and_extract(urls)
|
94 |
|
95 |
self.data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
|
96 |
+
cwd = os.path.dirname(os.path.abspath(__file__))
|
97 |
|
98 |
return [
|
99 |
datasets.SplitGenerator(
|
100 |
name=datasets.Split.TRAIN,
|
101 |
# These kwargs will be passed to _generate_examples
|
102 |
gen_kwargs={
|
103 |
+
"filepath": os.path.join(cwd,"train.jsonl"),
|
104 |
"split": "train",
|
105 |
},
|
106 |
),
|
|
|
108 |
name=datasets.Split.VALIDATION,
|
109 |
# These kwargs will be passed to _generate_examples
|
110 |
gen_kwargs={
|
111 |
+
"filepath": os.path.join(cwd,"dev.jsonl"),
|
112 |
"split": "dev",
|
113 |
},
|
114 |
),
|
|
|
116 |
name=datasets.Split.TEST,
|
117 |
# These kwargs will be passed to _generate_examples
|
118 |
gen_kwargs={
|
119 |
+
"filepath": os.path.join(cwd,"test.jsonl"),
|
120 |
"split": "test"
|
121 |
},
|
122 |
),
|
|
|
125 |
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
|
126 |
def _generate_examples(self, filepath, split):
    """Yield ``(key, example)`` pairs for one split of the DocBank dataset.

    Args:
        filepath: Path to a ``.jsonl`` index file whose lines are
            ``[index, basename]`` literals (one document per line).
        split: Split name ("train", "dev" or "test"); passed through from
            ``_split_generators`` and currently unused here.

    Yields:
        Tuples ``(index, example)`` where ``example`` maps each feature
        name to the per-token annotations parsed from the document's
        DocBank ``.txt`` file.

    Raises:
        ValueError: If an annotation line does not have exactly 10
            tab-separated fields.
    """
    # Local imports keep this loading script self-contained.
    import ast
    import os

    with open(filepath, 'rt') as index_fp:
        for index_line in index_fp:
            # ast.literal_eval safely parses the `[index, basename]`
            # literal; never eval() raw file contents.
            index, basename = ast.literal_eval(index_line)

            # Annotation text and original page image live in fixed
            # subdirectories of the manually downloaded data dir.
            txt_file = os.path.join(self.data_dir, 'DocBank_500K_txt', basename + '.txt')
            img_file = os.path.join(self.data_dir, 'DocBank_500K_ori_img', basename + '_ori.jpg')

            words = []
            bboxes = []
            rgbs = []
            fontnames = []
            structures = []

            # Distinct handle name so the outer index file is not shadowed.
            with open(txt_file, 'r', encoding='utf8') as txt_fp:
                for row in txt_fp:
                    tts = row.split('\t')

                    # Each DocBank line has exactly 10 fields: token,
                    # 4 bbox coords, 3 RGB components, font name, label.
                    # Raise (not assert) so validation survives `-O`.
                    if len(tts) != 10:
                        raise ValueError(f'Incomplete line in file {txt_file}')

                    words.append(tts[0])
                    bboxes.append(list(map(int, tts[1:5])))
                    rgbs.append(list(map(int, tts[5:8])))
                    fontnames.append(tts[8])
                    structures.append(tts[9].strip())

            # index = str(index)+'_'+str(row)

            yield index, {
                "image": img_file,
                "token": words,
                "bounding_box": bboxes,
                "color": rgbs,
                "font": fontnames,
                "label": structures,
            }