jordyvl committed
Commit bf50634
1 Parent(s): dd969e9

first version of pdftoimage and ocr

Files changed (1)
  1. DUDE_imdb_loader.py +421 -0
DUDE_imdb_loader.py ADDED
@@ -0,0 +1,421 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """DUDE dataset loader"""
+
+ import os
+ import logging
+ from pathlib import Path
+ import time
+ import copy
+ import json
+ import numpy as np
+ import pandas as pd
+ from tqdm import tqdm
+ from io import BytesIO
+
+ tqdm.pandas()
+ from joblib import Parallel, delayed
+
+ # import pdf2image
+ import PyPDF2
+
+ from datasets import load_dataset_builder, load_dataset
+ from PIL import Image as PIL_Image
+
+ logger = logging.getLogger(__name__)  # used in pdf_to_images for warnings
+
+ MAX_PAGES = 50
+ MAX_PDF_SIZE = 100000000  # almost 100MB
+ MIN_WIDTH, MIN_HEIGHT = 150, 150
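+ # Extraction thresholds: keep at most MAX_PAGES page images per document, and skip
+ # embedded images whose width and height both fall below MIN_WIDTH x MIN_HEIGHT pixels
+ # (likely icons or decorations). MAX_PDF_SIZE is intended as a guard against very large
+ # binaries; the corresponding size check is currently commented out in pdf_to_images.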
+
+
+ def load_json(json_path):
+     return json.load(open(json_path, "r"))
+
+
+ def save_json(json_path, data):
+     with open(json_path, "w") as f:
+         json.dump(data, f)
+
+
+ # 0. PDF to images
+
+ # 1. OCR metadata
+
+
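+ # pdf_to_images: read a PDF with PyPDF2, extract the images embedded in each page
+ # (the commented-out pdf2image import suggests page rendering as an alternative),
+ # filter out small embedded images, save the survivors as JPEGs under an images/
+ # directory mirroring the PDF path, and return a small metadata dict
+ # (document path, page count, page image names).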
+ def pdf_to_images(document_filepath):
+     def images_to_pagenames(images, document_filepath, page_image_dir):
+         # e.g. /home/jordy/Downloads/DUDE_train-val-test_binaries/PDF/val/001d6f557c342ef5a67cd38a29da9e83.pdf
+         # maps to .../images/val/001d6f557c342ef5a67cd38a29da9e83_<page_idx>.jpg
+
+         page_image_names = []
+         for page_idx, page_image in enumerate(images):
+             page_image_name = document_filepath.replace("PDF", "images").replace(
+                 ".pdf", f"_{page_idx}.jpg"
+             )
+
+             page_image_names.append(page_image_name)  # full path; strip page_image_dir for the name without dir
+
+             if not os.path.exists(page_image_name):
+                 page_image.save(page_image_name)
+         return page_image_names
+
+     example = {}
+     example["document"] = document_filepath
+     example["num_pages"] = 0  # key name matches get_document_info / documents_metadata
+     example["page_image_names"] = []
+     images = []
+
+     page_image_dir = "/".join(example["document"].split("/")[:-1]).replace("PDF", "images")
+     if not os.path.exists(page_image_dir):
+         os.makedirs(page_image_dir)
+
+     # if os.path.getsize(example["document"]) > MAX_PDF_SIZE:
+     #     logger.warning(f"too large document {os.path.getsize(example['document'])}")
+     #     return example
+     try:
+         reader = PyPDF2.PdfReader(example["document"])
+     except Exception as e:
+         logger.warning(f"read_pdf {e}")
+         return example
+
+     reached_page_limit = False
+     page_iterator = reader.pages
+
+     for p, page in enumerate(page_iterator):
+         if reached_page_limit:
+             break
+         for image in page.images:  # TODO: wrap in try/except and log get_images failures
+             if len(images) == MAX_PAGES:
+                 reached_page_limit = True
+                 break
+
+             im = PIL_Image.open(BytesIO(image.data))
+             if im.width < MIN_WIDTH and im.height < MIN_HEIGHT:
+                 continue
+             images.append(im)
+
+     example["num_pages"] = len(images)
+     if len(images) == 0:
+         return example
+
+     example["page_image_names"] = images_to_pagenames(images, example["document"], page_image_dir)
+
+     return example
+
+
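+ # pdf_to_images_block: joblib worker that processes one block of PDF paths and returns
+ # a dict mapping docId (PDF filename without extension) to the pdf_to_images metadata.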
+ def pdf_to_images_block(document_paths_blocks):
+     new_doc_metadata = {}
+     for document_filepath in document_paths_blocks:
+         docId = document_filepath.split("/")[-1].replace(".pdf", "")
+         new_doc_metadata[docId] = pdf_to_images(document_filepath)
+     return new_doc_metadata
+
+
+ """
+ def get_document_metadata(docs_metadata, doc_id, document_filepath):
+
+     if doc_id in docs_metadata and docs_metadata[doc_id]["num_pages"] != -1:
+         num_pages = docs_metadata[doc_id]["num_pages"]
+         page_image_names = docs_metadata[doc_id]["page_image_names"]
+
+     else:
+         try:
+             images = pdf2image.convert_from_path(document_filepath)
+         except:
+             print(doc_id)
+             return -1, -1
+         num_pages = len(images)
+         page_image_dir = ("/").join(
+             document_filepath.replace(documents_dir, page_images_dir).split("/")[:-1]
+         )
+         if not os.path.exists(page_image_dir):
+             os.makedirs(page_image_dir)
+
+         page_image_names = []
+         for page_idx, page_image in enumerate(images):
+             page_image_name = document_filepath.replace(documents_dir, page_images_dir).replace(
+                 ".pdf", f"_{page_idx}.jpg"
+             )
+             page_image_names.append(page_image_name.replace(page_images_dir, ""))
+
+             if not os.path.exists(page_image_name):
+                 page_image.save(page_image_name)
+
+     return num_pages, page_image_names
+
+
+ def get_document_metadata_block(docs_metadata, documents_path_dict, documents_blocks):
+     new_doc_metadata = {}
+     for doc_id in documents_blocks:
+         document_filepath = documents_path_dict[doc_id]
+         num_pages, page_image_names = get_document_metadata(
+             docs_metadata, doc_id, document_filepath
+         )
+         new_doc_metadata[doc_id] = {"num_pages": num_pages, "page_image_names": page_image_names}
+
+     return new_doc_metadata
+ """
+
+
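+ # Both parsers return a box as [left, width, top, height], normalized to [0, 1] of the
+ # page size (Textract boxes are already normalized; Azure boxes are converted from an
+ # 8-value polygon in page units).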
+ def parse_textract_bbox(box):
+     # e.g. 0.47840896, 0.12897822, 0.5341576, 0.14347914  # x, w, y, h
+     return np.array([box["Left"], box["Width"], box["Top"], box["Height"]])
+
+
+ def parse_azure_box(box, page_width, page_height):
+     # Azure boxes come as [X top-left, Y top-left, X top-right, Y top-right,
+     # X bottom-right, Y bottom-right, X bottom-left, Y bottom-left],
+     # e.g. [14.1592, 3.8494, 28.668, 3.8494, 28.668, 8.0487, 13.9844, 7.8738]
+     left = min(box[0], box[6])
+     right = max(box[2], box[4])
+     top = min(box[1], box[3])
+     bottom = max(box[5], box[7])
+     width = right - left
+     height = bottom - top
+
+     # Normalize to page dimensions
+     left = left / page_width
+     top = top / page_height
+     width = width / page_width
+     height = height / page_height
+
+     return [left, width, top, height]
+
+
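+ # get_ocr_information parses Amazon Textract output: each <docId>_original.json holds a
+ # list of responses whose "Blocks" contain WORD entries with a normalized "Geometry"
+ # bounding box and a 1-indexed "Page". It relies on a module-level ocr_dir (the directory
+ # with the OCR dumps, not defined in this version) and on the documents_metadata built in
+ # __main__.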
+ def get_ocr_information(doc_id):
+     ocr_info = load_json(os.path.join(ocr_dir, doc_id + "_original.json"))
+     num_pages, _ = get_document_info(documents_metadata, doc_id)
+
+     page_ocr_tokens = [[] for page_ix in range(num_pages)]
+     page_ocr_boxes = [[] for page_ix in range(num_pages)]
+     for ocr_block in ocr_info:
+         for ocr_extraction in ocr_block["Blocks"]:
+             if ocr_extraction["BlockType"] == "WORD":
+                 text = ocr_extraction["Text"].lower()
+                 bounding_box = parse_textract_bbox(ocr_extraction["Geometry"]["BoundingBox"])
+                 page = ocr_extraction["Page"] - 1
+
+                 page_ocr_tokens[page].append(text)
+                 page_ocr_boxes[page].append(bounding_box)
+
+     for page in range(num_pages):
+         page_ocr_boxes[page] = np.array(page_ocr_boxes[page])
+
+     return page_ocr_tokens, page_ocr_boxes
+
+
+ def create_header(split, version, has_answer):
+     header = {
+         "creation_time": time.time(),
+         "version": version,
+         "dataset_type": split,
+         "has_answer": has_answer,
+     }
+
+     return header
+
+
+ def get_document_info(documents_metadata, doc_id):
+     doc_metadata = documents_metadata[doc_id]
+     num_pages = doc_metadata["num_pages"]
+     page_image_names = doc_metadata["page_image_names"]
+     return num_pages, page_image_names
+
+
+ def format_answers(answers_list):
+     answers_list = list(set([answer.lower() for answer in answers_list]))
+     return answers_list
+
+
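+ # Each imdb record pairs one question with its document's page images and OCR
+ # (tokens plus normalized boxes); answers are lowercased and deduplicated when
+ # include_answers is set, and left as None otherwise.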
+ def create_imdb_record_from_json(
+     record, documents_metadata, documents_ocr_information, split, include_answers
+ ):
+
+     doc_id = record["docId"]
+     # document_filepath = documents_dict[doc_id]
+     num_pages, page_image_names = get_document_info(documents_metadata, doc_id)
+     document_ocr_info = documents_ocr_information[doc_id]
+
+     if include_answers:
+         answers = format_answers(record["answers"])
+     else:
+         answers = None
+
+     imdb_record = {
+         "question_id": record["questionId"],
+         "question": record["question"],
+         "docId": doc_id,
+         "image_name": page_image_names,
+         "num_pages": num_pages,
+         "ocr_tokens": document_ocr_info["ocr_tokens"],
+         "ocr_normalized_boxes": document_ocr_info["ocr_boxes"],
+         "set_name": split,
+         "answers": answers,
+         "answer_page": None,
+         "extra": {
+             # 'question_type': record['qtype'],
+             # 'industry': record['industry'],
+             # 'category': record['category'],
+             "answer_type": record["answer_type"],
+         },
+     }
+
+     return imdb_record
+
+
+ def create_imdb_from_json(
+     data, documents_metadata, documents_ocr_information, split, version, include_answers
+ ):
+     imdb_header = create_header(split, version, include_answers)
+
+     imdb_records = []
+     for record in tqdm(data):
+         imdb_records.append(
+             create_imdb_record_from_json(
+                 record, documents_metadata, documents_ocr_information, split, include_answers
+             )
+         )
+
+     imdb = [imdb_header] + imdb_records
+
+     return imdb
+
+
+ if __name__ == "__main__":
+     dataset = load_dataset(
+         "../DUDE_loader/DUDE_loader.py",
+         "DUDE",
+         data_dir="/home/jordy/Downloads/DUDE_train-val-test_binaries",
+     )
+     splits = dataset.keys()
+
+     split = "val"
+
+
+     # 1. PDF to image dir and collect document metadata (num_pages, page_image_names)
+
+     document_paths = sorted(
+         set([x["document"] for x in dataset[split] if x["data_split"] == split])
+     )
+
+     document_paths = document_paths[:30]
+
+     # document_ids = [document_filepath.split("/")[-1].replace(".pdf", "") for document_filepath in document_paths]
+     documents_metadata_filename = "documents_metadata.json"
+     if os.path.exists(documents_metadata_filename):
+         documents_metadata = load_json(documents_metadata_filename)
+     else:
+         documents_metadata = {}
+         # for document_filepath in tqdm(document_paths):
+         #     documents_metadata[docId] = pdf_to_images(document_filepath)
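+         # Parallelize the PDF-to-image conversion: split document_paths into num_jobs
+         # blocks and merge the per-block {docId: metadata} dicts returned by pdf_to_images_block.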
+         num_jobs = 6
+         block_size = int(len(document_paths) / num_jobs) + 1
+         print(f"{block_size} * {num_jobs} = {block_size*num_jobs} ({len(document_paths)})")
+         documents_blocks = [
+             document_paths[block_size * i : block_size * i + block_size] for i in range(num_jobs)
+         ]
+         print(
+             "chunksize",
+             len(set([doc_id for doc_block in documents_blocks for doc_id in doc_block])),
+         )
+         parallel_results = Parallel(n_jobs=num_jobs)(
+             delayed(pdf_to_images_block)(documents_blocks[i])
+             for i in range(num_jobs)
+         )
+
+         for block_result in parallel_results:
+             for doc_id, metadata in tqdm(block_result.items()):
+                 if doc_id not in documents_metadata:
+                     documents_metadata[doc_id] = metadata
+
+         save_json(documents_metadata_filename, documents_metadata)
+
+     # 2. Process OCR to obtain documents_ocr_info
+     documents_ocr_filename = "documents_ocr.json"
+
+     if os.path.exists(documents_ocr_filename):
+         documents_ocr_info = load_json(documents_ocr_filename)
+     else:
+         documents_ocr_info = {}
+         no_ocr = []
+         error_ocr = []
+
+         for document_filepath in document_paths:
+             docId = document_filepath.split("/")[-1].replace(".pdf", "")
+             try:
+                 ocr_tokens, ocr_boxes = get_ocr_information(docId)
+                 documents_ocr_info[docId] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
+             except FileNotFoundError:
+                 no_ocr.append(docId)
+             except KeyError:
+                 error_ocr.append(docId)
+
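+         # NOTE: ocr_boxes are numpy arrays at this point; convert them (e.g. via .tolist())
+         # before dumping, otherwise save_json will raise a serialization error.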
+         save_json(documents_ocr_filename, documents_ocr_info)
+
+     # page_image_dir = '/'.join(dataset['val']['document'][0].split("/")[:-1]).replace('PDF', 'images')
+     # if not os.path.exists(page_image_dir):
+     #     os.makedirs(page_image_dir)
+     # dataset.info.features
+
+     """
+     Describe all steps that need to happen after loading the HF DUDE dataset
+     Change functions
+
+
+     page_images_dir
+
+
+
+     2. Process OCR to obtain doc_ocr_info
+     """
+     # update dataset with
+     # for split in SPLITS
+
+     # documents_metadata
+     # doc_ocr_info
+
+     # dict to unique docs
+     # documents_metadata[doc_id] = {"num_pages": num_pages, "page_image_names": image_names}
+     # doc_ocr_info[doc_id] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
+
+     """
+     train_imdb = create_imdb_from_json(
+         train_data,
+         documents_metadata=documents_metadata,
+         documents_ocr_information=doc_ocr_info,
+         split="train",
+         version="0.1",
+         include_answers=True,
+     )
+     val_imdb = create_imdb_from_json(
+         val_data,
+         documents_metadata=documents_metadata,
+         documents_ocr_information=doc_ocr_info,
+         split="val",
+         version="0.1",
+         include_answers=True,
+     )
+     np.save("Imdb/train_imdb.npy", train_imdb)
+     np.save("Imdb/val_imdb.npy", val_imdb)
+
+     document_paths = []
+     num_jobs = 6
+     block_size = int(len(document_ids) / num_jobs) + 1
+     print(f"{block_size} * {num_jobs} = {block_size*num_jobs} ({len(document_ids)})")
+     parallel_results = Parallel(n_jobs=num_jobs)(
+         delayed(get_document_metadata_block)(documents_metadata, documents, documents_blocks[i])
+         for i in range(num_jobs)
+     )
+     """