# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DUDE dataset loader"""

import os
from pathlib import Path
import time
import copy
import json
import numpy as np
import pandas as pd
from tqdm import tqdm
from io import BytesIO

from joblib import Parallel, delayed

import pdf2image
import PyPDF2

from PIL import Image as PIL_Image
from datasets import load_dataset_builder, load_dataset, logging

tqdm.pandas()


logger = logging.get_logger(__name__)

PIL_Image.MAX_IMAGE_PIXELS = None  # 933120000

MAX_PAGES = 50
MAX_PDF_SIZE = 100000000  # almost 100MB
MIN_WIDTH, MIN_HEIGHT = 150, 150


def load_json(json_path):
    with open(json_path, "r") as f:
        return json.load(f)


def save_json(json_path, data):
    with open(json_path, "w") as f:
        json.dump(data, f)


def get_images_pdf2image(document_filepath, chunksize=10):
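    """Render up to MAX_PAGES pages of a PDF to PIL images via pdf2image, `chunksize` pages at a time."""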
    info = pdf2image.pdfinfo_from_path(document_filepath, userpw=None, poppler_path=None)
    maxPages = info["Pages"]
    maxPages = min(MAX_PAGES, maxPages)

    # logger.info(f"{document_filepath} has {str(maxPages)} pages")
    images = []
    for page in range(1, maxPages + 1, chunksize):
        try:
            images.extend(
                pdf2image.convert_from_path(
                    document_filepath,
                    first_page=page,
                    last_page=min(page + chunksize - 1, maxPages),
                )
            )
        except Exception as e:
            logger.warning(f"page: {page} get_images {e}")
    return images


def pdf_to_images(document_filepath, converter="PyPDF2"):
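    """Convert a PDF to page images using either PyPDF2 (embedded page images) or pdf2image (rendered pages),
    save them as JPEGs under an `images/` directory mirroring the `PDF/` directory, and return a dict with
    `num_pages` and the relative `page_image_names`."""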
    def images_to_pagenames(images, document_filepath, page_image_dir):
        page_image_names = []
        for page_idx, page_image in enumerate(images):
            page_image_name = document_filepath.replace("PDF", "images").replace(
                ".pdf", f"_{page_idx}.jpg"
            )
            page_image_names.append(
                page_image_name.replace(page_image_dir, page_image_dir.split("/")[-1])
            )  # without dir
            if not os.path.exists(page_image_name):
                page_image.convert("RGB").save(page_image_name)
        return page_image_names

    example = {}
    example["num_pages"] = 0
    example["page_image_names"] = []
    images = []

    page_image_dir = "/".join(document_filepath.split("/")[:-1]).replace("PDF", "images")
    if not os.path.exists(page_image_dir):
        os.makedirs(page_image_dir)

    # if len(document_filepath) > MAX_PDF_SIZE:
    #     logger.warning(f"too large document {len(example['document'])}")
    #     return example
    reached_page_limit = False

    if converter == "PyPDF2":
        try:
            reader = PyPDF2.PdfReader(document_filepath)
        except Exception as e:
            logger.warning(f"read_pdf {e}")
            return example

        for p, page in enumerate(reader.pages):
            if reached_page_limit:
                break
            try:
                for image in page.images:
                    if len(images) == MAX_PAGES:
                        reached_page_limit = True
                        break
                    im = PIL_Image.open(BytesIO(image.data))
                    if im.width < MIN_WIDTH and im.height < MIN_HEIGHT:
                        continue
                    images.append(im)
            except Exception as e:
                logger.warning(f"get_images {e}")

    elif converter == "pdf2image":
        images = get_images_pdf2image(document_filepath)

    example["num_pages"] = len(images)
    if len(images) == 0:
        return example

    example["page_image_names"] = images_to_pagenames(images, document_filepath, page_image_dir)

    return example


def pdf_to_images_block(document_paths_blocks, converter):
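    """Worker function: convert a block of PDF paths and return a {docId: page metadata} dict."""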
    new_doc_metadata = {}
    for document_filepath in document_paths_blocks:
        docId = document_filepath.split("/")[-1].replace(".pdf", "")
        new_doc_metadata[docId] = pdf_to_images(document_filepath, converter=converter)
    return new_doc_metadata


def parse_textract_bbox(box):
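    """Convert an Amazon Textract BoundingBox dict into a [left, width, top, height] array."""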
    # 0.47840896, 0.12897822, 0.5341576 , 0.14347914 # x,w,y,h
    return np.array([box["Left"], box["Width"], box["Top"], box["Height"]])


def parse_azure_box(box, page_width, page_height):
    # Box in Azure are in format X top left, Y top left, X top right, Y top right, X bottom right, Y bottom right, X bottom left, Y bottom left
    # [14.1592, 3.8494, 28.668, 3.8494, 28.668, 8.0487, 13.9844, 7.8738]
    left = min(box[0], box[6])
    right = max(box[2], box[4])
    top = min(box[1], box[3])
    bottom = max(box[5], box[7])
    width = right - left
    height = bottom - top

    # Normalize
    left = left / page_width
    top = top / page_height
    width = width / page_width
    height = height / page_height

    return [left, width, top, height]


def get_ocr_information(ocr_path, num_pages):
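    """Parse Amazon Textract OCR output into per-page lists of lowercased WORD tokens and their bounding boxes.

    Raises AssertionError if the OCR page count differs from the number of rendered page images.
    """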
    ocr_info = load_json(ocr_path)

    ocr_pages = ocr_info[0]["DocumentMetadata"]["Pages"]

    if num_pages != ocr_pages:
        raise AssertionError("Pages from images and OCR not matching, should go for pdf2image")

    page_ocr_tokens = [[] for page_ix in range(num_pages)]
    page_ocr_boxes = [[] for page_ix in range(num_pages)]
    for ocr_block in ocr_info:
        for ocr_extraction in ocr_block["Blocks"]:
            if ocr_extraction["BlockType"] == "WORD":
                text = ocr_extraction["Text"].lower()
                bounding_box = parse_textract_bbox(
                    ocr_extraction["Geometry"]["BoundingBox"]
                ).tolist()
                page = ocr_extraction["Page"] - 1

                page_ocr_tokens[page].append(text)
                page_ocr_boxes[page].append(bounding_box)

    """
    for page in range(num_pages):
        page_ocr_boxes[page] = np.array(page_ocr_boxes[page])
    """
    return page_ocr_tokens, page_ocr_boxes


def create_header(split, version, has_answer):
    header = {
        "creation_time": time.time(),
        "version": version,
        "dataset_type": split,
        "has_answer": has_answer,
    }

    return header


def get_document_info(documents_metadata, docId):
    doc_metadata = documents_metadata[docId]
    num_pages = doc_metadata["num_pages"]
    page_image_names = doc_metadata["page_image_names"]
    return num_pages, page_image_names


def format_answers(answers_list):
    answers_list = list(set(answers_list))  # answers are already standardized; only deduplicate
    return answers_list


def create_imdb_record_from_json(
    record, documents_metadata, documents_ocr_info, split, include_answers, include_variants=False
):
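    """Build one ImDB record (question, page image names, OCR tokens/boxes, answers) for an annotation.

    Returns an empty dict when document metadata, OCR info or page images are missing.
    """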

    docId = record["docId"].split("_")[0]
    try:
        num_pages, page_image_names = get_document_info(documents_metadata, docId)
        document_ocr_info = documents_ocr_info[docId]
    except Exception as e:
        print(
            "Missing: ",
            e,
            docId,
        )
        return {}

    if include_answers:
        answers = format_answers(record["answers"])
    else:
        answers = None

    if include_variants and record["answers_variants"] and "list" not in record["answer_type"]:
        answers += record["answers_variants"]

    page_image_dir = "/".join(record["document"].split("/")[:-2]).replace("PDF", "images")
    if not page_image_names or any(
        [not os.path.exists(os.path.join(page_image_dir, p)) for p in page_image_names]
    ):
        print(
            "Missing images: ",
            docId,
            #[p for p in page_image_names if not os.path.exists(os.path.join(page_image_dir, p))],
        )
        return {}

    # fix if wrongly saved pagenames
    # page_image_names = [
    #     split + image_name for image_name in page_image_names if image_name.startswith("/")
    # ]

    imdb_record = {
        "question_id": record["questionId"],
        "question": record["question"],
        "docId": docId,
        "image_name": page_image_names,
        "num_pages": num_pages,
        "ocr_tokens": document_ocr_info["ocr_tokens"],
        "ocr_normalized_boxes": document_ocr_info["ocr_boxes"],
        "set_name": split,
        "answers": answers,  # list
        "answer_page": None,
        "extra": {
            #'question_type': record['qtype'],
            #'industry': record['industry'],
            #'category': record['category'],
            "answer_type": record["answer_type"],
        },
    }

    return imdb_record


def create_imdb_from_json(
    data,
    documents_metadata,
    documents_ocr_info,
    split,
    version,
    include_answers=True,
    include_variants=False,
):
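    """Convert DUDE annotations into the ImDB format: a list of [header, record_1, ..., record_N]."""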
    imdb_header = create_header(split, version, include_answers)

    imdb_records = []
    for record in tqdm(data):
        imdb_record = create_imdb_record_from_json(
            record, documents_metadata, documents_ocr_info, split, include_answers, include_variants
        )
        if imdb_record:
            imdb_records.append(imdb_record)

    imdb = [imdb_header] + imdb_records

    return imdb


def parse_arguments():
    import argparse

    parser = argparse.ArgumentParser(
        description="Instantiate HuggingFace dataloader and convert to ImDB format"
    )

    parser.add_argument(
        "--redo-imdb-build",
        action="store_true",
        default=False,
        help="Whether to rebuild the imdb record and overwrite",
    )

    # Optional
    parser.add_argument(
        "--no-include-variants",
        dest="include_variants",
        action="store_false",
        default=True,
        help="Do not include answer variants as additional ground truths",
    )

    # Overwrite/Pass HF parameters
    parser.add_argument(
        "--DUDE_config",
        type=str,
        default="Amazon_original",
        help="HF Config to load to control OCR version",
    )
    parser.add_argument(
        "--data_dir",
        type=str,
        default="/home/jordy/Downloads/DUDE_train-val-test_binaries",
        help="Load PDFs and store images",
    )

    return parser.parse_args()


if __name__ == "__main__":
    """
    Parse and redo
    """
    args = parse_arguments()

    dataset = load_dataset("jordyvl/DUDE_loader", args.DUDE_config, data_dir=args.data_dir)

    splits = dataset.keys()

    for split in splits:
        split_indices = []
        OCR_paths = []
        document_paths = []
        for i, x in enumerate(dataset[split]):
            if x["data_split"] != split:  # redundant check
                continue
            if x["document"] not in document_paths:
                document_paths.append(x["document"])
                OCR_paths.append(x["OCR"])
                split_indices.append(i)

        # document_paths = document_paths[:30]
        # OCR_paths = OCR_paths[:30]

        # 1. PDF to image dir and collect document metadata (num_pages, page_image_names)
        documents_metadata_filename = f"{split}-documents_metadata.json"
        if os.path.exists(documents_metadata_filename):
            print(f"Loading from disk: {documents_metadata_filename}")
            documents_metadata = load_json(documents_metadata_filename)
        else:
            documents_metadata = {}
            num_jobs = 6
            block_size = int(len(document_paths) / num_jobs) + 1
            print(f"{block_size} * {num_jobs} = {block_size*num_jobs} ({len(document_paths)})")
            document_blocks = [
                document_paths[block_size * i : block_size * i + block_size]
                for i in range(num_jobs)
            ]
            print(
                "unique documents across blocks:",
                len(set(doc for doc_block in document_blocks for doc in doc_block)),
            )
            parallel_results = Parallel(n_jobs=num_jobs)(
                delayed(pdf_to_images_block)(document_blocks[i], "pdf2image")
                for i in range(num_jobs)
            )

            for block_result in parallel_results:
                for docId, metadata in tqdm(block_result.items()):
                    if docId not in documents_metadata:
                        documents_metadata[docId] = metadata

            save_json(documents_metadata_filename, documents_metadata)

        # 2. Process OCR to obtain doc_ocr_info
        documents_ocr_filename = f"{split}-documents_ocr.json"

        if os.path.exists(documents_ocr_filename):
            print(f"Loading from disk: {documents_ocr_filename}")
            documents_ocr_info = load_json(documents_ocr_filename)
        else:
            documents_ocr_info = {}
            no_ocr = []
            error_ocr = []

            for i, document_filepath in enumerate(document_paths):
                docId = document_filepath.split("/")[-1].replace(".pdf", "")
                try:
                    ocr_tokens, ocr_boxes = get_ocr_information(
                        OCR_paths[i], documents_metadata[docId]["num_pages"]
                    )
                    documents_ocr_info[docId] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
                except AssertionError as e:
                    print(f"image2pages issue: {e}")
                    error_ocr.append(docId)
                except IndexError as e:
                    print(f"pages issue: {e}")
                    error_ocr.append(docId)
                except FileNotFoundError as e:
                    print(f"FileNotFoundError issue: {e}")
                    no_ocr.append(docId)
                except KeyError as e:
                    print(f"KeyError issue: {e}")
                    error_ocr.append(docId)

            save_json(documents_ocr_filename, documents_ocr_info)
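
        # 3. Build the ImDB records (header + one entry per question) and cache them as imdb_{split}.npy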

        imdb_filename = f"imdb_{split}.npy"
        if os.path.exists(imdb_filename) and not args.redo_imdb_build:
            print(f"Loading from disk: {imdb_filename}")
            imdb = np.load(imdb_filename, allow_pickle=True)

        else:
            imdb = create_imdb_from_json(
                dataset[split],  # .select(split_indices),
                documents_metadata=documents_metadata,
                documents_ocr_info=documents_ocr_info,
                split=split,
                version="0.1",
                include_answers=True,
                include_variants=args.include_variants,
            )
            np.save(imdb_filename, imdb)