jordyvl committed
Commit 5467f9a
1 Parent(s): 77d023a

first version of pdftoimage and ocr

Files changed (1)
  1. DUDE_imdb_loader.py +78 -73
DUDE_imdb_loader.py CHANGED
@@ -301,76 +301,88 @@ if __name__ == "__main__":
     )
     splits = datasets.keys()
 
-    split = "val"
-
-    OCR_paths = []
-    document_paths = []
-    for i, x in enumerate(dataset):
-        if x["data_split"] != split:
+    for split in splits:
+        if split != "val":
             continue
-        if x["document"] not in document_paths:
-            document_paths.append(x["document"])
-            OCR_paths.append(x["OCR"])
 
-    document_paths = document_paths[:30]
-    OCR_paths = OCR_paths[:30]
-
-    # 1. PDF to image dir and collect document metadata (num_pages, page_image_names)
+        OCR_paths = []
+        document_paths = []
+        for i, x in enumerate(dataset):
+            if x["data_split"] != split:
+                continue
+            if x["document"] not in document_paths:
+                document_paths.append(x["document"])
+                OCR_paths.append(x["OCR"])
+
+        document_paths = document_paths[:30]
+        OCR_paths = OCR_paths[:30]
+
+        # 1. PDF to image dir and collect document metadata (num_pages, page_image_names)
+
+        # document_ids = [document_filepath.split("/")[-1].replace(".pdf", "") for document_filepath in document_paths]
+        documents_metadata_filename = f"{split}-documents_metadata.json"
+        if os.path.exists(documents_metadata_filename):
+            documents_metadata = load_json(documents_metadata_filename)
+        else:
+            documents_metadata = {}
+            # for document_filepath in tqdm(document_paths):
+            #     documents_metadata[docId] = pdf_to_images(document_filepath)
+            num_jobs = 6
+            block_size = int(len(document_paths) / num_jobs) + 1
+            print(f"{block_size} * {num_jobs} = {block_size*num_jobs} ({len(document_paths)})")
+            documents_blocks = [
+                document_paths[block_size * i : block_size * i + block_size] for i in range(num_jobs)
+            ]
+            print(
+                "chunksize",
+                len(set([docId for doc_block in documents_blocks for docId in doc_block])),
+            )
+            parallel_results = Parallel(n_jobs=num_jobs)(
+                delayed(get_document_metadata_block)(documents_metadata, documents, document_paths[i])
+                for i in range(num_jobs)
+            )
 
-    # document_ids = [document_filepath.split("/")[-1].replace(".pdf", "") for document_filepath in document_paths]
-    documents_metadata_filename = "documents_metadata.json"
-    if os.path.exists(documents_metadata_filename):
-        documents_metadata = load_json(documents_metadata_filename)
-    else:
-        documents_metadata = {}
-        # for document_filepath in tqdm(document_paths):
-        #     documents_metadata[docId] = pdf_to_images(document_filepath)
-        num_jobs = 6
-        block_size = int(len(document_paths) / num_jobs) + 1
-        print(f"{block_size} * {num_jobs} = {block_size*num_jobs} ({len(document_paths)})")
-        documents_blocks = [
-            document_paths[block_size * i : block_size * i + block_size] for i in range(num_jobs)
-        ]
-        print(
-            "chunksize",
-            len(set([docId for doc_block in documents_blocks for docId in doc_block])),
+        for block_result in parallel_results:
+            for docId, metadata in tqdm(block_result.items()):
+                if docId not in documents_metadata:
+                    documents_metadata[docId] = metadata
+
+        save_json(documents_metadata_filename, documents_metadata)
+
+        # 2. Process OCR to obtain doc_ocr_info
+        documents_ocr_filename = f"{split}-documents_ocr.json"
+
+        if os.path.exists(documents_ocr_filename):
+            documents_ocr_info = load_json(documents_ocr_filename)
+        else:
+            documents_ocr_info = {}
+            no_ocr = []
+            error_ocr = []
+
+            for i, document_filepath in enumerate(document_paths):
+                docId = document_filepath.split("/")[-1].replace(".pdf", "")
+                try:
+                    ocr_tokens, ocr_boxes = get_ocr_information(OCR_paths[i], documents_metadata[docId]["num_pages"])
+                    doc_ocr_info[docId] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
+                except FileNotFoundError:
+                    no_ocr.append(docId)
+                except KeyError:
+                    error_ocr.append(docId)
+
+            save_json(documents_ocr_filename, documents_ocr_info)
+
+        imdb = create_imdb_from_json(
+            data,
+            documents_metadata=documents_metadata,
+            documents_ocr_information=doc_ocr_info,
+            split="train",
+            version="0.1",
+            include_answers=True,
         )
-        parallel_results = Parallel(n_jobs=num_jobs)(
-            delayed(get_document_metadata_block)(documents_metadata, documents, document_paths[i])
-            for i in range(num_jobs)
-        )
-
-        for block_result in parallel_results:
-            for docId, metadata in tqdm(block_result.items()):
-                if docId not in documents_metadata:
-                    documents_metadata[docId] = metadata
-
-        save_json(documents_metadata_filename, documents_metadata)
-
-    # 2. Process OCR to obtain doc_ocr_info
-    documents_ocr_filename = "documents_ocr.json"
-
-    if os.path.exists(documents_ocr_filename):
-        documents_ocr_info = load_json(documents_ocr_filename)
-    else:
-        documents_ocr_info = {}
-        no_ocr = []
-        error_ocr = []
-
-        for i, document_filepath in enumerate(document_paths):
-            docId = document_filepath.split("/")[-1].replace(".pdf", "")
-            try:
-                ocr_tokens, ocr_boxes = get_ocr_information(OCR_paths[i], documents_metadata[docId]["num_pages"])
-                doc_ocr_info[docId] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
-            except FileNotFoundError:
-                no_ocr.append(docId)
-            except KeyError:
-                error_ocr.append(docId)
-
-        save_json(documents_ocr_filename, documents_ocr_info)
+        np.save(f"{split}_imdb.npy", imdb)
 
     import pdb; pdb.set_trace() # breakpoint 930f4f6a //
-
+
     # page_image_dir = '/'.join(dataset['val']['document'][0].split("/")[:-1]).replace('PDF', 'images')
     # if not os.path.exists(page_image_dir):
    #     os.makedirs(page_image_dir)
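Note: the metadata pass above leans on helpers defined elsewhere in DUDE_imdb_loader.py (pdf_to_images in the commented-out loop, get_document_metadata_block in the Parallel call). As a rough orientation only, a helper of that shape could look like the sketch below; it assumes pdf2image is used for rasterisation and that page images go into an images/ directory mirroring the PDF path, as in the commented-out page_image_dir lines. Names and layout are illustrative, not the actual implementation.

# Illustrative sketch of a pdf_to_images-style helper (assumes pdf2image/poppler installed).
import os
from pdf2image import convert_from_path

def pdf_to_images_sketch(document_filepath, dpi=72):
    docId = document_filepath.split("/")[-1].replace(".pdf", "")
    page_image_dir = "/".join(document_filepath.split("/")[:-1]).replace("PDF", "images")
    os.makedirs(page_image_dir, exist_ok=True)

    page_images = convert_from_path(document_filepath, dpi=dpi)  # one PIL image per page
    page_image_names = []
    for page_idx, page_image in enumerate(page_images):
        image_name = f"{docId}_{page_idx}.jpg"
        page_image.save(os.path.join(page_image_dir, image_name))
        page_image_names.append(image_name)

    # Returns exactly the two fields named in the "# 1." comment.
    return {"num_pages": len(page_images), "page_image_names": page_image_names}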
@@ -406,16 +418,9 @@ if __name__ == "__main__":
         version="0.1",
         include_answers=True,
     )
-    val_imdb = create_imdb_from_json(
-        val_data,
-        documents_metadata=documents_metadata,
-        documents_ocr_information=doc_ocr_info,
-        split="train",
-        version="0.1",
-        include_answers=True,
-    )
+
+
     np.save("Imdb/train_imdb.npy", train_imdb)
-    np.save("Imdb/val_imdb.npy", val_imdb)
 
     document_paths = []
    num_jobs = 6
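The second hunk drops the hard-coded val_imdb block, since the per-split loop in the first hunk now writes one f"{split}_imdb.npy" file itself. Reading such a file back would look roughly like the following; allow_pickle=True is an assumption based on the imdb records being Python dicts rather than a plain numeric array.

# Sketch: reload a per-split imdb written by the loop above.
import numpy as np

val_imdb = np.load("val_imdb.npy", allow_pickle=True)  # allow_pickle assumed for object records
print(len(val_imdb))
print(val_imdb[0])  # inspect one record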
 
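For reference, the chunked joblib pattern that step 1 uses to fan metadata extraction out over workers, reduced to a self-contained sketch. process_block and the toy inputs are placeholders standing in for get_document_metadata_block and the real document paths; only the chunk-then-merge shape mirrors the script.

# Sketch of the chunk-and-merge joblib pattern: slice the inputs into num_jobs blocks,
# run one worker per block, then merge the per-block dicts into a single result.
from joblib import Parallel, delayed

def process_block(paths_block):
    # placeholder worker; the script builds {docId: metadata} per block instead
    return {path: len(path) for path in paths_block}

document_paths = [f"doc_{i}.pdf" for i in range(20)]
num_jobs = 6
block_size = int(len(document_paths) / num_jobs) + 1
blocks = [document_paths[block_size * i : block_size * (i + 1)] for i in range(num_jobs)]

parallel_results = Parallel(n_jobs=num_jobs)(delayed(process_block)(block) for block in blocks)

merged = {}
for block_result in parallel_results:
    merged.update(block_result)
print(len(merged))  # equals len(document_paths) when all paths are unique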