jordyvl committed
Commit 77d023a
1 Parent(s): bf50634

first version of pdftoimage and ocr

Files changed (1)
  1. DUDE_imdb_loader.py +26 -19
DUDE_imdb_loader.py CHANGED
@@ -193,9 +193,9 @@ def parse_azure_box(box, page_width, page_height):
     return [left, width, top, height]
 
 
-def get_ocr_information(doc_id):
-    ocr_info = load_json(os.path.join(ocr_dir, doc_id + "_original.json"))
-    num_pages, _ = get_document_metadata(documents_metadata, doc_id, documents[doc_id])
+def get_ocr_information(ocr_path, num_pages):
+    ocr_info = load_json(ocr_path)
+    # num_pages, _ = get_document_metadata(documents_metadata, doc_id, documents[doc_id])
 
     page_ocr_tokens = [[] for page_ix in range(num_pages)]
     page_ocr_boxes = [[] for page_ix in range(num_pages)]
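
The refactor removes the function's dependence on the ocr_dir global: the caller now passes the OCR path and page count directly. The rest of the body is outside this hunk; a minimal sketch of how the per-page token/box lists are typically filled from Azure Read output, using the parse_azure_box helper above (the analyzeResult/readResults layout is an assumption, not shown in the diff):

def get_ocr_information(ocr_path, num_pages):
    ocr_info = load_json(ocr_path)
    page_ocr_tokens = [[] for page_ix in range(num_pages)]
    page_ocr_boxes = [[] for page_ix in range(num_pages)]
    # Assumed Azure Read layout: one entry per page under "readResults",
    # each with "lines" -> "words" carrying "text" and "boundingBox".
    for page in ocr_info["analyzeResult"]["readResults"]:
        page_ix = page["page"] - 1  # Azure pages are 1-based
        for line in page["lines"]:
            for word in line["words"]:
                page_ocr_tokens[page_ix].append(word["text"])
                page_ocr_boxes[page_ix].append(
                    parse_azure_box(word["boundingBox"], page["width"], page["height"])
                )
    return page_ocr_tokens, page_ocr_boxes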
@@ -303,14 +303,19 @@ if __name__ == "__main__":
 
     split = "val"
 
-
-    # 1. PDF to image dir and collect document metadata (num_pages, page_image_names)
-
-    document_paths = sorted(
-        set([x["document"] for x in dataset[split] if x["data_split"] == split])
-    )
+    OCR_paths = []
+    document_paths = []
+    for i, x in enumerate(dataset):
+        if x["data_split"] != split:
+            continue
+        if x["document"] not in document_paths:
+            document_paths.append(x["document"])
+            OCR_paths.append(x["OCR"])
 
     document_paths = document_paths[:30]
+    OCR_paths = OCR_paths[:30]
+
+    # 1. PDF to image dir and collect document metadata (num_pages, page_image_names)
 
     # document_ids = [document_filepath.split("/")[-1].replace(".pdf", "") for document_filepath in document_paths]
     documents_metadata_filename = "documents_metadata.json"
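
The rewritten collection loop keeps document_paths and OCR_paths index-aligned, so OCR_paths[i] is always the OCR file for document_paths[i]; the paired [:30] truncations preserve that invariant. An equivalent, slightly more explicit formulation (hypothetical, not in the commit):

# Deduplicate documents while keeping each one paired with its OCR file.
pairs = {}
for x in dataset:
    if x["data_split"] == split and x["document"] not in pairs:
        pairs[x["document"]] = x["OCR"]
document_paths = list(pairs)[:30]
OCR_paths = [pairs[p] for p in document_paths]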
@@ -328,7 +333,7 @@ if __name__ == "__main__":
     ]
     print(
         "chunksize",
-        len(set([doc_id for doc_block in documents_blocks for doc_id in doc_block])),
+        len(set([docId for doc_block in documents_blocks for docId in doc_block])),
     )
     parallel_results = Parallel(n_jobs=num_jobs)(
         delayed(get_document_metadata_block)(documents_metadata, documents, document_paths[i])
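
The hunk cuts off the construction of documents_blocks, but the surrounding code is the usual joblib fan-out: split the work into blocks, process the blocks in parallel, then merge the per-block dicts. A self-contained sketch of that idiom, with block size and worker count as assumptions:

from joblib import Parallel, delayed

def chunks(lst, n):
    # Yield successive n-sized blocks from lst.
    for i in range(0, len(lst), n):
        yield lst[i : i + n]

documents_blocks = list(chunks(document_paths, 10))  # assumed block size
parallel_results = Parallel(n_jobs=6)(               # assumed worker count
    delayed(get_document_metadata_block)(documents_metadata, documents, block)
    for block in documents_blocks
)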
@@ -336,13 +341,13 @@ if __name__ == "__main__":
     )
 
     for block_result in parallel_results:
-        for doc_id, metadata in tqdm(block_result.items()):
-            if doc_id not in documents_metadata:
-                documents_metadata[doc_id] = metadata
+        for docId, metadata in tqdm(block_result.items()):
+            if docId not in documents_metadata:
+                documents_metadata[docId] = metadata
 
     save_json(documents_metadata_filename, documents_metadata)
 
-    #2. Process OCR to obtain doc_ocr_info
+    # 2. Process OCR to obtain doc_ocr_info
     documents_ocr_filename = "documents_ocr.json"
 
     if os.path.exists(documents_ocr_filename):
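
Both steps use the same load-or-compute checkpoint: results are serialized with save_json, and the expensive branch is skipped on re-runs. The load branch falls outside the hunk; a plausible shape (assumed, not shown in the diff):

if os.path.exists(documents_ocr_filename):
    documents_ocr_info = load_json(documents_ocr_filename)
else:
    doc_ocr_info = {}
    no_ocr = []
    error_ocr = []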
@@ -352,18 +357,20 @@ if __name__ == "__main__":
         no_ocr = []
         error_ocr = []
 
-        for document_filepath in document_paths:
+        for i, document_filepath in enumerate(document_paths):
             docId = document_filepath.split("/")[-1].replace(".pdf", "")
             try:
-                ocr_tokens, ocr_boxes = get_ocr_information(docId)
-                doc_ocr_info[docId] = {'ocr_tokens': ocr_tokens, 'ocr_boxes': ocr_boxes}
+                ocr_tokens, ocr_boxes = get_ocr_information(OCR_paths[i], documents_metadata[docId]["num_pages"])
+                doc_ocr_info[docId] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
             except FileNotFoundError:
                 no_ocr.append(docId)
             except KeyError:
                 error_ocr.append(docId)
 
         save_json(documents_ocr_filename, documents_ocr_info)
 
+    import pdb; pdb.set_trace()  # breakpoint 930f4f6a //
+
     # page_image_dir = '/'.join(dataset['val']['document'][0].split("/")[:-1]).replace('PDF', 'images')
     # if not os.path.exists(page_image_dir):
     #     os.makedirs(page_image_dir)
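
Per-document failures are bucketed rather than fatal: missing OCR files land in no_ocr, schema surprises (KeyError) in error_ocr, and the loop moves on. Two things worth a second look: the loop fills doc_ocr_info while save_json writes documents_ocr_info (unless the two names are reconciled outside the hunk, the freshly parsed entries never reach disk), and the pdb breakpoint is still active. A sketch of the presumably intended save plus a summary line (the print is illustrative, not in the commit):

        save_json(documents_ocr_filename, doc_ocr_info)
        print(f"OCR missing for {len(no_ocr)} docs, malformed for {len(error_ocr)} docs")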
@@ -387,7 +394,7 @@ if __name__ == "__main__":
     # doc_ocr_info
 
     # dict to unique docs
-    # documents_metadata[doc_id] = {"num_pages": num_pages, "page_image_names": image_names}
+    # documents_metadata[docId] = {"num_pages": num_pages, "page_image_names": image_names}
     # doc_ocr_info[doc_id] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
 
     """