jordyvl committed
Commit bd86c7c
1 parent: 5467f9a

first version of pdftoimage and ocr

Files changed (1):
  DUDE_imdb_loader.py  +23 -25
DUDE_imdb_loader.py CHANGED
@@ -124,17 +124,17 @@ def pdf_to_images_block(document_paths_blocks):
 
 
 """
-def get_document_metadata(docs_metadata, doc_id, document_filepath):
+def get_document_metadata(docs_metadata, docId, document_filepath):
 
-    if doc_id in docs_metadata and docs_metadata[doc_id]["num_pages"] != -1:
-        num_pages = docs_metadata[doc_id]["num_pages"]
-        page_image_names = docs_metadata[doc_id]["page_image_names"]
+    if docId in docs_metadata and docs_metadata[docId]["num_pages"] != -1:
+        num_pages = docs_metadata[docId]["num_pages"]
+        page_image_names = docs_metadata[docId]["page_image_names"]
 
     else:
         try:
             images = pdf2image.convert_from_path(document_filepath)
         except:
-            print(doc_id)
+            print(docId)
             return -1, -1
         num_pages = len(images)
         page_image_dir = ("/").join(
@@ -158,12 +158,12 @@ def get_document_metadata(docs_metadata, doc_id, document_filepath):
 
 def get_document_metadata_block(docs_metadata, documents_path_dict, documents_blocks):
     new_doc_metadata = {}
-    for doc_id in documents_blocks:
-        document_filepath = documents_path_dict[doc_id]
+    for docId in documents_blocks:
+        document_filepath = documents_path_dict[docId]
         num_pages, page_image_names = get_document_metadata(
-            docs_metadata, doc_id, document_filepath
+            docs_metadata, docId, document_filepath
         )
-        new_doc_metadata[doc_id] = {"num_pages": num_pages, "page_image_names": page_image_names}
+        new_doc_metadata[docId] = {"num_pages": num_pages, "page_image_names": page_image_names}
 
     return new_doc_metadata
 """
@@ -195,7 +195,7 @@ def parse_azure_box(box, page_width, page_height):
 
 def get_ocr_information(ocr_path, num_pages):
     ocr_info = load_json(ocr_path)
-    #num_pages, _ = get_document_metadata(documents_metadata, doc_id, documents[doc_id])
+    #num_pages, _ = get_document_metadata(documents_metadata, docId, documents[docId])
 
     page_ocr_tokens = [[] for page_ix in range(num_pages)]
     page_ocr_boxes = [[] for page_ix in range(num_pages)]
@@ -227,8 +227,8 @@ def create_header(split, version, has_answer):
     return header
 
 
-def get_document_info(documents_metadata, doc_id):
-    doc_metadata = documents_metadata[doc_id]
+def get_document_info(documents_metadata, docId):
+    doc_metadata = documents_metadata[docId]
     num_pages = doc_metadata["num_pages"]
     page_image_names = doc_metadata["page_image_names"]
     return num_pages, page_image_names
@@ -243,10 +243,10 @@ def create_imdb_record_from_json(
     record, documents_metadata, documents_ocr_information, split, include_answers
 ):
 
-    doc_id = record["docId"]
-    # document_filepath = documents_dict[doc_id]
-    num_pages, page_image_names = get_document_info(documents_metadata, doc_id)
-    document_ocr_info = documents_ocr_information[doc_id]
+    docId = record["docId"]
+    # document_filepath = documents_dict[docId]
+    num_pages, page_image_names = get_document_info(documents_metadata, docId)
+    document_ocr_info = documents_ocr_information[docId]
 
     if include_answers:
         answers = format_answers(record["answers"])
@@ -256,7 +256,7 @@
     imdb_record = {
         "question_id": record["questionId"],
         "question": record["question"],
-        "docId": doc_id,
+        "docId": docId,
         "image_name": page_image_names,
         "num_pages": num_pages,
         "ocr_tokens": document_ocr_info["ocr_tokens"],
@@ -276,7 +276,7 @@
 
 
 def create_imdb_from_json(
-    data, documents_metadata, documents_ocr_information, split, version, include_answers
+    data, documents_metadata, documents_ocr_information, split, version, include_answers=True
 ):
     imdb_header = create_header(split, version, include_answers)
 
@@ -305,6 +305,7 @@ if __name__ == "__main__":
     if split != "val":
         continue
 
+    split_indices = []
     OCR_paths = []
     document_paths = []
     for i, x in enumerate(dataset):
@@ -313,20 +314,17 @@ if __name__ == "__main__":
         if x["document"] not in document_paths:
             document_paths.append(x["document"])
             OCR_paths.append(x["OCR"])
+            split_indices.append(i)
 
     document_paths = document_paths[:30]
     OCR_paths = OCR_paths[:30]
 
     # 1. PDF to image dir and collect document metadata (num_pages, page_image_names)
-
-    # document_ids = [document_filepath.split("/")[-1].replace(".pdf", "") for document_filepath in document_paths]
    documents_metadata_filename = f"{split}-documents_metadata.json"
    if os.path.exists(documents_metadata_filename):
        documents_metadata = load_json(documents_metadata_filename)
    else:
        documents_metadata = {}
-        # for document_filepath in tqdm(document_paths):
-        #     documents_metadata[docId] = pdf_to_images(document_filepath)
        num_jobs = 6
        block_size = int(len(document_paths) / num_jobs) + 1
        print(f"{block_size} * {num_jobs} = {block_size*num_jobs} ({len(document_paths)})")
@@ -372,10 +370,10 @@ if __name__ == "__main__":
     save_json(documents_ocr_filename, documents_ocr_info)
 
     imdb = create_imdb_from_json(
-        data,
+        dataset.select(split_indices),
         documents_metadata=documents_metadata,
         documents_ocr_information=doc_ocr_info,
-        split="train",
+        split=split,
         version="0.1",
         include_answers=True,
     )
@@ -407,7 +405,7 @@ if __name__ == "__main__":
 
     # dict to unique docs
     # documents_metadata[docId] = {"num_pages": num_pages, "page_image_names": image_names}
-    # doc_ocr_info[doc_id] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
+    # doc_ocr_info[docId] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
 
     """
     train_imdb = create_imdb_from_json(
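The pdftoimage half of this commit renders each PDF to per-page images and records the page count plus the generated image names, with -1 markers for PDFs that fail to convert. A minimal standalone sketch of that step, assuming pdf2image with poppler installed; the output directory and the f"{docId}_{page_ix}.png" naming scheme are illustrative assumptions, not taken from the diff:

import os

import pdf2image


def pdf_to_page_images(document_filepath, out_dir="images"):
    """Render a PDF to one PNG per page; return (num_pages, page_image_names)."""
    docId = os.path.basename(document_filepath).replace(".pdf", "")
    try:
        images = pdf2image.convert_from_path(document_filepath)
    except Exception:
        # Mirror the diff's error handling: flag unconvertible PDFs with -1, -1.
        return -1, -1
    os.makedirs(out_dir, exist_ok=True)
    page_image_names = []
    for page_ix, image in enumerate(images):
        image_name = f"{docId}_{page_ix}.png"
        image.save(os.path.join(out_dir, image_name))  # pdf2image returns PIL images
        page_image_names.append(image_name)
    return len(images), page_image_names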
 
 
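The ocr half follows the same per-page layout: get_ocr_information buckets recognised tokens and their boxes into one list per page, and the driver stores them per document as {"ocr_tokens": ..., "ocr_boxes": ...}. A sketch of that bucketing under a deliberately simplified word schema ({"page": int, "text": str, "box": [x1, y1, x2, y2]}); the real files hold Azure OCR output, which parse_azure_box normalises before this step:

import json


def get_ocr_information_sketch(ocr_path, num_pages):
    """Group OCR words and boxes into one list per page index."""
    with open(ocr_path) as f:
        ocr_info = json.load(f)
    page_ocr_tokens = [[] for _ in range(num_pages)]
    page_ocr_boxes = [[] for _ in range(num_pages)]
    for word in ocr_info["words"]:  # assumed simplified schema, see lead-in
        page_ocr_tokens[word["page"]].append(word["text"])
        page_ocr_boxes[word["page"]].append(word["box"])
    return {"ocr_tokens": page_ocr_tokens, "ocr_boxes": page_ocr_boxes}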
 
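Finally, the driver's num_jobs / block_size arithmetic (and the printed sanity check that block_size * num_jobs covers len(document_paths)) amounts to chunking the document list into contiguous blocks for the pdf_to_images_block / get_document_metadata_block workers. A sketch of that chunking, with placeholder paths standing in for the real dataset:

def split_into_blocks(items, num_jobs=6):
    """Chunk a list into contiguous blocks of size int(len(items) / num_jobs) + 1."""
    block_size = int(len(items) / num_jobs) + 1
    return [items[i : i + block_size] for i in range(0, len(items), block_size)]


document_paths = [f"doc_{i}.pdf" for i in range(30)]  # placeholder paths
blocks = split_into_blocks(document_paths, num_jobs=6)
print([len(block) for block in blocks])  # [6, 6, 6, 6, 6] -- 30 docs in blocks of 6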