jordyvl committed
Commit
2d80c91
1 Parent(s): 52ed403

Script for imdb file conversion

Files changed (1)
  1. DUDE_imdb_loader.py +17 -67
DUDE_imdb_loader.py CHANGED
@@ -282,14 +282,11 @@ if __name__ == "__main__":
     splits = dataset.keys()
 
     for split in splits:
-        if split != "val":
-            continue
-
         split_indices = []
         OCR_paths = []
         document_paths = []
         for i, x in enumerate(dataset[split]):
-            if x["data_split"] != split:
+            if x["data_split"] != split:  # redundant check
                 continue
             if x["document"] not in document_paths:
                 document_paths.append(x["document"])
@@ -306,7 +303,7 @@ if __name__ == "__main__":
             documents_metadata = load_json(documents_metadata_filename)
         else:
             documents_metadata = {}
-        num_jobs = 1
+        num_jobs = 6
         block_size = int(len(document_paths) / num_jobs) + 1
         print(f"{block_size} * {num_jobs} = {block_size*num_jobs} ({len(document_paths)})")
         document_blocks = [
@@ -332,7 +329,7 @@ if __name__ == "__main__":
         # 2. Process OCR to obtain doc_ocr_info
         documents_ocr_filename = f"{split}-documents_ocr.json"
 
-        if os.path.exists(documents_ocr_filename) and False:
+        if os.path.exists(documents_ocr_filename):
             print(f"Loading from disk: {documents_ocr_filename}")
             documents_ocr_info = load_json(documents_ocr_filename)
         else:
@@ -362,65 +359,18 @@ if __name__ == "__main__":
 
         save_json(documents_ocr_filename, documents_ocr_info)
 
-        imdb = create_imdb_from_json(
-            dataset[split],  # .select(split_indices),
-            documents_metadata=documents_metadata,
-            documents_ocr_info=documents_ocr_info,
-            split=split,
-            version="0.1",
-            include_answers=True,
-        )
-        np.save(f"{split}_imdb.npy", imdb)  # dump to lerna
-
-        import pdb
-
-        pdb.set_trace()  # breakpoint 930f4f6a //
-
-        # page_image_dir = '/'.join(dataset['val']['document'][0].split("/")[:-1]).replace('PDF', 'images')
-        # if not os.path.exists(page_image_dir):
-        #     os.makedirs(page_image_dir)
-        # dataset.info.features
-
-        """
-        Describe all steps that need to happen after loading HF DUDE dataset
-        Change functions
-
-
-        page_images_dir
-
-
-
-        2. Process OCR to obtain doc_ocr_info
-        """
-        # update dataset with
-        # for split in SPLITS
-
-        # documents_metadata
-        # doc_ocr_info
-
-        # dict to unique docs
-        # documents_metadata[docId] = {"num_pages": num_pages, "page_image_names": image_names}
-        # doc_ocr_info[docId] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
-
-        """
-        train_imdb = create_imdb_from_json(
-            train_data,
-            documents_metadata=documents_metadata,
-            documents_ocr_info=doc_ocr_info,
-            split="train",
-            version="0.1",
-            include_answers=True,
-        )
-
+        imdb_filename = f"{split}_imdb.npy"
+        if os.path.exists(imdb_filename):
+            print(f"Loading from disk: {imdb_filename}")
+            imdb = np.load(imdb_filename)
 
-        np.save("Imdb/train_imdb.npy", train_imdb)
-
-        document_paths = []
-        num_jobs = 6
-        block_size = int(len(document_ids) / num_jobs) + 1
-        print(f"{block_size} * {num_jobs} = {block_size*num_jobs} ({len(document_ids)})")
-        parallel_results = Parallel(n_jobs=num_jobs)(
-            delayed(get_document_metadata_block)(documents_metadata, documents, documents_blocks[i])
-            for i in range(num_jobs)
-        )
-        """
+        else:
+            imdb = create_imdb_from_json(
+                dataset[split],  # .select(split_indices),
+                documents_metadata=documents_metadata,
+                documents_ocr_info=documents_ocr_info,
+                split=split,
+                version="0.1",
+                include_answers=True,
+            )
+            np.save(imdb_filename, imdb)
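
A few notes on the changes above.

The bump from num_jobs = 1 to num_jobs = 6 drives the existing block-splitting logic, which slices the unique document paths into roughly equal chunks and hands each chunk to a worker. Below is a minimal, self-contained sketch of that pattern with joblib; fetch_metadata is a hypothetical stand-in for the script's actual per-block worker, and the inputs are toy data:

from joblib import Parallel, delayed

def fetch_metadata(paths):
    # Hypothetical worker: returns {path: metadata} for one block.
    return {p: {"num_pages": None} for p in paths}

document_paths = [f"doc_{i}.pdf" for i in range(100)]  # toy input
num_jobs = 6
block_size = int(len(document_paths) / num_jobs) + 1  # rounds the split up
document_blocks = [
    document_paths[i : i + block_size]
    for i in range(0, len(document_paths), block_size)
]
# One worker per block; per-block dicts are merged afterwards.
results = Parallel(n_jobs=num_jobs)(
    delayed(fetch_metadata)(block) for block in document_blocks
)
documents_metadata = {}
for block_result in results:
    documents_metadata.update(block_result)

The + 1 in block_size makes the split round up, so no documents are dropped when the division is inexact.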
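
The OCR edit drops the "and False" that was forcing recomputation during debugging, so the on-disk JSON cache is live again. The pattern looks like the following sketch, assuming load_json and save_json are thin json wrappers (run_ocr is a hypothetical stub for the expensive step, and the example values are placeholders):

import json
import os

def load_json(path):
    with open(path) as f:
        return json.load(f)

def save_json(path, data):
    with open(path, "w") as f:
        json.dump(data, f)

def run_ocr(paths):
    # Stub standing in for the real OCR pass over each document.
    return {p: {"ocr_tokens": [], "ocr_boxes": []} for p in paths}

split = "val"  # example split name
document_paths = ["doc_0.pdf", "doc_1.pdf"]
documents_ocr_filename = f"{split}-documents_ocr.json"
if os.path.exists(documents_ocr_filename):
    documents_ocr_info = load_json(documents_ocr_filename)  # cache hit
else:
    documents_ocr_info = run_ocr(document_paths)  # expensive on a real run
    save_json(documents_ocr_filename, documents_ocr_info)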
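
One caveat on the new imdb cache branch: np.save pickles the array when it holds Python objects (a list of per-question dicts, which is what create_imdb_from_json appears to produce), and np.load defaults to allow_pickle=False, so the load path as committed would raise a ValueError on such files. A sketch of the guarded load under that assumption:

import os
import numpy as np

split = "val"  # example split name
imdb_filename = f"{split}_imdb.npy"
if os.path.exists(imdb_filename):
    print(f"Loading from disk: {imdb_filename}")
    # Object arrays are stored via pickle; loading them requires opting in
    # explicitly, since np.load defaults to allow_pickle=False.
    imdb = np.load(imdb_filename, allow_pickle=True)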