jordyvl committed
Commit
9ffbec4
Parent: b940079

small patch for answer variants

Files changed (3)
  1. DUDE_imdb_loader.py +73 -24
  2. DUDE_loader.py +1 -2
  3. test_loader.py +1 -1
DUDE_imdb_loader.py CHANGED
@@ -36,7 +36,7 @@ from datasets import load_dataset_builder, load_dataset, logging
 
 logger = logging.get_logger(__name__)
 
-PIL_Image.MAX_IMAGE_PIXELS = None #933120000
+PIL_Image.MAX_IMAGE_PIXELS = None  # 933120000
 
 MAX_PAGES = 50
 MAX_PDF_SIZE = 100000000  # almost 100MB
@@ -63,7 +63,9 @@ def get_images_pdf2image(document_filepath, chunksize=10):
     try:
         images.extend(
             pdf2image.convert_from_path(
-                document_filepath, first_page=page, last_page=min(page + chunksize - 1, maxPages)
+                document_filepath,
+                first_page=page,
+                last_page=min(page + chunksize - 1, maxPages),
             )
         )
     except Exception as e:
@@ -78,7 +80,9 @@ def pdf_to_images(document_filepath, converter="PyPDF2"):
         page_image_name = document_filepath.replace("PDF", "images").replace(
             ".pdf", f"_{page_idx}.jpg"
         )
-        page_image_names.append(page_image_name.replace(page_image_dir, page_image_dir.split('/')[-1])) # without dir
+        page_image_names.append(
+            page_image_name.replace(page_image_dir, page_image_dir.split("/")[-1])
+        )  # without dir
         if not os.path.exists(page_image_name):
             page_image.convert("RGB").save(page_image_name)
     return page_image_names
@@ -211,7 +215,7 @@ def get_document_info(documents_metadata, docId):
 
 
 def format_answers(answers_list):
-    answers_list = list(set([answer.lower() for answer in answers_list]))
+    answers_list = list(set([answer for answer in answers_list]))  # standardization is done
     return answers_list
 
 
@@ -220,7 +224,6 @@ def create_imdb_record_from_json(
 ):
 
     docId = record["docId"].split("_")[0]
-    # document_filepath = documents_dict[docId]
     try:
         num_pages, page_image_names = get_document_info(documents_metadata, docId)
         document_ocr_info = documents_ocr_info[docId]
@@ -237,12 +240,13 @@ def create_imdb_record_from_json(
     else:
         answers = None
 
-    if include_variants:
-        import pdb; pdb.set_trace()  # breakpoint 03e74e0e //
-
+    if include_variants and record['answers_variants']:
+        answers += record["answers_variants"]
 
-    #fix if wrongly saved pagenames
-    page_image_names = [split+image_name for image_name in page_image_names if image_name.startswith('/')]
+    # fix if wrongly saved pagenames
+    # page_image_names = [
+    #     split + image_name for image_name in page_image_names if image_name.startswith("/")
+    # ]
 
     imdb_record = {
         "question_id": record["questionId"],
@@ -253,7 +257,7 @@ def create_imdb_record_from_json(
         "ocr_tokens": document_ocr_info["ocr_tokens"],
         "ocr_normalized_boxes": document_ocr_info["ocr_boxes"],
         "set_name": split,
-        "answers": answers,
+        "answers": answers,  # list
         "answer_page": None,
         "extra": {
             # 'question_type': record['qtype'],
@@ -267,14 +271,20 @@ def create_imdb_record_from_json(
 
 
 def create_imdb_from_json(
-    data, documents_metadata, documents_ocr_info, split, version, include_answers=True
+    data,
+    documents_metadata,
+    documents_ocr_info,
+    split,
+    version,
+    include_answers=True,
+    include_variants=False,
 ):
     imdb_header = create_header(split, version, include_answers)
 
     imdb_records = []
     for record in tqdm(data):
         imdb_record = create_imdb_record_from_json(
-            record, documents_metadata, documents_ocr_info, split, include_answers
+            record, documents_metadata, documents_ocr_info, split, include_answers, include_variants
         )
         if imdb_record:
             imdb_records.append(imdb_record)
@@ -284,12 +294,53 @@ def create_imdb_from_json(
     return imdb
 
 
-if __name__ == "__main__":
-    dataset = load_dataset(
-        "../DUDE_loader/DUDE_loader.py",
-        "DUDE",
-        data_dir="/home/jordy/Downloads/DUDE_train-val-test_binaries",
+def parse_arguments():
+    import argparse
+
+    parser = argparse.ArgumentParser(
+        description="Instantiate HuggingFace dataloader and convert to ImDB format"
+    )
+
+    parser.add_argument(
+        "--redo-imdb-build",
+        action="store_true",
+        default=False,
+        help="Whether to rebuild the imdb record and overwrite",
+    )
+
+    # Optional
+    parser.add_argument(
+        "--no-include-variants",
+        action="store_false",
+        default=True,
+        help="Whether to include variants as full ground truths",
+    )
+
+    # Overwrite/Pass HF parameters
+    parser.add_argument(
+        "--DUDE_config",
+        type=str,
+        default="Amazon_original",
+        help="HF Config to load to control OCR version",
+    )
+    parser.add_argument(
+        "--data_dir",
+        type=str,
+        default="/home/jordy/Downloads/DUDE_train-val-test_binaries",
+        help="Load PDFs and store images",
     )
+
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    """
+    Parse and redo
+    """
+    args = parse_arguments()
+
+    dataset = load_dataset("jordyvl/DUDE_loader", args.DUDE_config, data_dir=args.data_dir)
+
     splits = dataset.keys()
 
     for split in splits:
@@ -297,7 +348,7 @@ if __name__ == "__main__":
         OCR_paths = []
         document_paths = []
         for i, x in enumerate(dataset[split]):
-            if x["data_split"] != split: #redundant check
+            if x["data_split"] != split:  # redundant check
                 continue
             if x["document"] not in document_paths:
                 document_paths.append(x["document"])
@@ -371,12 +422,10 @@ if __name__ == "__main__":
        save_json(documents_ocr_filename, documents_ocr_info)
 
        imdb_filename = f"imdb_{split}.npy"
-       if os.path.exists(imdb_filename):
+       if os.path.exists(imdb_filename) and not args.redo_imdb_build:
            print(f"Loading from disk: {imdb_filename}")
            imdb = np.load(imdb_filename, allow_pickle=True)
 
-           import pdb; pdb.set_trace()  # breakpoint ff49174b //
-
        else:
            imdb = create_imdb_from_json(
                dataset[split],  # .select(split_indices),
@@ -385,6 +434,6 @@ if __name__ == "__main__":
                split=split,
                version="0.1",
                include_answers=True,
+               include_variants=(not args.no_include_variants),
            )
-       np.save(imdb_filename, imdb)
-       #Missing: 'a7b500f3e0244a50571769aaef4fabc7' a7b500f3e0244a50571769aaef4fabc7
+       np.save(imdb_filename, imdb)
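
The substance of the patch is the `include_variants` path above: when enabled, the per-question `answers_variants` list is appended to the (already deduplicated) answers. A toy sketch of the effect, with field names taken from the diff and made-up values:

# Toy record mirroring the fields touched by the patch; values are illustrative.
record = {
    "questionId": "q1",
    "answers": ["Brussels"],
    "answers_variants": ["brussels", "Brussels, Belgium"],
}

answers = list(set(record["answers"]))  # format_answers no longer lowercases
if record["answers_variants"]:
    answers += record["answers_variants"]  # variants become full ground truths

print(answers)  # e.g. ['Brussels', 'brussels', 'Brussels, Belgium']

Since `format_answers` keeps the original casing now and variants are appended after the set() deduplication, a variant that exactly matches an existing answer would appear twice in the final list.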
 
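With `parse_arguments` in place, the imdb build is driven from the command line instead of hard-coded paths. A typical invocation, using the defaults from the diff (the data_dir is the author's local path and will differ per machine):

python DUDE_imdb_loader.py \
    --DUDE_config Amazon_original \
    --data_dir /home/jordy/Downloads/DUDE_train-val-test_binaries \
    --redo-imdb-build

Note the double negation around variants: `--no-include-variants` is a `store_false` flag with `default=True`, and the script passes `include_variants=(not args.no_include_variants)`, so as written variants are excluded by default and included only when the flag is given.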
DUDE_loader.py CHANGED
@@ -49,9 +49,8 @@ _URLS = {
     # "binaries": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_binaries.tar.gz", #
     # "annotations": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_dataset-sample_gt.json" #"
     "binaries": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_train-val-test_binaries.tar.gz", # DUDE_train-val-test_binaries.tar.gz
-    "annotations": "https://zenodo.org/record/7652563/files/DUDE_gt_release-candidate_trainval.json?download=1",
+    "annotations": "https://zenodo.org/record/7662385/files/DUDE_gt_release-candidate_trainval.json?download=1",
 }
-
 SKIP_DOC_IDS = ["ef03364aa27a0987c9870472e312aceb", "5c5a5880e6a73b4be2315d506ab0b15b"]
 
 
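The annotations source moves from Zenodo record 7652563 to 7662385. A quick way to sanity-check that the new link resolves before kicking off a full build (assumes `requests` is available; it is not a dependency of the loader itself):

import requests

URL = "https://zenodo.org/record/7662385/files/DUDE_gt_release-candidate_trainval.json?download=1"
resp = requests.head(URL, allow_redirects=True, timeout=30)
print(resp.status_code, resp.headers.get("Content-Length"))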
test_loader.py CHANGED
@@ -22,7 +22,7 @@ from codetiming import Timer
 for binding in ["dict_annotations (new)"]: #"dict_PDF",
     with Timer(name=f"{binding}", text=binding + " Elapsed time: {:.4f} seconds"):
         if binding == "dict_annotations (new)":
-            ds = load_dataset("../DUDE_loader/DUDE_loader.py", 'Amazon_original', data_dir="/home/jordy/Downloads/DUDE_train-val-test_binaries", ocr_engine='Azure') #ignore_verifications=True, , writer_batch_size=10
+            ds = load_dataset("jordyvl/DUDE_loader", 'Amazon_original', data_dir="/home/jordy/Downloads/DUDE_train-val-test_binaries") #ignore_verifications=True, , writer_batch_size=10
         else:
             ds = load_dataset("jordyvl/DUDE_loader", revision='db20bbf751b14e14e8143170bc201948ef5ac83c')
 
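
The test now resolves the builder from the Hub id `jordyvl/DUDE_loader` rather than the local script path, and drops the `ocr_engine` kwarg: per the new `--DUDE_config` help text, the OCR version is controlled by the config name (`Amazon_original`). A minimal standalone check of the Hub-based load (the data_dir is the author's local path; adjust accordingly):

from datasets import load_dataset

ds = load_dataset(
    "jordyvl/DUDE_loader",
    "Amazon_original",
    data_dir="/home/jordy/Downloads/DUDE_train-val-test_binaries",
)
print({split: ds[split].num_rows for split in ds})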