jordyvl committed on
Commit
65f8b35
1 Parent(s): 114dfee

Link established to public test set

Files changed (1)
  1. DUDE_loader.py +40 -19
DUDE_loader.py CHANGED
@@ -14,15 +14,14 @@
 # limitations under the License.
 """DUDE dataset loader"""

-import os
 import copy
 import json
+import os
 from typing import List, Literal
-import pdf2image
-from tqdm import tqdm

 import datasets
-
+import pdf2image
+from tqdm import tqdm

 _CITATION = """
 @inproceedings{dude2023icdar,
@@ -43,16 +42,19 @@ _HOMEPAGE = "https://rrc.cvc.uab.es/?ch=23"

 _LICENSE = "CC BY 4.0"

-_SPLITS = ["train", "val"]
+_SPLITS = ["train", "val", "test"]

 _URLS = {
-    # "binaries": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_binaries.tar.gz", #
-    # "annotations": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_dataset-sample_gt.json" #"
-    "binaries": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_train-val-test_binaries.tar.gz", # DUDE_train-val-test_binaries.tar.gz
-    "annotations": "https://zenodo.org/record/7680589/files/DUDE_gt_release-candidate.json?download=1", #_trainval
+    "binaries": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_train-val-test_binaries.tar.gz",
+    "annotations": "https://zenodo.org/record/7680617/files/2023-03-09_DUDE_gt_release-candidate_PUBLIC.json?download=1"
+    # "blind": "/home/jordy/code/DUchallenge/DUDEeval/gt/2023-03-07_DUDE_gt_release-candidate_NOTSHARABLE.json",
 }

-SKIP_DOC_IDS = ["ef03364aa27a0987c9870472e312aceb", "5c5a5880e6a73b4be2315d506ab0b15b"]
+SKIP_DOC_IDS = [
+    "nan",
+    "ef03364aa27a0987c9870472e312aceb",
+    "5c5a5880e6a73b4be2315d506ab0b15b",
+]


 def parse_bbox(bbox):
@@ -79,14 +81,15 @@ def batched_conversion(pdf_file):
     info = pdf2image.pdfinfo_from_path(pdf_file, userpw=None, poppler_path=None)
     maxPages = info["Pages"]

-    logger.info(f"{pdf_file} has {str(maxPages)} pages")
-
     images = []

     for page in range(1, maxPages + 1, 10):
         images.extend(
             pdf2image.convert_from_path(
-                pdf_file, dpi=200, first_page=page, last_page=min(page + 10 - 1, maxPages)
+                pdf_file,
+                dpi=200,
+                first_page=page,
+                last_page=min(page + 10 - 1, maxPages),
             )
         )
     return images
@@ -139,7 +142,7 @@ def builder_configs(version):
 class DUDE(datasets.GeneratorBasedBuilder):
     """DUDE dataset."""

-    VERSION = datasets.Version("0.0.1")
+    VERSION = datasets.Version("1.0.7")

     BUILDER_CONFIGS = builder_configs(VERSION)

@@ -188,14 +191,17 @@ class DUDE(datasets.GeneratorBasedBuilder):
         self, dl_manager: datasets.DownloadManager
     ) -> List[datasets.SplitGenerator]:

-        annotations = json.load(open(_URLS[f"annotations"], "r"))
-        # binaries_archive = dl_manager.iter_archive(binaries_path)
+        if "blind" in _URLS and os.path.exists(_URLS[f"blind"]):
+            annotations = json.load(open(_URLS[f"blind"], "r"))
+        else:
+            annotations = json.load(open(_URLS[f"annotations"], "r"))

         if self.config.data_dir: # when unpacked to a custom directory
             binary_extraction_path = self.config.data_dir
         else:
             binaries_path = dl_manager.download(_URLS["binaries"])
             binary_extraction_path = dl_manager.extract(binaries_path)
+            # binaries_archive = dl_manager.iter_archive(binaries_path)

         splits = []
         for split in _SPLITS:
@@ -213,7 +219,9 @@ class DUDE(datasets.GeneratorBasedBuilder):

     def _generate_examples(self, binary_extraction_path, annotations, split):
         def retrieve_doc(docid):
-            extracted_path = os.path.join(binary_extraction_path, "PDF", split, docid + ".pdf")
+            extracted_path = os.path.join(
+                binary_extraction_path, "PDF", split, docid + ".pdf"
+            )
             return extracted_path

         def retrieve_OCR(docid, ocr_engine="Amazon", format="original"):
@@ -222,14 +230,27 @@ class DUDE(datasets.GeneratorBasedBuilder):
             )
             return extracted_path

-        annotations = [x for x in annotations if x["data_split"] == split]
+        split_condition = (
+            lambda x, split: bool(x["data_split"] == split)
+            if split in ["train", "val"]
+            else bool(split in x["data_split"])
+        ) # test, test2; only relevant for blind set
+        annotations = [x for x in annotations if split_condition(x, split)]

         for i, a in enumerate(annotations):
             if a["docId"] in SKIP_DOC_IDS:
                 continue
             a = dict(a)
             a["data_split"] = split
-            a["answers_page_bounding_boxes"] = parse_bbox(a["answers_page_bounding_boxes"])
+            if not "answers" in a.keys(): # test set has no ground truth provided
+                a["answers"] = None
+                a["answers_variants"] = None
+                a["answer_type"] = None
+                a["answers_page_bounding_boxes"] = None
+            else:
+                a["answers_page_bounding_boxes"] = parse_bbox(
+                    a.get("answers_page_bounding_boxes", [])
+                )
             docpath = retrieve_doc(a["docId"])
             ocrpath = retrieve_OCR(a["docId"])
             if self.config.binary_mode:
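
A minimal usage sketch (not part of this commit): with the public test annotations now linked, the loader script can be consumed through the datasets library, and test-split examples come back with their answer fields set to None. The config name below is hypothetical, since builder_configs() is not shown in this diff; list the real config names first.

import datasets

# The configs come from builder_configs(), which this diff does not show;
# list them instead of guessing a name.
print(datasets.get_dataset_config_names("jordyvl/DUDE_loader"))

# Hypothetical config name; loading downloads the binaries tarball and the
# public annotations JSON referenced in _URLS. Recent datasets releases may
# also require trust_remote_code=True for script-based datasets.
dude = datasets.load_dataset("jordyvl/DUDE_loader", "Amazon_original")

# The test split has questions but no ground truth, so _generate_examples
# fills answers, answers_variants, answer_type and
# answers_page_bounding_boxes with None.
print(dude["test"][0]["answers"])  # expected: None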