jordyvl commited on
Commit
49cf593
1 Parent(s): f668c42

Dataloader supporting trainval - remaining memory issue

Browse files
Files changed (4) hide show
  1. DUDE_loader.py +88 -50
  2. README.md +1 -0
  3. data/DUDE_dataset-sample_gt.json +23 -12
  4. test_loader.py +2 -2
DUDE_loader.py CHANGED
@@ -17,7 +17,6 @@
17
  import os
18
  import copy
19
  import json
20
- from pathlib import Path
21
  from typing import List
22
  import pdf2image
23
  from tqdm import tqdm
@@ -29,7 +28,7 @@ import datasets
29
  _CITATION = """
30
  @inproceedings{dude2023icdar,
31
  title={ICDAR 2023 Challenge on Document UnderstanDing of Everything (DUDE)},
32
- author={Van Landeghem, Jordy et . al.},
33
  booktitle={Proceedings of the ICDAR},
34
  year={2023}
35
  }
@@ -38,26 +37,53 @@ _CITATION = """
38
  _DESCRIPTION = """\
39
  DUDE requires models to reason and understand about document layouts in multi-page images/PDFs to answer questions about them.
40
  Specifically, models need to incorporate a new modality of layout present in the images/PDFs and reason
41
- over it to answer DUDE questions. DUDE Contains X questions and Y and ...
42
- """
43
 
44
  _HOMEPAGE = "https://rrc.cvc.uab.es/?ch=23"
45
 
46
  _LICENSE = "CC BY 4.0"
47
 
48
- _SPLITS = ["sample"] # ["train", "val", "test"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
 
50
- _URLS = {}
51
- for split in _SPLITS:
52
- _URLS[
53
- f"{split}_annotations"
54
- ] = f"https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_{split}_dataset.json"
55
- _URLS[
56
- f"{split}_pdfs"
57
- ] = f"https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_{split}_pdfs.tar.gz"
58
- _URLS[
59
- f"{split}_OCR"
60
- ] = f"https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_{split}_OCR.tar.gz"
 
 
 
 
 
 
61
 
62
 
63
  def batched_conversion(pdf_file):
@@ -133,21 +159,27 @@ class DUDE(datasets.GeneratorBasedBuilder):
133
  self, dl_manager: datasets.DownloadManager
134
  ) -> List[datasets.SplitGenerator]:
135
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
136
  splits = []
137
- for split in _SPLITS:
138
- annotations = {}
139
- if f"{split}_annotations" in _URLS: # blind test set
140
- annotations = json.load(open(_URLS[f"{split}_annotations"], "r"))
141
- pdfs_archive_path = dl_manager.download(_URLS[f"{split}_pdfs"])
142
- pdfs_archive = dl_manager.iter_archive(pdfs_archive_path)
143
- OCR_archive_path = dl_manager.download(_URLS[f"{split}_OCR"])
144
- OCR_archive = dl_manager.iter_archive(OCR_archive_path)
145
  splits.append(
146
  datasets.SplitGenerator(
147
  name=split,
148
  gen_kwargs={
149
- "pdfs_archive": pdfs_archive,
150
- "OCR_archive": OCR_archive,
151
  "annotations": annotations,
152
  "split": split,
153
  },
@@ -155,41 +187,47 @@ class DUDE(datasets.GeneratorBasedBuilder):
155
  )
156
  return splits
157
 
158
- def _generate_examples(self, pdfs_archive, OCR_archive, annotations, split):
159
- def retrieve_doc(pdfs_archive, docid):
160
- for file_path, file_obj in pdfs_archive:
161
- path, ext = file_path.split(".")
162
- md5 = path.split("/")[-1]
163
 
164
- if md5 == docid:
165
- # images = pdf2image.convert_from_bytes(file_obj.read())
166
- return file_obj.read() # binary
167
-
168
- def retrieve_OCR(OCR_archive, docid):
169
- for file_path, file_obj in OCR_archive:
170
- # /DUDE_sample_OCR/OCR/Amazon Textract/md5_{original,due}.json
171
- path, ext = file_path.split(".")
172
- filename = path.split("/")[-1]
173
- md5 = filename.split("_")[0]
174
 
175
- if md5 == docid and "original" in filename:
176
- return json.loads(file_obj.read()) # binary
177
 
178
  question = self.info.features["question"]
179
  answers = self.info.features["answers"]
180
 
181
  extensions = {"pdf", "PDF"}
182
 
 
 
183
  for i, a in enumerate(annotations):
184
  a["data_split"] = split
185
- a["document"] = retrieve_doc(pdfs_archive, a["docId"])
186
- a["OCR"] = retrieve_OCR(OCR_archive, a["docId"])
 
 
 
 
 
187
 
 
188
  # FIXES for faulty generation
189
- #a.pop("answers_page_bounding_boxes") # fix later
190
- if a["answers_page_bounding_boxes"] in [ [], [[]] ]:
191
  a["answers_page_bounding_boxes"] = None
192
  else:
193
- if isinstance(a['answers_page_bounding_boxes'][0], list):
194
- a["answers_page_bounding_boxes"] = a['answers_page_bounding_boxes'][0]
195
- yield i, a
 
 
 
 
 
17
  import os
18
  import copy
19
  import json
 
20
  from typing import List
21
  import pdf2image
22
  from tqdm import tqdm
 
28
  _CITATION = """
29
  @inproceedings{dude2023icdar,
30
  title={ICDAR 2023 Challenge on Document UnderstanDing of Everything (DUDE)},
31
+ author={Van Landeghem, Jordy et al.},
32
  booktitle={Proceedings of the ICDAR},
33
  year={2023}
34
  }
 
37
  _DESCRIPTION = """\
38
  DUDE requires models to reason and understand about document layouts in multi-page images/PDFs to answer questions about them.
39
  Specifically, models need to incorporate a new modality of layout present in the images/PDFs and reason
40
+ over it to answer DUDE questions.
41
+ """ # DUDE Contains X questions and Y and ...
42
 
43
  _HOMEPAGE = "https://rrc.cvc.uab.es/?ch=23"
44
 
45
  _LICENSE = "CC BY 4.0"
46
 
47
+ _SPLITS = ["train", "val"]
48
+
49
+ _URLS = {
50
+ # "binaries": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_binaries.tar.gz", #
51
+ # "annotations": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_dataset-sample_gt.json" #"
52
+ "binaries": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_train-val-test_binaries.tar.gz", # DUDE_train-val-test_binaries.tar.gz
53
+ "annotations": "https://zenodo.org/record/7600505/files/DUDE_gt_release-candidate_trainval_exabs.json?download=1",
54
+ }
55
+
56
+ #'0017b64bd017f06db47e56a6a113e22e'
57
+ SKIP_DOC_IDS = ["ef03364aa27a0987c9870472e312aceb", "5c5a5880e6a73b4be2315d506ab0b15b"]
58
+
59
+
60
+ def parse_bbox(bbox):
61
+
62
+ if bbox in [[], [[]]]:
63
+ return None
64
+
65
+ answers_page_bounding_boxes = []
66
+
67
+ if isinstance(bbox[0], list):
68
+ bbox = bbox[0]
69
 
70
+ keys = ["left", "top", "width", "height", "page"]
71
+
72
+ for page_bb in bbox:
73
+ if len(page_bb) == 0:
74
+ continue
75
+ page_bb = {key: page_bb[key] for key in keys}
76
+ """
77
+ if page_bb.get("label"):
78
+ del page_bb["label"]
79
+ if page_bb.get("error"):
80
+ del page_bb["error"]
81
+ if page_bb.get("multipage_box"):
82
+ del page_bb["multipage_box"]
83
+ #assert all(key in page_bb for key in keys)
84
+ """
85
+ answers_page_bounding_boxes.append(page_bb)
86
+ return answers_page_bounding_boxes
87
 
88
 
89
  def batched_conversion(pdf_file):
 
159
  self, dl_manager: datasets.DownloadManager
160
  ) -> List[datasets.SplitGenerator]:
161
 
162
+ annotations = json.load(open(_URLS[f"annotations"], "r"))
163
+ # binaries_archive = dl_manager.iter_archive(binaries_path)
164
+
165
+ if self.config.data_dir: #unpacked it to a custom directory
166
+ binary_extraction_path = self.config.data_dir
167
+ else:
168
+ binaries_path = dl_manager.download(_URLS['binaries'])
169
+ binary_extraction_path = dl_manager.extract(binaries_path)
170
+ binary_extraction_path += "/home/jordy/Downloads/" + _URLS["binaries"].split("/")[
171
+ -1
172
+ ].replace(
173
+ ".tar.gz", ""
174
+ ) # weird unpacking behaviour
175
+
176
  splits = []
177
+ for split in _SPLITS: # split archive
 
 
 
 
 
 
 
178
  splits.append(
179
  datasets.SplitGenerator(
180
  name=split,
181
  gen_kwargs={
182
+ "binary_extraction_path": binary_extraction_path,
 
183
  "annotations": annotations,
184
  "split": split,
185
  },
 
187
  )
188
  return splits
189
 
190
+ def _generate_examples(self, binary_extraction_path, annotations, split):
191
+ def retrieve_doc(docid):
192
+ extracted_path = os.path.join(binary_extraction_path, "PDF", split, docid + ".pdf")
193
+ with open(extracted_path, "rb") as f:
194
+ return f.read()
195
 
196
+ def retrieve_OCR(docid, ocr_engine="Amazon", format="original"):
197
+ extracted_path = os.path.join(
198
+ binary_extraction_path, "OCR", ocr_engine, docid + f"_{format}.json"
199
+ )
 
 
 
 
 
 
200
 
201
+ with open(extracted_path, "rb") as f:
202
+ return f.read()
203
 
204
  question = self.info.features["question"]
205
  answers = self.info.features["answers"]
206
 
207
  extensions = {"pdf", "PDF"}
208
 
209
+ annotations = [x for x in annotations if x["data_split"] == split]
210
+
211
  for i, a in enumerate(annotations):
212
  a["data_split"] = split
213
+ if a["docId"] in SKIP_DOC_IDS:
214
+ continue
215
+ a["document"] = retrieve_doc(a["docId"])
216
+ a["OCR"] = retrieve_OCR(a["docId"])
217
+
218
+ a["answers_page_bounding_boxes"] = parse_bbox(a["answers_page_bounding_boxes"])
219
+ yield i, a
220
 
221
+ """
222
  # FIXES for faulty generation
223
+ # a.pop("answers_page_bounding_boxes") # fix later
224
+ if a["answers_page_bounding_boxes"] in [[], [[]]]:
225
  a["answers_page_bounding_boxes"] = None
226
  else:
227
+ if isinstance(a["answers_page_bounding_boxes"][0], list):
228
+ a["answers_page_bounding_boxes"] = a["answers_page_bounding_boxes"][0]
229
+ # if i == 2303:
230
+ try:
231
+ except Exception as e:
232
+ print(f"Something wrong in {split}-{i} {e}")
233
+ """
README.md CHANGED
@@ -1,3 +1,4 @@
1
  ---
2
  license: cc-by-4.0
3
  ---
 
 
1
  ---
2
  license: cc-by-4.0
3
  ---
4
+
data/DUDE_dataset-sample_gt.json CHANGED
@@ -1,13 +1,24 @@
1
- [{'questionId': '0017b64bd017f06db47e56a6a113e22e_bb55e0af451429f2dcae69e6d0713616',
2
- 'question': 'What is the first and last name of the indvidual in list # 539?',
3
- 'answers': ['Ajay Dev Goud'],
4
- 'answers_page_bounding_boxes': [[{'left': 353,
5
- 'top': 409,
6
- 'width': 198,
7
- 'height': 26,
8
- 'page': 8}]],
9
- 'answers_variants': [],
10
- 'answer_type': 'extractive',
11
- 'docId': '0017b64bd017f06db47e56a6a113e22e',
12
- 'data_split': 'train'}
 
 
 
 
 
 
 
 
 
 
 
13
  ]
 
1
+ [
2
+ {
3
+ "questionId": "0017b64bd017f06db47e56a6a113e22e_bb55e0af451429f2dcae69e6d0713616",
4
+ "question": "What is the first and last name of the indvidual in list # 539?",
5
+ "answers": [
6
+ "Ajay Dev Goud"
7
+ ],
8
+ "answers_page_bounding_boxes": [
9
+ [
10
+ {
11
+ "left": 353,
12
+ "top": 409,
13
+ "width": 198,
14
+ "height": 26,
15
+ "page": 8
16
+ }
17
+ ]
18
+ ],
19
+ "answers_variants": [],
20
+ "answer_type": "extractive",
21
+ "docId": "0017b64bd017f06db47e56a6a113e22e",
22
+ "data_split": "train"
23
+ }
24
  ]
test_loader.py CHANGED
@@ -19,10 +19,10 @@ from datasets import load_dataset
19
 
20
  from codetiming import Timer
21
 
22
- for binding in ["dict_PDF", "dict_annotations (new)"]:
23
  with Timer(name=f"{binding}", text=binding + " Elapsed time: {:.4f} seconds"):
24
  if binding == "dict_annotations (new)":
25
- ds = load_dataset("../DUDE_loader/DUDE_loader.py")
26
  else:
27
  ds = load_dataset("jordyvl/DUDE_loader", revision='db20bbf751b14e14e8143170bc201948ef5ac83c')
28
 
 
19
 
20
  from codetiming import Timer
21
 
22
+ for binding in ["dict_annotations (new)"]: #"dict_PDF",
23
  with Timer(name=f"{binding}", text=binding + " Elapsed time: {:.4f} seconds"):
24
  if binding == "dict_annotations (new)":
25
+ ds = load_dataset("../DUDE_loader/DUDE_loader.py", data_dir="/home/jordy/Downloads/DUDE_train-val-test_binaries", writer_batch_size=10) #ignore_verifications=True,
26
  else:
27
  ds = load_dataset("jordyvl/DUDE_loader", revision='db20bbf751b14e14e8143170bc201948ef5ac83c')
28