jordyvl committed on
Commit
c6a2213
1 Parent(s): e8f9d4f

start of repo

data/DUDE_sample_OCR.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84eb4a6c090a7b9d0c6d507de7417633de4865aa8769f1380026feea24d37f02
+ size 32141514
data/DUDE_sample_dataset.json ADDED
The diff for this file is too large to render. See raw diff
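The annotation JSON is too large to render here, but the feature schema declared in dude_loader.py below suggests each record has roughly the following shape. This is a hypothetical sketch: only the field names come from the loader, every value is invented for illustration.

# Hypothetical annotation record; field names follow the loader's schema,
# all values below are invented for illustration.
record = {
    "docId": "0a1b2c3d4e5f67890a1b2c3d4e5f6789",  # md5 of the source PDF
    "questionId": "0a1b2c3d4e5f67890a1b2c3d4e5f6789_0001",
    "question": "What is the total amount due?",
    "answers": ["$1,234.56"],
    "answers_variants": ["1234.56", "$1234.56"],
    "answer_type": "extractive",
    "answers_page_bounding_boxes": [
        {"left": 100, "top": 200, "width": 50, "height": 12, "page": 1}
    ],
}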
 
data/DUDE_sample_pdfs.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1dc13cbfd8ebf2aa27d1a10f8634955e1442f7c439afbd0a58a525f9bcdb04e
+ size 121373107
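Both archives are stored with Git LFS, so the committed files are only three-line pointers; the actual payload is resolved by its content hash. A minimal sketch for verifying a downloaded archive against its pointer (the local filename is an assumption; the oid and size are copied from the pointer above):

import hashlib
import os

def verify_lfs_payload(local_path, expected_sha256, expected_size):
    # compare file size first, then the streaming sha256 digest
    assert os.path.getsize(local_path) == expected_size, "size mismatch"
    digest = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    assert digest.hexdigest() == expected_sha256, "sha256 mismatch"

verify_lfs_payload(
    "DUDE_sample_pdfs.tar.gz",
    "e1dc13cbfd8ebf2aa27d1a10f8634955e1442f7c439afbd0a58a525f9bcdb04e",
    121373107,
)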
dude_loader.py ADDED
@@ -0,0 +1,202 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """DUDE dataset loader"""
+ 
+ import os
+ import copy
+ import json
+ from pathlib import Path
+ from typing import List
+ 
+ import pdf2image
+ from tqdm import tqdm
+ 
+ import datasets
+ 
+ logger = datasets.logging.get_logger(__name__)
+ 
+ 
+ _CITATION = """
+ @inproceedings{dude2023icdar,
+     title={ICDAR 2023 Challenge on Document UnderstanDing of Everything (DUDE)},
+     author={Van Landeghem, Jordy and others},
+     booktitle={Proceedings of the ICDAR},
+     year={2023}
+ }
+ """
+ 
+ _DESCRIPTION = """\
+ DUDE requires models to reason about and understand document layouts in multi-page images/PDFs in order to answer questions about them.
+ Specifically, models need to incorporate the layout modality present in the images/PDFs and reason
+ over it to answer DUDE questions. DUDE contains X questions and Y and ...
+ """
+ 
+ _HOMEPAGE = "https://rrc.cvc.uab.es/?ch=23"
+ 
+ _LICENSE = "CC BY 4.0"
+ 
+ _SPLITS = ["sample"]  # ["train", "val", "test"]
+ 
+ _URLS = {}
+ for split in _SPLITS:
+     _URLS[f"{split}_annotations"] = f"https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_{split}_dataset.json"
+     _URLS[f"{split}_pdfs"] = f"https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_{split}_pdfs.tar.gz"
+     _URLS[f"{split}_OCR"] = f"https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_{split}_OCR.tar.gz"
+ 
+ 
+ def batched_conversion(pdf_file):
+     """Convert a PDF to page images in batches of 10 pages to bound memory use."""
+     info = pdf2image.pdfinfo_from_path(pdf_file, userpw=None, poppler_path=None)
+     maxPages = info["Pages"]
+ 
+     logger.info(f"{pdf_file} has {str(maxPages)} pages")
+ 
+     images = []
+ 
+     for page in range(1, maxPages + 1, 10):
+         images.extend(
+             pdf2image.convert_from_path(
+                 pdf_file, dpi=200, first_page=page, last_page=min(page + 10 - 1, maxPages)
+             )
+         )
+     return images
+ 
+ 
+ def open_pdf_binary(pdf_file):
+     """Read a PDF file from disk as raw bytes."""
+     with open(pdf_file, "rb") as f:
+         return f.read()
+ 
+ 
+ class DUDE(datasets.GeneratorBasedBuilder):
+     """DUDE dataset."""
+ 
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="DUDE",
+             version=datasets.Version("0.0.1"),
+             description=_DESCRIPTION,
+         )
+     ]
+ 
+     DEFAULT_CONFIG_NAME = "DUDE"
+ 
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "docId": datasets.Value("string"),
+                 "questionId": datasets.Value("string"),
+                 "question": datasets.Value("string"),
+                 "answers": datasets.Sequence(datasets.Value("string")),
+                 # "answers_page_bounding_boxes": datasets.Sequence(
+                 #     {
+                 #         "left": datasets.Value("int32"),
+                 #         "top": datasets.Value("int32"),
+                 #         "width": datasets.Value("int32"),
+                 #         "height": datasets.Value("int32"),
+                 #         "page": datasets.Value("int32"),
+                 #     }
+                 # ),
+                 "answers_variants": datasets.Sequence(datasets.Value("string")),
+                 "answer_type": datasets.Value("string"),
+                 "data_split": datasets.Value("string"),
+                 "document": datasets.Value("binary"),
+                 "OCR": datasets.Value("binary"),
+             }
+         )
+ 
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+ 
+     def _split_generators(
+         self, dl_manager: datasets.DownloadManager
+     ) -> List[datasets.SplitGenerator]:
+         splits = []
+         for split in _SPLITS:
+             annotations = {}
+             if f"{split}_annotations" in _URLS:  # the blind test set has no annotations
+                 annotations_path = dl_manager.download(_URLS[f"{split}_annotations"])
+                 with open(annotations_path, "r") as f:
+                     annotations = json.load(f)
+             pdfs_archive_path = dl_manager.download(_URLS[f"{split}_pdfs"])
+             pdfs_archive = dl_manager.iter_archive(pdfs_archive_path)
+             OCR_archive_path = dl_manager.download(_URLS[f"{split}_OCR"])
+             OCR_archive = dl_manager.iter_archive(OCR_archive_path)
+             splits.append(
+                 datasets.SplitGenerator(
+                     name=split,
+                     gen_kwargs={
+                         "pdfs_archive": pdfs_archive,
+                         "OCR_archive": OCR_archive,
+                         "annotations": annotations,
+                         "split": split,
+                     },
+                 )
+             )
+         return splits
+ 
+     def _generate_examples(self, pdfs_archive, OCR_archive, annotations, split):
+         extensions = {"pdf", "PDF"}
+ 
+         # iterate over the archives once and index the document/OCR binaries
+         # by docId (the md5 hash of the source PDF)
+         docId_to_bin = {annotation["docId"]: "" for annotation in annotations}
+         docId_to_OCR = copy.deepcopy(docId_to_bin)
+ 
+         for file_path, file_obj in pdfs_archive:
+             path, ext = file_path.rsplit(".", 1)
+             md5 = path.split("/")[-1]
+ 
+             if ext not in extensions:  # skip non-PDF entries such as metadata.jsonlines
+                 continue
+ 
+             if md5 in docId_to_bin:
+                 # images = pdf2image.convert_from_bytes(file_obj.read())
+                 docId_to_bin[md5] = file_obj.read()  # binary
+ 
+         for file_path, file_obj in OCR_archive:
+             # /DUDE_sample_OCR/OCR/Amazon Textract/md5_{original,due}.json
+             path, ext = file_path.rsplit(".", 1)
+             filename = path.split("/")[-1]
+             md5 = filename.split("_")[0]
+ 
+             if md5 in docId_to_OCR and "original" in filename:
+                 docId_to_OCR[md5] = file_obj.read()  # binary
+ 
+         for i, a in enumerate(annotations):
+             a["data_split"] = split
+ 
+             # yield the actual document and OCR binaries, not just the id
+             a["document"] = docId_to_bin[a["docId"]]
+             a["OCR"] = docId_to_OCR[a["docId"]]
+             a.pop("answers_page_bounding_boxes", None)  # bounding boxes are not exposed yet
+ 
+             yield i, a
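Note that _generate_examples yields `document` and `OCR` as raw bytes, so a consumer has to decode them. A minimal sketch, reusing the pdf2image call hinted at in the commented-out line above (the `example` variable is illustrative):

import json
import pdf2image

def decode_example(example):
    # render every page of the PDF to a PIL image
    pages = pdf2image.convert_from_bytes(example["document"])
    # the OCR payload is the Amazon Textract JSON, stored as bytes
    ocr = json.loads(example["OCR"])
    return pages, ocr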
test_loader.py ADDED
@@ -0,0 +1,23 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Smoke test for the DUDE dataset loader"""
+ 
+ from datasets import load_dataset
+ 
+ ds = load_dataset("../DUDE_loader/dude_loader.py")
+ 
+ import pdb; pdb.set_trace()  # drop into the debugger to inspect the loaded dataset
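Rather than stopping at a breakpoint, the loaded dataset can also be inspected directly. A small sketch, assuming the split name "sample" and the field names from the loader above:

# Inspect the loaded DatasetDict without a debugger.
sample = ds["sample"]      # the only split defined in _SPLITS
print(sample.features)     # schema declared in _info()
first = sample[0]
print(first["questionId"], first["question"], first["answers"])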