bdpc committed on
Commit
ed15331
1 Parent(s): 5fbe6d6

Upload 2 files

Browse files
Files changed (2) hide show
  1. README.md +10 -0
  2. rvl_cdip_mp.py +160 -0
README.md ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: cc-by-nc-4.0
3
+ ---
4
+
5
+ # Dataset Card for RVL-CDIP_MultiPage
6
+
7
+ ## Extension
8
+
9
+ The data loader provides support for loading RVL_CDIP in its extended multipage format.
10
+ Since the dataset binaries are huge (80 GB), they will be hosted elsewhere: <LINK>
rvl_cdip_mp.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """RVL-CDIP_mp (Ryerson Vision Lab Complex Document Information Processing) -Extended -Multipage dataset"""
16
+
17
+
18
+ import os
19
+ import datasets
20
+ from pathlib import Path
21
+ from typing import List
22
+ from tqdm import tqdm
23
+
24
# Surface the datasets library's info-level logging so users see
# download/generation progress when this loader runs.
datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

# Serving mode: "binary" exposes each document as raw PDF bytes
# (as opposed to decoding pages into images). NOTE(review): not read
# anywhere in this file — presumably consumed by downstream code; confirm.
MODE = "binary"

# BibTeX citation for the accompanying (anonymous, under-review) paper.
_CITATION = """

@inproceedings{bdpc,
    title = {Beyond Document Page Classification},
    author = {Anonymous},
    booktitle = {Under Review},
    year = {2023}
}
"""

_DESCRIPTION = """\
The RVL-CDIP (Ryerson Vision Lab Complex Document Information Processing) dataset consists of originally retrieved documents in 16 classes.
There were +-500 documents from the original dataset that could not be retrieved based on the metadata or were corrupt in IDL.
"""


_HOMEPAGE = "https://www.cs.cmu.edu/~aharley/rvl-cdip/"
_LICENSE = "https://www.industrydocuments.ucsf.edu/help/copyright/"


# Hugging Face Hub repository id of this dataset.
SOURCE = "bdpc/rvl_cdip_mp"

# Fallback data location (the author's local machine), used when neither
# data_dir nor data_files is supplied when loading the dataset.
_BACKOFF_folder = "/mnt/lerna/data/RVL-CDIP_pdf"

# The 16 RVL-CDIP document categories; list order defines the ClassLabel ids.
_CLASSES = [
    "letter",
    "form",
    "email",
    "handwritten",
    "advertisement",
    "scientific_report",
    "scientific_publication",
    "specification",
    "file_folder",
    "news_article",
    "budget",
    "invoice",
    "presentation",
    "questionnaire",
    "resume",
    "memo",
]
71
+
72
+
73
def open_pdf_binary(pdf_file):
    """Return the raw bytes of the PDF at *pdf_file* (str or path-like)."""
    return Path(pdf_file).read_bytes()
76
+
77
+
78
class RvlCdipMp(datasets.GeneratorBasedBuilder):
    """Loader for the multi-page (PDF) extension of RVL-CDIP.

    Each example is a raw PDF file (bytes) plus a ClassLabel derived from
    the name of the folder the PDF lives in. One dataset split is created
    per top-level label folder under ``data_dir``.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=datasets.Version("1.0.0", ""),
            description="",
        )
    ]

    def __init__(self, *args, examples_per_class=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Optional cap on examples generated per class. NOTE(review): only
        # stored here; _generate_examples does not enforce it yet.
        self.examples_per_class = examples_per_class

    @property
    def manual_download_instructions(self):
        return (
            "To use RVL-CDIP_multi you have to download it manually. Please extract all files in one folder and load the dataset with: "
            "`datasets.load_dataset('bdpc/rvl_cdip_mp', data_dir='path/to/folder/folder_name')`"
        )

    def _info(self):
        # Resolve the data location once and cache it on the config.
        # Precedence: data_files (archive) > data_dir (extracted folder)
        # > hard-coded backoff path on the author's machine.
        if isinstance(self.config.data_files, str):
            folder = self.config.data_files  # archive; needs extraction (zip/tar)
        elif isinstance(self.config.data_dir, str):
            folder = self.config.data_dir  # already-extracted local folder
        else:
            folder = _BACKOFF_folder  # others should set data_dir or data_files
        self.config.data_dir = folder

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("binary"),  # raw PDF bytes per document
                    "labels": datasets.features.ClassLabel(names=_CLASSES),
                }
            ),
            task_templates=None,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Create one split per top-level label folder under ``data_dir``.

        Raises:
            NotImplementedError: for ``.tar.gz``/``.zip`` inputs (archive
                loading was never finished); raised *before* downloading so
                users don't pull tens of GB for nothing.
            ValueError: when ``data_dir`` is neither an existing directory
                nor a recognized archive. (Previously this path fell through
                and crashed with an unbound-local NameError.)
        """
        if os.path.isdir(self.config.data_dir):
            data_files = {
                labelset: os.path.join(self.config.data_dir, labelset)
                for labelset in sorted(os.listdir(self.config.data_dir), reverse=True)
                if "csv" not in labelset  # skip metadata CSVs next to label folders
            }
        elif self.config.data_dir.endswith((".tar.gz", ".zip")):
            raise NotImplementedError(
                f"Loading from archives is not supported yet; please extract "
                f"{self.config.data_dir!r} and pass the folder via data_dir."
            )
        else:
            raise ValueError(
                f"Cannot load data from {self.config.data_dir!r}. "
                f"{self.manual_download_instructions}"
            )

        splits = []
        for split_name, folder in data_files.items():
            logger.info("Creating split %s from %s", split_name, folder)
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"archive_path": folder}))
        return splits

    def _generate_examples(self, archive_path):
        """Yield ``(key, example)`` pairs for every PDF under ``archive_path``.

        Layout is assumed to be ``<split>/<subfolder>/<document>.pdf`` with the
        PDF's parent folder name carrying the class label — TODO confirm against
        the published binaries.
        """
        labels = self.info.features["labels"]
        extensions = {".pdf", ".PDF"}

        root = Path(archive_path)
        for i, path in tqdm(enumerate(root.glob("**/*/*")), desc=f"{archive_path}"):
            if path.suffix not in extensions:
                continue
            try:
                # Key on the split-relative path: bare file names can collide
                # across subfolders, and duplicate keys abort generation.
                yield str(path.relative_to(root)), {
                    "file": open_pdf_binary(path),
                    "labels": labels.encode_example(path.parent.name.lower()),
                }
            except Exception as e:
                # Best-effort: log and skip unreadable/corrupt files.
                logger.warning(f"{e} failed to parse {i}")