rvl_cdip_n_mp/rvl_cdip_n_mp.py
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RVL-CDIP (Ryerson Vision Lab Complex Document Information Processing) dataset"""
import os
import datasets
from pathlib import Path
from tqdm import tqdm
import pdf2image
datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)
_MODE = "binary"
_CITATION = """\
@inproceedings{larson2022evaluating,
title={Evaluating Out-of-Distribution Performance on Document Image Classifiers},
author={Larson, Stefan and Lim, Gordon and Ai, Yutong and Kuang, David and Leach, Kevin},
booktitle={Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track},
year={2022}
}
@inproceedings{bdpc,
title = {Beyond Document Page Classification},
author = {Anonymous},
booktitle = {Under Review},
year = {2023}
}
"""
_DESCRIPTION = """\
The RVL-CDIP-N (Ryerson Vision Lab Complex Document Information Processing) dataset consists of newly gathered documents in 16 classes
There are 998 documents for testing purposes. There were 3 documents from the original dataset that could not be retrieved based on the metadata.
"""
_HOMEPAGE = "https://www.cs.cmu.edu/~aharley/rvl-cdip/"
_LICENSE = "https://www.industrydocuments.ucsf.edu/help/copyright/"
SOURCE = "jordyvl/rvl_cdip_mp"
_URL = f"https://huggingface.co/datasets/{SOURCE}/resolve/main/data.gz"
_BACKOFF_folder = "/mnt/lerna/data/RVL-CDIP-NO/RVL-CDIP-N_pdf/data"
_CLASSES = [
"letter",
"form",
"email",
"handwritten",
"advertisement",
"scientific report",
"scientific publication",
"specification",
"file folder",
"news article",
"budget",
"invoice",
"presentation",
"questionnaire",
"resume",
"memo",
]
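
# Sketch of the on-disk layout expected for `data_dir` (inferred from _generate_examples:
# one subfolder per class name, matched case-insensitively against _CLASSES, with PDFs inside).
# Folder and file names below are purely illustrative:
#
#   data/
#     letter/
#       some_letter.pdf
#     invoice/
#       some_invoice.pdf
#     ...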
def batched_conversion(pdf_file):
info = pdf2image.pdfinfo_from_path(pdf_file, userpw=None, poppler_path=None)
maxPages = info["Pages"]
logger.info(f"{pdf_file} has {str(maxPages)} pages")
images = []
for page in range(1, maxPages + 1, 10):
images.extend(
pdf2image.convert_from_path(pdf_file, dpi=200, first_page=page, last_page=min(page + 10 - 1, maxPages))
)
return images
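
# A minimal usage sketch for batched_conversion, which renders a PDF to PIL images in
# 10-page chunks. It is not called by the loader when _MODE == "binary"; "sample.pdf"
# is a hypothetical path used only for illustration:
#
#   pages = batched_conversion("sample.pdf")
#   pages[0].save("page_1.png")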
def open_pdf_binary(pdf_file):
with open(pdf_file, "rb") as f:
return f.read()
class RvlCdipNMp(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
DEFAULT_CONFIG_NAME = "default"
def _info(self):
        if isinstance(self.config.data_dir, str):
            folder = self.config.data_dir  # the folder structure on the user's local disk
        else:
            if not os.path.exists(_BACKOFF_folder):
                raise ValueError("No data folder found. Please set data_dir or data_files.")
            folder = _BACKOFF_folder  # fallback to the author's local path; other users should set data_dir or data_files
        self.config.data_dir = folder
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"file": datasets.Value("binary"),
"labels": datasets.features.ClassLabel(names=_CLASSES),
}
),
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
task_templates=None,
)
    def _split_generators(self, dl_manager):
        if self.config.data_dir.endswith(".tar.gz"):
            # download_and_extract returns a local directory that _generate_examples can walk;
            # iter_archive would yield (path, file) tuples, which the Path.glob below cannot consume
            data_files = dl_manager.download_and_extract(self.config.data_dir)
        else:
            data_files = self.config.data_dir
        return [datasets.SplitGenerator(name="test", gen_kwargs={"archive_path": data_files})]
def _generate_examples(self, archive_path):
labels = self.info.features["labels"]
extensions = {".pdf", ".PDF"}
for i, path in tqdm(enumerate(Path(archive_path).glob("**/*")), desc=f"{archive_path}"):
if path.suffix in extensions:
try:
if _MODE == "binary":
images = open_pdf_binary(path)
# batched_conversion(path)
else:
images = path
a = dict(
id=path.name,
file=images,
labels=labels.encode_example(path.parent.name.lower()),
)
yield path.name, a
                except Exception as e:
                    logger.warning(f"{e}: failed to parse {path}")
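

# A minimal usage sketch, assuming the script is run locally through the `datasets`
# library; the data_dir below is a hypothetical path and, depending on the installed
# `datasets` version, load_dataset may additionally require trust_remote_code=True.
if __name__ == "__main__":
    dset = datasets.load_dataset(__file__, data_dir="/path/to/RVL-CDIP-N_pdf/data", split="test")
    example = dset[0]
    print(example["id"], example["labels"])  # file name and integer class index
    if _MODE == "binary":
        # "file" holds the raw PDF bytes; pdf2image can render them back to PIL images
        pages = pdf2image.convert_from_bytes(example["file"], dpi=200)
        print(f"first document rendered to {len(pages)} page image(s)")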