Datasets:
wkrl
/

Sub-tasks:
parsing
Languages:
English
Multilinguality:
monolingual
Size Categories:
1K<n<10K
Language Creators:
crowdsourced
Annotations Creators:
crowdsourced
Source Datasets:
original
License:
cord / cord.py
wkrl's picture
Init load script
8e8f478
"""CORD: A Consolidated Receipt Dataset for Post-OCR Parsing"""
import json
import os
from pathlib import Path
from typing import Any, Generator
import datasets
from PIL import Image
logger = datasets.logging.get_logger(__name__)
# BibTeX entry for the CORD paper.
# NOTE(review): the "author" and "booktitle" fields appear to be missing
# trailing commas — confirm against the upstream citation before reuse.
_CITATION = """\
@article{park2019cord,
title={CORD: A Consolidated Receipt Dataset for Post-OCR Parsing},
author={Park, Seunghyun and Shin, Seung and Lee, Bado and Lee, Junyeop and Surh, Jaeheung and Seo, Minjoon and Lee, Hwalsuk}
booktitle={Document Intelligence Workshop at Neural Information Processing Systems}
year={2019}
}
"""
# Short human-readable summary of the dataset.
_DESCRIPTION = """\
CORD (Consolidated Receipt Dataset) with normalized bounding boxes.
"""
# Google Drive download URLs for the two archive parts; both are downloaded,
# extracted and merged into a single tree in `CORD._split_generators`.
_URLS = [
    "https://drive.google.com/uc?id=1MqhTbcj-AHXOqYoeoh12aRUwIprzTJYI",
    "https://drive.google.com/uc?id=1wYdp5nC9LnHQZ2FcmOoC0eClyWvcuARU",
]
# The 30 word-level annotation categories found under "valid_line" in the
# CORD JSON files; used as the ClassLabel vocabulary in `CORD._info`.
_LABELS = [
    "menu.cnt",
    "menu.discountprice",
    "menu.etc",
    "menu.itemsubtotal",
    "menu.nm",
    "menu.num",
    "menu.price",
    "menu.sub_cnt",
    "menu.sub_etc",
    "menu.sub_nm",
    "menu.sub_price",
    "menu.sub_unitprice",
    "menu.unitprice",
    "menu.vatyn",
    "sub_total.discount_price",
    "sub_total.etc",
    "sub_total.othersvc_price",
    "sub_total.service_price",
    "sub_total.subtotal_price",
    "sub_total.tax_price",
    "total.cashprice",
    "total.changeprice",
    "total.creditcardprice",
    "total.emoneyprice",
    "total.menuqty_cnt",
    "total.menutype_cnt",
    "total.total_etc",
    "total.total_price",
    "void_menu.nm",
    "void_menu.price",
]
def load_image(image_path: str) -> tuple:
    """Open an image file, force it to RGB, and report its pixel dimensions.

    Args:
        image_path: Filesystem path of the image to load.

    Returns:
        A ``(image, (width, height))`` tuple, where ``image`` is a
        ``PIL.Image.Image`` in RGB mode.
    """
    rgb_image = Image.open(image_path).convert("RGB")
    dimensions = rgb_image.size
    return rgb_image, dimensions
def quad_to_bbox(quad: dict) -> list:
    """Convert a CORD quadrilateral annotation to an axis-aligned bounding box.

    CORD word annotations store four corner points numbered clockwise from
    the top-left, so ``(x1, y1)`` is the top-left corner and ``(x3, y3)`` the
    bottom-right corner.

    Args:
        quad: Mapping with keys ``"x1"``..``"x4"`` and ``"y1"``..``"y4"``.

    Returns:
        ``[x_min, y_min, x_max, y_max]``.

    Note:
        Fixes a bug where the x-coordinates were emitted in the wrong order
        (``x3`` before ``x1``), producing boxes whose left edge lay to the
        right of the right edge — an inversion the downstream
        ``min(bbox) >= 0 and max(bbox) <= 1000`` filter cannot detect.
    """
    return [
        quad["x1"],
        quad["y1"],
        quad["x3"],
        quad["y3"],
    ]
def normalize_bbox(bbox: list, width: int, height: int) -> list:
    """Scale pixel-space box coordinates into the 0–1000 integer range.

    Args:
        bbox: ``[x_min, y_min, x_max, y_max]`` in absolute pixels.
        width: Image width in pixels (divides the x-coordinates).
        height: Image height in pixels (divides the y-coordinates).

    Returns:
        The box with each coordinate rescaled to an int in ``[0, 1000]``
        (assuming the input box lies within the image).
    """
    divisors = (width, height, width, height)
    return [int(1000 * (coord / dim)) for coord, dim in zip(bbox, divisors)]
class CORDConfig(datasets.BuilderConfig):
    """Builder configuration for the CORD dataset.

    Defines no options of its own; every keyword argument is forwarded
    unchanged to :class:`datasets.BuilderConfig`.
    """

    def __init__(self, **kwargs) -> None:
        """Forward all keyword arguments to the base builder config."""
        super().__init__(**kwargs)
class CORD(datasets.GeneratorBasedBuilder):
    """Dataset builder yielding receipt words, normalized boxes, and labels."""

    BUILDER_CONFIGS = [
        CORDConfig(
            name="CORD",
            version=datasets.Version("1.0.0"),
            description="CORD (Consolidated Receipt Dataset)",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Describe the example schema: id, words, 0-1000 boxes, labels, image."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(
                        datasets.Sequence(datasets.Value("int64"))
                    ),
                    "labels": datasets.Sequence(
                        datasets.features.ClassLabel(names=_LABELS)
                    ),
                    "images": datasets.features.Image(),
                }
            ),
            citation=_CITATION,
            homepage="https://github.com/clovaai/cord/",
        )

    def _split_generators(self, dl_manager) -> list:
        """Download both archive parts, merge part 2 into part 1's tree.

        Returns train/validation/test split generators pointing at the
        merged ``CORD/<split>`` directories.
        """
        base_dir_v1, base_dir_v2 = dl_manager.download_and_extract(_URLS)
        dest_dir = Path(base_dir_v1) / "CORD"
        for split_dir in ["train", "dev", "test"]:
            for type_dir in ["image", "json"]:
                # The second archive ships no JSON annotations for the test
                # split, so there is nothing to move there.
                if split_dir == "test" and type_dir == "json":
                    continue
                files = (Path(base_dir_v2) / "CORD" / split_dir / type_dir).iterdir()
                for f in files:
                    os.rename(f, dest_dir / split_dir / type_dir / f.name)
        return [
            datasets.SplitGenerator(
                name=str(datasets.Split.TRAIN), gen_kwargs={"filepath": dest_dir / "train"}
            ),
            datasets.SplitGenerator(
                name=str(datasets.Split.VALIDATION), gen_kwargs={"filepath": dest_dir / "dev"},
            ),
            datasets.SplitGenerator(
                name=str(datasets.Split.TEST), gen_kwargs={"filepath": dest_dir / "test"}
            ),
        ]

    def _generate_examples(self, **kwargs: Any) -> Generator:
        """Yield ``(guid, example)`` pairs from a split directory.

        Expects ``kwargs["filepath"]`` to contain ``json/`` (annotations)
        and ``image/`` (same basenames with a ``.png`` extension).
        """
        filepath = kwargs["filepath"]
        logger.info("generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "json")
        img_dir = os.path.join(filepath, "image")
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            words, bboxes, labels = [], [], []
            file_path = os.path.join(ann_dir, file)
            # FIX: the handle was previously opened and never closed (leak);
            # a context manager guarantees it is released per file.
            with open(file_path, encoding="utf-8") as f:
                data = json.load(f)
            # FIX: `.replace("json", "png")` on the full path would also
            # rewrite any directory component containing "json"; swap only
            # the file extension instead.
            image_path = os.path.join(img_dir, os.path.splitext(file)[0] + ".png")
            image, (width, height) = load_image(image_path)
            for annotation in data["valid_line"]:
                label, line_words = annotation["category"], annotation["words"]
                for word in line_words:
                    bbox = normalize_bbox(
                        quad_to_bbox(word["quad"]), width=width, height=height
                    )
                    # Keep only boxes that land inside the 0-1000 range
                    # after normalization.
                    if min(bbox) >= 0 and max(bbox) <= 1000:
                        words.append(word["text"])
                        bboxes.append(bbox)
                        labels.append(label)
            yield guid, {
                "id": str(guid),
                "images": image,
                "words": words,
                "bboxes": bboxes,
                "labels": labels,
            }