Datasets:
wkrl / cord

Sub-tasks: parsing
Languages: English
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: crowdsourced
Annotations Creators: crowdsourced
Source Datasets: original
License:
wkrl committed
Commit 1449248
Parent(s): 8e8f478

Delete load_script.py

Files changed (1):
load_script.py +0 -186
load_script.py DELETED
@@ -1,186 +0,0 @@
"""CORD: A Consolidated Receipt Dataset for Post-OCR Parsing"""


import json
import os
from pathlib import Path
from typing import Any, Generator

import datasets
from PIL import Image

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{park2019cord,
  title={CORD: A Consolidated Receipt Dataset for Post-OCR Parsing},
  author={Park, Seunghyun and Shin, Seung and Lee, Bado and Lee, Junyeop and Surh, Jaeheung and Seo, Minjoon and Lee, Hwalsuk},
  booktitle={Document Intelligence Workshop at Neural Information Processing Systems},
  year={2019}
}
"""

_DESCRIPTION = """\
CORD (Consolidated Receipt Dataset) with normalized bounding boxes.
"""

_URLS = [
    "https://drive.google.com/uc?id=1MqhTbcj-AHXOqYoeoh12aRUwIprzTJYI",
    "https://drive.google.com/uc?id=1wYdp5nC9LnHQZ2FcmOoC0eClyWvcuARU",
]

# The 30 CORD field categories, grouped by receipt section:
# menu items, subtotals, totals, and voided menu items.
_LABELS = [
    "menu.cnt",
    "menu.discountprice",
    "menu.etc",
    "menu.itemsubtotal",
    "menu.nm",
    "menu.num",
    "menu.price",
    "menu.sub_cnt",
    "menu.sub_etc",
    "menu.sub_nm",
    "menu.sub_price",
    "menu.sub_unitprice",
    "menu.unitprice",
    "menu.vatyn",
    "sub_total.discount_price",
    "sub_total.etc",
    "sub_total.othersvc_price",
    "sub_total.service_price",
    "sub_total.subtotal_price",
    "sub_total.tax_price",
    "total.cashprice",
    "total.changeprice",
    "total.creditcardprice",
    "total.emoneyprice",
    "total.menuqty_cnt",
    "total.menutype_cnt",
    "total.total_etc",
    "total.total_price",
    "void_menu.nm",
    "void_menu.price",
]

def load_image(image_path: str) -> tuple:
    image = Image.open(image_path).convert("RGB")
    return image, image.size


def quad_to_bbox(quad: dict) -> list:
    # CORD quads list their corners clockwise from the top-left, so
    # (x1, y1) is the top-left corner and (x3, y3) the bottom-right.
    return [
        quad["x1"],
        quad["y1"],
        quad["x3"],
        quad["y3"],
    ]


def normalize_bbox(bbox: list, width: int, height: int) -> list:
    # Scale pixel coordinates into the 0-1000 range expected by
    # LayoutLM-style models.
    return [
        int(1000 * (bbox[0] / width)),
        int(1000 * (bbox[1] / height)),
        int(1000 * (bbox[2] / width)),
        int(1000 * (bbox[3] / height)),
    ]
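
# For illustration (hypothetical values): a word whose quad has its
# top-left corner at (10, 20) and bottom-right corner at (50, 80) on a
# 480 x 640 receipt image yields
#   quad_to_bbox(quad)                         -> [10, 20, 50, 80]
#   normalize_bbox([10, 20, 50, 80], 480, 640) -> [20, 31, 104, 125]
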
class CORDConfig(datasets.BuilderConfig):
    """BuilderConfig for CORD."""

    def __init__(self, **kwargs) -> None:
        """BuilderConfig for CORD.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class CORD(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        CORDConfig(
            name="CORD",
            version=datasets.Version("1.0.0"),
            description="CORD (Consolidated Receipt Dataset)",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(
                        datasets.Sequence(datasets.Value("int64"))
                    ),
                    "labels": datasets.Sequence(
                        datasets.features.ClassLabel(names=_LABELS)
                    ),
                    "images": datasets.features.Image(),
                }
            ),
            citation=_CITATION,
            homepage="https://github.com/clovaai/cord/",
        )
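
    # Note: ClassLabel encodes each label as an integer id following the
    # order of _LABELS above, e.g. "menu.cnt" -> 0, "void_menu.price" -> 29.
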
    def _split_generators(self, dl_manager) -> list:
        # The data ships as two Google Drive archives; both are downloaded
        # and the contents of the second are moved into the first, so every
        # split ends up under a single CORD/ directory.
        base_dir_v1, base_dir_v2 = dl_manager.download_and_extract(_URLS)
        dest_dir = Path(base_dir_v1) / "CORD"

        for split_dir in ["train", "dev", "test"]:
            for type_dir in ["image", "json"]:
                # Test-split annotations are not taken from the second archive.
                if split_dir == "test" and type_dir == "json":
                    continue
                files = (Path(base_dir_v2) / "CORD" / split_dir / type_dir).iterdir()
                for f in files:
                    os.rename(f, dest_dir / split_dir / type_dir / f.name)

        return [
            datasets.SplitGenerator(
                name=str(datasets.Split.TRAIN),
                gen_kwargs={"filepath": dest_dir / "train"},
            ),
            datasets.SplitGenerator(
                name=str(datasets.Split.VALIDATION),
                gen_kwargs={"filepath": dest_dir / "dev"},
            ),
            datasets.SplitGenerator(
                name=str(datasets.Split.TEST),
                gen_kwargs={"filepath": dest_dir / "test"},
            ),
        ]

    def _generate_examples(self, **kwargs: Any) -> Generator:
        filepath = kwargs["filepath"]
        logger.info("generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "json")
        img_dir = os.path.join(filepath, "image")

        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            words, bboxes, labels = [], [], []
            file_path = os.path.join(ann_dir, file)
            with open(file_path) as f:
                data = json.load(f)

            # Each annotation file has a matching image of the same name.
            image_path = os.path.join(img_dir, file).replace("json", "png")
            image, (width, height) = load_image(image_path)

            for annotation in data["valid_line"]:
                label, line_words = annotation["category"], annotation["words"]
                for word in line_words:
                    bbox = normalize_bbox(
                        quad_to_bbox(word["quad"]), width=width, height=height
                    )
                    # Drop boxes that fall outside the image after scaling.
                    if min(bbox) >= 0 and max(bbox) <= 1000:
                        words.append(word["text"])
                        bboxes.append(bbox)
                        labels.append(label)

            yield guid, {
                "id": str(guid),
                "images": image,
                "words": words,
                "bboxes": bboxes,
                "labels": labels,
            }
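
For reference, a loading script like this one is consumed through datasets.load_dataset. A minimal sketch, assuming the script is saved locally as load_script.py (the hub repo id is not shown on this page, so a local path is used; recent versions of the datasets library also require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# Hypothetical local path to this script; substitute the hub repo id if known.
dataset = load_dataset("load_script.py", trust_remote_code=True)

example = dataset["train"][0]
print(example["words"][:5])   # first five OCR tokens
print(example["labels"][:5])  # matching integer class ids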