florianbussmann committed on
Commit 23c5c2c
Parent(s): 8ae27f3

Add dataset loading script for train tickets dataset

Files changed (1)
  1. train_tickets-yu2020pick.py +191 -0
train_tickets-yu2020pick.py ADDED
@@ -0,0 +1,191 @@
+ # coding=utf-8
+ import csv
+ import os
+ import re
+
+ import datasets
+
+ from PIL import Image
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @inproceedings{yu2021pick,
+   title={PICK: Processing key information extraction from documents using improved graph learning-convolutional networks},
+   author={Yu, Wenwen and Lu, Ning and Qi, Xianbiao and Gong, Ping and Xiao, Rong},
+   booktitle={2020 25th International Conference on Pattern Recognition (ICPR)},
+   pages={4363--4370},
+   year={2021},
+   organization={IEEE}
+ }
+ """
+ _DESCRIPTION = """\
+ The train ticket dataset has a fixed layout; however, it contains background noise and imaging distortions.
+ It contains 1,530 synthetic images and 320 real images for training, and 80 real images for testing.
+ Every train ticket has eight key text fields including ticket number, starting station, train number, destination station, date, ticket rates, seat category, and name.
+ This dataset mainly consists of digits, English characters, and Chinese characters.
+ """
+
+ _URL = """\
+ https://drive.google.com/file/d/1o8JktPD7bS74tfjz-8dVcZq_uFS6YEGh/view?usp=sharing
+ """
+
+
+ def load_image(image_path):
+     image = Image.open(image_path).convert("RGB")
+     w, h = image.size
+     return image, (w, h)
+
+
+ def normalize_bbox(bbox, size):
+     # Scale pixel coordinates to a 0-1000 range relative to the image size.
+     return [
+         int(1000 * bbox[0] / size[0]),
+         int(1000 * bbox[1] / size[1]),
+         int(1000 * bbox[2] / size[0]),
+         int(1000 * bbox[3] / size[1]),
+     ]
+
+
+ class TrainTicketsConfig(datasets.BuilderConfig):
+     """BuilderConfig for train_tickets"""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for train_tickets.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(TrainTicketsConfig, self).__init__(**kwargs)
+
+
+ class TrainTickets(datasets.GeneratorBasedBuilder):
+     """train tickets"""
+
+     BUILDER_CONFIGS = [
+         TrainTicketsConfig(
+             name="train_tickets-yu2020pick",
+             version=datasets.Version("1.0.0"),
+             description="Chinese train tickets",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "words": datasets.Sequence(datasets.Value("string")),
+                     "bboxes": datasets.Sequence(
+                         datasets.Sequence(datasets.Value("int64"))
+                     ),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "S-DATE",
+                                 "S-DESTINATION_STATION",
+                                 "S-NAME",
+                                 "S-SEAT_CATEGORY",
+                                 "S-STARTING_STATION",
+                                 "S-TICKET_NUM",
+                                 "S-TICKET_RATES",
+                                 "S-TRAIN_NUM",
+                             ]
+                         )
+                     ),
+                     "image_path": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/wenwenyu/PICK-pytorch",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         downloaded_file = dl_manager.download_and_extract(
+             "https://drive.google.com/uc?export=download&id=1o8JktPD7bS74tfjz-8dVcZq_uFS6YEGh"
+         )
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filelist": f"{downloaded_file}/train_tickets/synth1530_real320_baseline_trainset.csv"
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filelist": f"{downloaded_file}/train_tickets/real80_baseline_testset.csv"
+                 },
+             ),
+         ]
+
+     # based on https://github.com/wenwenyu/PICK-pytorch/blob/master/data_utils/documents.py#L229
+     def _read_gt_file_with_box_entity_type(self, filepath: str):
+         with open(filepath, "r", encoding="utf-8") as f:
+             document_text = f.read()
+
+         # match pattern in document: index,x1,y1,x2,y2,x3,y3,x4,y4,transcript,box_entity_type
+         regex = (
+             r"^\s*(-?\d+)\s*,\s*(-?\d+\.?\d*)\s*,\s*(-?\d+\.?\d*)\s*,\s*(-?\d+\.?\d*)\s*,\s*(-?\d+\.?\d*)\s*,"
+             r"\s*(-?\d+\.?\d*)\s*,\s*(-?\d+\.?\d*)\s*,\s*(-?\d+\.?\d*)\s*,\s*(-?\d+\.?\d*)\s*,(.*),(.*)\n?$"
+         )
+
+         matches = re.finditer(regex, document_text, re.MULTILINE)
+
+         res = []
+         for match in matches:
+             # the regex accepts decimal coordinates, so go through float before int
+             points = [int(float(match.group(i))) for i in range(2, 10)]
+             x = points[0:8:2]
+             y = points[1:8:2]
+             x1 = min(x)
+             y1 = min(y)
+             x2 = max(x)
+             y2 = max(y)
+             transcription = str(match.group(10))
+             entity_type = str(match.group(11))
+             res.append((x1, y1, x2, y2, transcription, entity_type))
+         return res
+
+     def _generate_examples(self, filelist):
+         logger.info("⏳ Generating examples from = %s", filelist)
+
+         ann_dir = os.path.join(os.path.dirname(filelist), "boxes_trans")
+         img_dir = os.path.join(os.path.dirname(filelist), "images1930")
+
+         with open(filelist, encoding="utf-8") as csv_file:
+             csv_reader = csv.reader(csv_file, delimiter=",")
+             for row in csv_reader:
+                 guid = row[0]
+                 # document_type = row[1]
+                 filename = row[2]
+
+                 words = []
+                 bboxes = []
+                 ner_tags = []
+                 file_path = os.path.join(ann_dir, f"{filename}.tsv")
+                 data = self._read_gt_file_with_box_entity_type(file_path)
+                 image_path = os.path.join(img_dir, f"{filename}.jpg")
+                 _, size = load_image(image_path)
+                 for item in data:
+                     box = item[0:4]
+                     transcription, label = item[4:6]
+                     words.append(transcription)
+                     bboxes.append(normalize_bbox(box, size))
+                     if label == "other":
+                         ner_tags.append("O")
+                     else:
+                         ner_tags.append("S-" + label.upper())
+                 yield guid, {
+                     "id": str(guid),
+                     "words": words,
+                     "bboxes": bboxes,
+                     "ner_tags": ner_tags,
+                     "image_path": image_path,
+                 }
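A minimal usage sketch, not part of the commit: assuming the script is saved locally as train_tickets-yu2020pick.py and a datasets version that still supports script-based loading, it can be passed straight to load_dataset. The local path below is an assumption; a Hub repo id hosting the script would work the same way. Boxes come back already normalized to a 0-1000 range by normalize_bbox, and ner_tags are ClassLabel ids for labels such as S-TICKET_NUM.

# hypothetical usage sketch; the script path is an assumption
from datasets import load_dataset

dataset = load_dataset("./train_tickets-yu2020pick.py")

example = dataset["train"][0]
print(example["words"])     # OCR transcriptions for one ticket
print(example["bboxes"])    # boxes scaled to 0-1000 relative to the image size
print(example["ner_tags"])  # class ids for the eight S-* fields plus O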