dimun committed on
Commit
fb95f0f
1 Parent(s): 4d8a624

Create ExpirationDate.py

Files changed (1)
  1. ExpirationDate.py +135 -0
ExpirationDate.py ADDED
@@ -0,0 +1,135 @@
import json
import os

from PIL import Image

import datasets

# Citation for the dataset (from the paper / dataset website)
_CITATION = """\
@article{seker2022generalized,
  title={A generalized framework for recognition of expiration dates on product packages using fully convolutional networks},
  author={Seker, Ahmet Cagatay and Ahn, Sang Chul},
  journal={Expert Systems with Applications},
  pages={117310},
  year={2022},
  publisher={Elsevier}
}
"""

_DESCRIPTION = """\
Annotated images of product packages for training and evaluating models that detect expiration dates on product labels.
"""

_HOMEPAGE = "https://acseker.github.io/ExpDateWebsite/"

_LICENSE = "https://licenses.nuget.org/AFL-3.0"

# Use "resolve" URLs so the raw archives are downloaded;
# "blob" URLs point at the HTML viewer page, not the file itself.
_URLs = {
    "products_synth": "https://huggingface.co/datasets/dimun/ExpirationDate/resolve/main/Products-Synth.zip",
    "products_real": "https://huggingface.co/datasets/dimun/ExpirationDate/resolve/main/Products-Real.zip",
}

def load_image(image_path):
    """Open an image as RGB and return it together with its (width, height)."""
    image = Image.open(image_path).convert("RGB")
    w, h = image.size
    return image, (w, h)


logger = datasets.logging.get_logger(__name__)

class ExpirationDate(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("0.0.1")

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "transcriptions": datasets.Sequence(datasets.Value("string")),
                "bboxes_block": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                "categories": datasets.Sequence(
                    datasets.features.ClassLabel(names=["prod", "date", "due", "code"])
                ),
                "image": datasets.features.Image(),
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # Features/targets of the dataset
            features=features,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager that downloads and
        # extracts the archives at the provided URLs.
        archive_path = dl_manager.download_and_extract(_URLs)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(archive_path["products_synth"], "Products-Synth/images/"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(archive_path["products_real"], "Products-Real/"),
                    "split": "evaluation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(archive_path["products_real"], "Products-Real/"),
                    # The "train" subset of Products-Real serves as the test split.
                    "split": "train",
                },
            ),
        ]
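
For reference, `_generate_examples` below expects each split directory to contain an `annotations.json` that maps an image filename to a record with an `ann` list of boxes. A minimal sketch of that assumed layout, with field names inferred from the accessors in the code (the values are illustrative, not taken from the actual files):

example_annotations = {
    "0001.jpg": {
        "ann": [
            {
                "transcription": "EXP 2024.01.31",  # illustrative text, may be absent
                "bbox": [100, 200, 380, 260],       # box coordinates as integers
                "cls": "date",                      # one of: prod, date, due, code
            },
        ],
    },
}
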

    def _generate_examples(self, filepath, split):
        logger.info(f"⏳ Generating examples from {filepath} for the {split} split")
        ann_file = os.path.join(filepath, split, "annotations.json")

        # Load the per-image annotations for this split
        with open(ann_file, "r", encoding="utf8") as f:
            features_map = json.load(f)

        img_dir = os.path.join(filepath, split, "images")
        img_listdir = os.listdir(img_dir)

        for guid, filename in enumerate(img_listdir):
            image_features = features_map[filename]
            image_ann = image_features.get("ann")

            transcriptions = [box.get("transcription", "") for box in image_ann]
            bboxes_block = [box.get("bbox") for box in image_ann]
            categories = [box.get("cls") for box in image_ann]

            # Load the image itself
            image_path = os.path.join(img_dir, filename)
            image, (width, height) = load_image(image_path)

            yield guid, {
                "id": filename,
                "transcriptions": transcriptions,
                "bboxes_block": bboxes_block,
                "categories": categories,
                "image": image,
                # image_ann is a list of boxes and has no "width"/"height" keys,
                # so report the size measured when the image was loaded.
                "width": width,
                "height": height,
            }
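
Once the script and archives are on the Hub, the dataset loads like any other. A minimal usage sketch (the repo id `dimun/ExpirationDate` is taken from the URLs above; recent versions of `datasets` require `trust_remote_code=True` to run script-based datasets):

import datasets

ds = datasets.load_dataset("dimun/ExpirationDate", trust_remote_code=True)
print(ds)  # DatasetDict with train/validation/test splits

example = ds["train"][0]
print(example["id"], example["width"], example["height"])
print(example["categories"])  # class indices into ["prod", "date", "due", "code"]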