# ocr-barcodes-detection / ocr-barcodes-detection.py
# Author: Vadzim Kashko
# docs: add script description (commit a2a30a6)
from xml.etree import ElementTree as ET
import datasets
# BibTeX citation surfaced through `DatasetInfo.citation` in `_info()`.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {ocr-barcodes-detection},
author = {TrainingDataPro},
year = {2023}
}
"""
# Dataset-card description surfaced through `DatasetInfo.description`.
_DESCRIPTION = """\
The dataset consists of images of various grocery goods that have barcode labels.
Each image in the dataset is annotated with polygons around the barcode labels.
Additionally, Optical Character Recognition (**OCR**) has been performed on each
bounding box to extract the barcode numbers.
The dataset is particularly valuable for applications in *grocery retail, inventory
management, supply chain optimization, and automated checkout systems*. It serves as a
valuable resource for researchers, developers, and businesses working on barcode-related
projects in the retail and logistics domains.
"""
# Dataset slug; used to build the homepage and data URLs below.
_NAME = "ocr-barcodes-detection"
# Hub page for the dataset.
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
# License string is empty here — TODO(review): confirm the intended license.
_LICENSE = ""
# Base URL for resolving the raw data files (archives + annotations) on the Hub.
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
# Class-label vocabulary for the `shapes.label` feature (single class).
_LABELS = ["Barcode"]
class OcrBarcodesDetection(datasets.GeneratorBasedBuilder):
    """Builder for the TrainingDataPro OCR-barcodes-detection dataset.

    Each example pairs a grocery-product photo with its mask image and the
    CVAT-style shape annotations (boxes/polygons around barcode labels, with
    OCR'ed text stored as shape attributes).
    """

    def _info(self):
        """Declare the feature schema, homepage and citation for the dataset."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "name": datasets.Value("string"),
                    "image": datasets.Image(),
                    "mask": datasets.Image(),
                    "width": datasets.Value("uint16"),
                    "height": datasets.Value("uint16"),
                    "shapes": datasets.Sequence(
                        {
                            "label": datasets.ClassLabel(
                                num_classes=len(_LABELS),
                                names=_LABELS,
                            ),
                            "type": datasets.Value("string"),
                            # A list of (x, y) coordinate pairs.
                            "points": datasets.Sequence(
                                datasets.Sequence(
                                    datasets.Value("float"),
                                ),
                            ),
                            "rotation": datasets.Value("float"),
                            "occluded": datasets.Value("uint8"),
                            "attributes": datasets.Sequence(
                                {
                                    "name": datasets.Value("string"),
                                    "text": datasets.Value("string"),
                                }
                            ),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the image/mask archives and annotations; define one train split."""
        images = dl_manager.download(f"{_DATA}images.tar.gz")
        masks = dl_manager.download(f"{_DATA}boxes.tar.gz")
        annotations = dl_manager.download(f"{_DATA}annotations.xml")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # iter_archive lazily yields (path, file-object) pairs.
                    "images": dl_manager.iter_archive(images),
                    "masks": dl_manager.iter_archive(masks),
                    "annotations": annotations,
                },
            ),
        ]

    @staticmethod
    def parse_shape(shape: ET.Element) -> dict:
        """Convert one CVAT shape element (<box>/<polygon>/<points>) to a dict.

        Returns a dict matching the `shapes` feature schema declared in
        `_info()`. Child <attribute> elements (e.g. the OCR'ed barcode
        number) are collected into the `attributes` list.
        """
        label = shape.get("label")
        shape_type = shape.tag
        # XML attribute values are strings; cast them so the emitted values
        # always match the declared float/uint8 feature types (the previous
        # code returned strings when the attribute was present but numeric
        # defaults when it was absent).
        rotation = float(shape.get("rotation", 0.0))
        occluded = int(shape.get("occluded", 0))
        points = None
        if shape_type in ("polygon", "points"):
            # Both tags encode coordinates as "x1,y1;x2,y2;...". The previous
            # "points" branch split only on "," which produced a flat tuple of
            # floats (violating the Sequence(Sequence(float)) schema) and
            # broke for shapes with more than one point.
            points = [
                tuple(map(float, pair.split(",")))
                for pair in shape.get("points").split(";")
            ]
        elif shape_type == "box":
            # Boxes are stored as top-left / bottom-right corner attributes.
            points = [
                (float(shape.get("xtl")), float(shape.get("ytl"))),
                (float(shape.get("xbr")), float(shape.get("ybr"))),
            ]
        attributes = [
            {"name": attr.get("name"), "text": attr.text} for attr in shape
        ]
        return {
            "label": label,
            "type": shape_type,
            "points": points,
            "rotation": rotation,
            "occluded": occluded,
            "attributes": attributes,
        }

    def _generate_examples(self, images, masks, annotations):
        """Yield (key, example) pairs.

        Joins each image/mask pair from the archives with its <image> element
        in annotations.xml, matched by file name. Assumes the two archives
        iterate in the same order — TODO(review): confirm against the data.
        """
        tree = ET.parse(annotations)
        root = tree.getroot()
        for idx, ((image_path, image), (mask_path, mask)) in enumerate(
            zip(images, masks)
        ):
            image_name = image_path.split("/")[-1]
            img = root.find(f"./image[@name='images/{image_name}']")
            if img is None:
                # Skip archive members with no matching annotation instead of
                # crashing on AttributeError.
                continue
            yield idx, {
                # Cast numeric XML attribute strings to match the int features.
                "id": int(img.get("id")),
                "name": img.get("name"),
                "image": {"path": image_path, "bytes": image.read()},
                "mask": {"path": mask_path, "bytes": mask.read()},
                "width": int(img.get("width")),
                "height": int(img.get("height")),
                "shapes": [self.parse_shape(shape) for shape in img],
            }