# race-numbers-detection-and-ocr.py
from xml.etree import ElementTree as ET
import datasets
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {race-numbers-detection-and-ocr},
author = {TrainingDataPro},
year = {2023}
}
"""
_DESCRIPTION = """\
The dataset consists of photos of runners, participating in various races. Each photo
captures a runner wearing a race number on their attire.
The dataset provides **bounding boxes** annotations indicating the location of the race
number in each photo and includes corresponding OCR annotations, where the digit
sequences on the race numbers are transcribed.
This dataset combines the domains of sports, computer vision, and OCR technology,
providing a valuable resource for advancing the field of race number detection and OCR
in the context of athletic events.
"""
_NAME = "race-numbers-detection-and-ocr"
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
_LICENSE = ""
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
_LABELS = ["number"]


class RaceNumbersDetectionAndOcr(datasets.GeneratorBasedBuilder):
    """Builder for the TrainingDataPro race numbers detection and OCR dataset."""

def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("int32"),
"name": datasets.Value("string"),
"image": datasets.Image(),
"mask": datasets.Image(),
"width": datasets.Value("uint16"),
"height": datasets.Value("uint16"),
"shapes": datasets.Sequence(
{
"label": datasets.ClassLabel(
num_classes=len(_LABELS),
names=_LABELS,
),
"type": datasets.Value("string"),
"points": datasets.Sequence(
datasets.Sequence(
datasets.Value("float"),
),
),
"rotation": datasets.Value("float"),
"attributes": datasets.Sequence(
{
"name": datasets.Value("string"),
"text": datasets.Value("string"),
}
),
}
),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)

    def _split_generators(self, dl_manager):
        # Download the photo archive, the boxes archive (exposed as the `mask`
        # field), and the XML annotation file, then stream both archives.
images = dl_manager.download(f"{_DATA}images.tar.gz")
masks = dl_manager.download(f"{_DATA}boxes.tar.gz")
annotations = dl_manager.download(f"{_DATA}annotations.xml")
images = dl_manager.iter_archive(images)
masks = dl_manager.iter_archive(masks)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"images": images,
"masks": masks,
"annotations": annotations,
},
),
]
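    # Sketch of the kind of annotation element that `parse_shape` below expects
    # (a CVAT-style export is assumed; all names and values are illustrative only):
    #
    #   <image id="0" name="1.jpg" width="1920" height="1080">
    #     <box label="number" xtl="103.5" ytl="412.0" xbr="287.2" ybr="501.9" rotation="0.0">
    #       <attribute name="number">1234</attribute>
    #     </box>
    #   </image>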

    @staticmethod
    def parse_shape(shape: ET.Element) -> dict:
        """Parse one shape element from the annotation XML into a plain dict."""
        label = shape.get("label")
        shape_type = shape.tag
        rotation = float(shape.get("rotation", 0.0))
points = None
if shape_type == "points":
points = tuple(map(float, shape.get("points").split(",")))
elif shape_type == "box":
points = [
(float(shape.get("xtl")), float(shape.get("ytl"))),
(float(shape.get("xbr")), float(shape.get("ybr"))),
]
elif shape_type == "polygon":
points = [
tuple(map(float, point.split(",")))
for point in shape.get("points").split(";")
]
        # Child elements (e.g. <attribute> tags) carry extra text for the shape,
        # such as the OCR transcription of the race number.
        attributes = []
        for attr in shape:
attr_name = attr.get("name")
attr_text = attr.text
attributes.append({"name": attr_name, "text": attr_text})
shape_data = {
"label": label,
"type": shape_type,
"points": points,
"rotation": rotation,
"attributes": attributes,
}
return shape_data
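
    # For the box element sketched above, `parse_shape` would return roughly
    # (values illustrative):
    #
    #   {
    #       "label": "number",
    #       "type": "box",
    #       "points": [(103.5, 412.0), (287.2, 501.9)],
    #       "rotation": 0.0,
    #       "attributes": [{"name": "number", "text": "1234"}],
    #   }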

    def _generate_examples(self, images, masks, annotations):
        tree = ET.parse(annotations)
        root = tree.getroot()
        # The two archives are assumed to list files in the same order, so zipping
        # them pairs each photo with its corresponding box image.
        for idx, (
            (image_path, image),
            (mask_path, mask),
        ) in enumerate(zip(images, masks)):
            # Look up the matching <image> element in the annotation file by file name.
            image_name = image_path.split("/")[-1]
            img = root.find(f"./image[@name='{image_name}']")
            image_id = img.get("id")
            name = img.get("name")
            width = img.get("width")
            height = img.get("height")
            shapes = [self.parse_shape(shape) for shape in img]
yield idx, {
"id": image_id,
"name": name,
"image": {"path": image_path, "bytes": image.read()},
"mask": {"path": mask_path, "bytes": mask.read()},
"width": width,
"height": height,
"shapes": shapes,
}
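

if __name__ == "__main__":
    # Rough local smoke test, a sketch only: it assumes network access and a
    # `datasets` version that still supports script-based builders (passing
    # trust_remote_code=True where required).
    ds = datasets.load_dataset(__file__, split="train", trust_remote_code=True)
    sample = ds[0]
    print(sample["name"], sample["width"], sample["height"])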