# parking-space-detection-dataset / parking-space-detection-dataset.py
# Source: Hugging Face Hub dataset repo TrainingDataPro/parking-space-detection-dataset
# Commit: 54e6873 ("fix: script, docs: readme")
import datasets
import pandas as pd
# BibTeX citation surfaced through DatasetInfo.citation in _info().
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {parking-space-detection-dataset},
author = {TrainingDataPro},
year = {2023}
}
"""
# Human-readable summary surfaced through DatasetInfo.description in _info().
_DESCRIPTION = """\
The dataset consists of images of parking spaces along with corresponding bounding box
masks. In order to facilitate object detection and localization, every parking space in
the images is annotated with a bounding box mask.
The bounding box mask outlines the boundary of the parking space, marking its position
and shape within the image. This allows for accurate identification and extraction of
individual parking spaces. Each parking spot is also labeled in accordance to its
occupancy: free, not free or partially free.
This dataset can be leveraged for a range of applications such as parking lot
management, autonomous vehicle navigation, smart city implementations, and traffic
analysis.
"""
# Repository name on the Hugging Face Hub; used to build the URLs below.
_NAME = "parking-space-detection-dataset"
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
# Intentionally empty: no license string is declared in this script.
_LICENSE = ""
# Base URL of the data files stored under data/ in the dataset repository.
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
class ParkingSpaceDetectionDataset(datasets.GeneratorBasedBuilder):
    """Loader for the TrainingDataPro parking-space-detection dataset.

    Each example pairs a parking-lot photo with its bounding-box mask image
    and the raw annotation string taken from the accompanying CSV file.
    """

    def _info(self):
        """Return dataset metadata (features, homepage, citation, license)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "image": datasets.Image(),
                    "mask": datasets.Image(),
                    # Raw annotation payload from the CSV, kept as a string.
                    "bboxes": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            # Pass the module-level license through instead of dropping it.
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the two archives and the CSV; expose one TRAIN split."""
        images = dl_manager.download(f"{_DATA}images.tar.gz")
        masks = dl_manager.download(f"{_DATA}boxes.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # iter_archive yields (path, file-object) pairs lazily,
                    # so the tarballs are streamed rather than extracted.
                    "images": dl_manager.iter_archive(images),
                    "masks": dl_manager.iter_archive(masks),
                    "annotations": annotations,
                },
            ),
        ]

    def _generate_examples(self, images, masks, annotations):
        """Yield (index, example) pairs for the TRAIN split.

        NOTE(review): images and masks are zipped positionally, so the two
        archives are assumed to list their members in matching order —
        confirm against the published tarballs.
        """
        annotations_df = pd.read_csv(annotations)
        for idx, ((image_path, image), (mask_path, mask)) in enumerate(
            zip(images, masks)
        ):
            # Filter the DataFrame once per example instead of re-scanning
            # it separately for "image_id" and "annotations". Like the
            # original .values[0], iloc[0] raises IndexError on no match.
            row = annotations_df.loc[
                annotations_df["image_name"] == image_path
            ].iloc[0]
            yield idx, {
                "id": row["image_id"],
                "image": {"path": image_path, "bytes": image.read()},
                "mask": {"path": mask_path, "bytes": mask.read()},
                "bboxes": row["annotations"],
            }