# coding=utf-8
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Carla-COCO-Object-Detection-Dataset""" | |
import collections | |
import json | |
import os | |
import datasets | |
_DESCRIPTION = """\ | |
This dataset contains 1028 images each 640x380 pixels. | |
The dataset is split into 249 test and 779 training examples. | |
Every image comes with MS COCO format annotations. | |
The dataset was collected in Carla Simulator, driving around in autopilot mode in various environments | |
(Town01, Town02, Town03, Town04, Town05) and saving every i-th frame. | |
The labels where then automatically generated using the semantic segmentation information. | |
""" | |

_HOMEPAGE = "https://github.com/yunusskeete/Carla-COCO-Object-Detection-Dataset"

_LICENSE = "MIT"

_URL = "https://huggingface.co/datasets/yunusskeete/Carla-COCO-Object-Detection-Dataset/resolve/main/Carla-COCO-Object-Detection-Dataset.tar.gz"

_CATEGORIES = ["automobile", "bike", "motorbike", "traffic_light", "traffic_sign"]


class CARLA_COCO(datasets.GeneratorBasedBuilder):
    """Carla-COCO-Object-Detection-Dataset"""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Specifies the `datasets.DatasetInfo` object, which contains the information and typings for the dataset."""
        features = datasets.Features(
            {
                "image_id": datasets.Value("int64"),
                "image": datasets.Image(),
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
                "file_name": datasets.Value("string"),
                "license": datasets.Value("int32"),
                "url": datasets.Value("string"),
                "date_captured": datasets.Value("string"),
                "objects": datasets.Sequence(
                    {
                        "id": datasets.Value("int64"),
                        "area": datasets.Value("int64"),
                        "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                        "category": datasets.ClassLabel(names=_CATEGORIES),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )
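
    # Note on the `objects` feature above: because `datasets.Sequence` wraps a
    # dict, a loaded example exposes `objects` as a dict of equal-length lists
    # rather than a list of dicts, e.g. (illustrative values only):
    #     {"id": [1, 2], "area": [504, 1120],
    #      "bbox": [[10.0, 20.0, 30.0, 40.0], [50.0, 60.0, 70.0, 80.0]],
    #      "category": [0, 3]}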

    def _split_generators(self, dl_manager):
        """Downloads the data archive and defines the train/test splits."""
        # `download` + `iter_archive` (rather than `download_and_extract`) keeps
        # the archive packed and iterates over its members, which also works in
        # streaming mode.
        archive = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "annotation_file_path": "annotations/train.json",
                    "files": dl_manager.iter_archive(archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "annotation_file_path": "annotations/test.json",
                    "files": dl_manager.iter_archive(archive),
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, annotation_file_path, files):
        """
        This method handles the input defined in `_split_generators` to yield (key, example) tuples from the dataset.
        """

        def process_annot(annot, category_id_to_category):
            return {
                "id": annot["id"],
                "area": annot["area"],
                "bbox": annot["bbox"],
                # `category_id_to_category` maps a COCO category id to its name;
                # `ClassLabel` accepts the name string directly.
                "category": category_id_to_category[annot["category_id"]],
            }

        image_id_to_image = {}
        idx = 0

        # This loop relies on the ordering of the files in the archive:
        # the annotation file comes first, then the images.
        for path, f in files:
            file_name = os.path.basename(path)
            if path == annotation_file_path:
                annotations = json.load(f)
                category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
                image_id_to_annotations = collections.defaultdict(list)
                for annot in annotations["annotations"]:
                    image_id_to_annotations[annot["image_id"]].append(annot)
                # Despite its name, this dict is keyed by file name so that images
                # can be matched as they are encountered later in the archive.
                image_id_to_image = {image["file_name"]: image for image in annotations["images"]}
            elif file_name in image_id_to_image:
                image = image_id_to_image[file_name]
                objects = [
                    process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
                ]
                yield idx, {
                    "image_id": image["id"],
                    "image": {"path": path, "bytes": f.read()},
                    "width": image["width"],
                    "height": image["height"],
                    "file_name": image["file_name"],
                    "license": image["license"],
                    "url": image["url"],
                    "date_captured": image["date_captured"],
                    "objects": objects,
                }
                idx += 1
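

# Local smoke test: a minimal sketch, assuming `datasets` is installed and the
# archive at _URL is reachable; it is not required for normal Hub usage. Recent
# versions of `datasets` may additionally require `trust_remote_code=True` when
# loading from a script like this one.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__)
    # Expected per _DESCRIPTION: 779 training and 249 test examples.
    print(dataset)
    first = dataset["train"][0]
    print(first["file_name"], first["width"], first["height"])
    print(first["objects"]["category"])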