# minicoco/minicoco.py
import os
import json
import logging
import datasets
from collections import defaultdict
_CITATION = """
MINICOCO2017
"""
_DESCRIPTION = """
MiniCOCO2017: a small subset of the COCO 2017 dataset with annotations for object
detection, instance segmentation, image captioning, and person keypoint tasks.
"""
_URLS = {
"minicoco2017": "minicoco.tar.gz"
}
# fmt: off
CLASS_INFOS = [
# name id train
('person', 1, 0),
('bicycle', 2, 1),
('car', 3, 2),
('motorcycle', 4, 3),
('airplane', 5, 4),
('bus', 6, 5),
('train', 7, 6),
('truck', 8, 7),
('boat', 9, 8),
('traffic light', 10, 9),
('fire hydrant', 11, 10),
('stop sign', 13, 11),
('parking meter', 14, 12),
('bench', 15, 13),
('bird', 16, 14),
('cat', 17, 15),
('dog', 18, 16),
('horse', 19, 17),
('sheep', 20, 18),
('cow', 21, 19),
('elephant', 22, 20),
('bear', 23, 21),
('zebra', 24, 22),
('giraffe', 25, 23),
('backpack', 27, 24),
('umbrella', 28, 25),
('handbag', 31, 26),
('tie', 32, 27),
('suitcase', 33, 28),
('frisbee', 34, 29),
('skis', 35, 30),
('snowboard', 36, 31),
('sports ball', 37, 32),
('kite', 38, 33),
('baseball bat', 39, 34),
('baseball glove', 40, 35),
('skateboard', 41, 36),
('surfboard', 42, 37),
('tennis racket', 43, 38),
('bottle', 44, 39),
('wine glass', 46, 40),
('cup', 47, 41),
('fork', 48, 42),
('knife', 49, 43),
('spoon', 50, 44),
('bowl', 51, 45),
('banana', 52, 46),
('apple', 53, 47),
('sandwich', 54, 48),
('orange', 55, 49),
('broccoli', 56, 50),
('carrot', 57, 51),
('hot dog', 58, 52),
('pizza', 59, 53),
('donut', 60, 54),
('cake', 61, 55),
('chair', 62, 56),
('couch', 63, 57),
('potted plant', 64, 58),
('bed', 65, 59),
('dining table', 67, 60),
('toilet', 70, 61),
('tv', 72, 62),
('laptop', 73, 63),
('mouse', 74, 64),
('remote', 75, 65),
('keyboard', 76, 66),
('cell phone', 77, 67),
('microwave', 78, 68),
('oven', 79, 69),
('toaster', 80, 70),
('sink', 81, 71),
('refrigerator', 82, 72),
('book', 84, 73),
('clock', 85, 74),
('vase', 86, 75),
('scissors', 87, 76),
('teddy bear', 88, 77),
('hair drier', 89, 78),
('toothbrush', 90, 79)
]
KEYPOINTS_INFOS = [
# name id train
# ('nose', 1, 0),
# ('left_eye', 2, 1),
# ('right_eye', 3, 2),
# ('left_ear', 4, 3),
# ('right_ear', 5, 4),
# ('left_shoulder', 6, 5),
# ('right_shoulder', 7, 6),
# ('left_elbow', 8, 7),
# ('right_elbow', 9, 8),
# ('left_wrist', 10, 9),
# ('right_wrist', 11, 10),
# ('left_hip', 12, 11),
# ('right_hip', 13, 12),
# ('left_knee', 14, 13),
# ('right_knee', 15, 14),
# ('left_ankle', 16, 15),
# ('right_ankle', 17, 16)
('none', 1, 0),
('nose', 2, 1),
('left_eye', 3, 2),
('right_eye', 4, 3),
('left_ear', 5, 4),
('right_ear', 6, 5),
('left_shoulder', 7, 6),
('right_shoulder', 8, 7),
('left_elbow', 9, 8),
('right_elbow', 10, 9),
('left_wrist', 11, 10),
('right_wrist', 12, 11),
('left_hip', 13, 12),
('right_hip', 14, 13),
('left_knee', 15, 14),
('right_knee', 16, 15),
('left_ankle', 17, 16),
('right_ankle', 18, 17)
]
# fmt: on
CLASS_NAMES = [CLASS_INFO[0] for CLASS_INFO in CLASS_INFOS]
KEYPOINTS_NAMES = [KEYPOINTS_INFO[0] for KEYPOINTS_INFO in KEYPOINTS_INFOS]
CLASS_DICT = {CLASS_INFO[0]: CLASS_INFO[2] for CLASS_INFO in CLASS_INFOS}
CATEGORY_ID2CLASS_NAMES = {CLASS_INFO[1]: CLASS_INFO[0] for CLASS_INFO in CLASS_INFOS}
KEYPOINTS_DICT = {KEYPOINTS_INFO[0]: KEYPOINTS_INFO[1] for KEYPOINTS_INFO in KEYPOINTS_INFOS}
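# Examples of the lookup tables above:
#   CLASS_DICT["person"] == 0                  (class name -> contiguous training id)
#   CATEGORY_ID2CLASS_NAMES[1] == "person"     (COCO category id -> class name)
#   KEYPOINTS_DICT["nose"] == 2                (keypoint name -> id from the table above)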
# Feature schemas (datasets.Features) for each supported task.
detection_features = datasets.Features(
{
"id": datasets.Value("int32"),
"image": datasets.Value("string"),
"height": datasets.Value("int32"),
"width": datasets.Value("int32"),
"objects": datasets.features.Sequence(
{
"bboxes": datasets.Sequence(datasets.Value("float32")),
"classes": datasets.features.ClassLabel(names=CLASS_NAMES),
}
),
}
)
segmentation_features = datasets.Features(
{
"id": datasets.Value("int32"),
"image": datasets.Value("string"),
"height": datasets.Value("int32"),
"width": datasets.Value("int32"),
"objects": datasets.features.Sequence(
{
"bboxes": datasets.Sequence(datasets.Value("float32")),
"classes": datasets.features.ClassLabel(names=CLASS_NAMES),
                "segmentation": datasets.Sequence(datasets.Value("float32")),
                "iscrowd": datasets.Value("int32"),
}
),
}
)
captions_features = datasets.Features(
{
"id": datasets.Value("int32"),
"image": datasets.Value("string"),
"height": datasets.Value("int32"),
"width": datasets.Value("int32"),
"captions": datasets.features.Sequence(datasets.Value("string")),
}
)
keypoint_features = datasets.Features(
    # Note: this schema may be slightly incomplete, because the keypoint category labels from the
    # annotations have not been added yet. That part is a bit involved and will be refined later;
    # the basic fields below are already correct.
{
"id": datasets.Value("int32"),
"image": datasets.Value("string"),
"height": datasets.Value("int32"),
"width": datasets.Value("int32"),
"objects": datasets.features.Sequence(
{
"bboxes": datasets.Sequence(datasets.Value("float32")),
"classes": datasets.features.ClassLabel(names=CLASS_NAMES),
                "keypoints": datasets.Sequence(datasets.Value("float32")),
                "num_keypoints": datasets.Value("int32"),
}
),
}
)
_DATASET_FEATURES = {
    "detection": detection_features,
    "segmentation": segmentation_features,
    "caption": captions_features,
    "keypoint": keypoint_features,
}
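# Illustrative shape of one "detection" example yielded by _generate_examples below
# (values are invented; "bboxes" keeps COCO-style [x, y, width, height] boxes):
# {
#     "id": 0,
#     "image": "/abs/path/to/train2017/000000000000.jpg",
#     "height": 427,
#     "width": 640,
#     "objects": [{"bboxes": [473.07, 395.93, 38.65, 28.67], "classes": "dog"}],
# }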
def get_captions_annotation(captions_path):
    with open(captions_path, "r") as f:
anno_captions = json.load(f)
anno_infos = defaultdict(list)
images_infos = list()
for caption_info in anno_captions['annotations']:
# caption_info={'image_id': 179765, 'id': 38, 'caption': 'A black Honda motorcycle parked in front of a garage.'}
caption = caption_info['caption']
image_id = caption_info['image_id']
anno_infos[image_id].append(caption)
for image in anno_captions['images']:
# image={'license': 4, 'file_name': '000000397133.jpg', 'coco_url': 'http://images.cocodataset.org/val2017/000000397133.jpg', 'height': 427, 'width': 640, 'date_captured': '2013-11-14 17:02:52', 'flickr_url': 'http://farm7.staticflickr.com/6116/6255196340_da26cf2c9e_z.jpg', 'id': 397133}
        images_infos.append(
            {
                "image_name": image["file_name"],
                "height": image["height"],
                "width": image["width"],
                "image_id": image["id"],
            }
        )
return anno_infos, images_infos
def get_instances_annotation(instances_path):
    with open(instances_path, "r") as f:
        anno_instances = json.load(f)
    # defaultdict, so images without any instance annotations map to an empty list downstream
    anno_infos = defaultdict(list)
    images_infos = list()
    for instance_info in anno_instances["annotations"]:
# instance_info = {'segmentation': [[510.66, 423.01, 511.72, 420.03, 510.45, 416.0, 510.34, 413.02,
# 510.77, 410.26, 510.77, 407.5, 510.34, 405.16, 511.51, 402.83, 511.41, 400.49, 510.24, 398.16,
# 509.39, 397.31, 504.61, 399.22, 502.17, 399.64, 500.89, 401.66, 500.47, 402.08, 499.09, 401.87,
# 495.79, 401.98, 490.59, 401.77, 488.79, 401.77, 485.39, 398.58, 483.9, 397.31, 481.56, 396.35,
# 478.48, 395.93, 476.68, 396.03, 475.4, 396.77, 473.92, 398.79, 473.28, 399.96, 473.49, 401.87,
# 474.56, 403.47, 473.07, 405.59, 473.39, 407.71, 476.68, 409.41, 479.23, 409.73, 481.56, 410.69,
# 480.4, 411.85, 481.35, 414.93, 479.86, 418.65, 477.32, 420.03, 476.04, 422.58, 479.02, 422.58,
# 480.29, 423.01, 483.79, 419.93, 486.66, 416.21, 490.06, 415.57, 492.18, 416.85, 491.65, 420.24,
# 492.82, 422.9, 493.56, 424.39, 496.43, 424.6, 498.02, 423.01, 498.13, 421.31, 497.07, 420.03,
# 497.07, 415.15, 496.33, 414.51, 501.1, 411.96, 502.06, 411.32, 503.02, 415.04, 503.33, 418.12,
# 501.1, 420.24, 498.98, 421.63, 500.47, 424.39, 505.03, 423.32, 506.2, 421.31, 507.69, 419.5,
# 506.31, 423.32, 510.03, 423.01, 510.45, 423.01]], 'area': 702.1057499999998, 'iscrowd': 0,
# 'image_id': 289343, 'bbox': [473.07, 395.93, 38.65, 28.67], 'category_id': 18, 'id': 1768}
        bbox = instance_info["bbox"]
        image_id = instance_info["image_id"]
        segmentation = instance_info["segmentation"]
        # Crowd annotations (iscrowd=1) store RLE masks as dicts rather than polygon lists;
        # keep the first polygon for polygon-style annotations and fall back to an empty list otherwise.
        segmentation = segmentation[0] if isinstance(segmentation, list) else []
        anno_infos[image_id].append(
            {
                "segmentation": segmentation,
                "bbox": bbox,
                "iscrowd": instance_info["iscrowd"],
                "classes": CATEGORY_ID2CLASS_NAMES[instance_info["category_id"]],
            }
        )
for image in anno_instances['images']:
# image={'license': 4, 'file_name': '000000397133.jpg',
# 'coco_url': 'http://images.cocodataset.org/val2017/000000397133.jpg',
# 'height': 427, 'width': 640, 'date_captured': '2013-11-14 17:02:52',
# 'flickr_url': 'http://farm7.staticflickr.com/6116/6255196340_da26cf2c9e_z.jpg', 'id': 397133}
        images_infos.append(
            {
                "image_name": image["file_name"],
                "height": image["height"],
                "width": image["width"],
                "image_id": image["id"],
            }
        )
return anno_infos, images_infos
def get_keypoints_annotation(keypoints_path):
    with open(keypoints_path, "r") as f:
        anno_keypoints = json.load(f)
    # defaultdict, so images without any person annotations map to an empty list downstream
    anno_infos = defaultdict(list)
    images_infos = list()
for keypoint_info in anno_keypoints['annotations']:
# keypoint_info = {'segmentation': [[63.2, 229.21, 65.73, 208.99, 70.79, 187.92, 78.37, 162.64, 84.27, 146.63, 84.27, 132.3, 75.84, 109.55, 90.17, 97.75, 104.49, 96.91, 114.61, 102.81, 123.88, 123.88, 137.36, 136.52, 153.37, 150.84, 146.63, 169.38, 144.1, 180.34, 142.42, 190.45, 137.36, 209.83, 139.89, 230.9, 128.09, 232.58, 97.75, 235.11, 81.74, 237.64, 87.64, 208.99, 85.96, 186.24, 78.37, 198.88, 75.84, 224.16, 68.26, 239.33, 60.67, 230.9]], 'num_keypoints': 12, 'area': 8096.3096, 'iscrowd': 0, 'keypoints': [100, 135, 2, 102, 127, 2, 94, 131, 2, 112, 121, 2, 91, 132, 2, 137, 148, 2, 81, 158, 2, 150, 179, 1, 76, 193, 2, 0, 0, 0, 70, 234, 2, 136, 242, 1, 104, 246, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'image_id': 275749, 'bbox': [60.67, 96.91, 92.7, 142.42], 'category_id': 1, 'id': 232027}
        bbox = keypoint_info["bbox"]
        image_id = keypoint_info["image_id"]
        anno_infos[image_id].append(
            {
                "bbox": bbox,
                "classes": CATEGORY_ID2CLASS_NAMES[keypoint_info["category_id"]],
                "keypoints": keypoint_info["keypoints"],
                "num_keypoints": keypoint_info["num_keypoints"],
            }
        )
for image in anno_keypoints['images']:
# image={'license': 4, 'file_name': '000000397133.jpg', 'coco_url': 'http://images.cocodataset.org/val2017/000000397133.jpg', 'height': 427, 'width': 640, 'date_captured': '2013-11-14 17:02:52', 'flickr_url': 'http://farm7.staticflickr.com/6116/6255196340_da26cf2c9e_z.jpg', 'id': 397133}
        images_infos.append(
            {
                "image_name": image["file_name"],
                "height": image["height"],
                "width": image["width"],
                "image_id": image["id"],
            }
        )
return anno_infos, images_infos
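
# Each get_*_annotation helper above returns a pair (anno_infos, images_infos):
#   anno_infos:   image_id -> list of caption strings (captions) or per-object dicts (instances / keypoints)
#   images_infos: list of {"image_name", "height", "width", "image_id"} dicts taken from the "images" section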
class MINICOCOConfig(datasets.BuilderConfig):
def __init__(self, data_name, task_name, **kwargs):
"""
Args:
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(**kwargs)
assert data_name in ["minicoco2017"] and task_name in [
"detection",
"segmentation",
"caption",
"keypoint"
]
self.data_name = data_name
self.task_name = task_name
class MiniCOCODataset(datasets.GeneratorBasedBuilder):
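    """MiniCOCO 2017 dataset builder with one config per task."""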
BUILDER_CONFIGS = [
MINICOCOConfig(
name="minicoco2017_detection",
version=datasets.Version("1.0.0", ""),
description="minicoco2017 detection dataset",
data_name="minicoco2017",
task_name="detection",
),
MINICOCOConfig(
name="minicoco2017_segmentation",
version=datasets.Version("1.0.0", ""),
description="minicoco2017 segmentation dataset",
data_name="minicoco2017",
task_name="segmentation",
),
MINICOCOConfig(
name="minicoco2017_caption",
version=datasets.Version("1.0.0", ""),
description="minicoco2017 caption dataset",
data_name="minicoco2017",
task_name="caption",
),
MINICOCOConfig(
name="minicoco2017_keypoint",
version=datasets.Version("1.0.0", ""),
description="minicoco2017 keypoint dataset",
data_name="minicoco2017",
task_name="keypoint",
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=_DATASET_FEATURES[self.config.task_name],
            # No default supervised_keys for these multi-task annotation formats.
            supervised_keys=None,
homepage="https://fuliucansheng.github.io/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
downloaded_files = dl_manager.download_and_extract(_URLS[self.config.data_name])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": downloaded_files, "split": "train"},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"filepath": downloaded_files, "split": "val"},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": downloaded_files, "split": "test"},
),
]
def _generate_examples(self, filepath, split):
"""This function returns the examples in the raw (text) form."""
# filepath = os.path.join(filepath, os.listdir(filepath)[0]) # mine add
logging.info("generating examples from = %s, split = %s", filepath, split)
task_name = self.config.task_name
if task_name == "caption":
captions_path = os.path.join(filepath, "annotations", "captions_" + split + "2017.json")
anno_infos, images_infos = get_captions_annotation(captions_path)
for id_, image in enumerate(images_infos):
image_path = os.path.join(filepath, split + "2017", image["image_name"])
if not os.path.exists(image_path):
continue
example = {
"id": id_,
"image": os.path.abspath(image_path),
"height": image["height"],
"width": image["width"],
"captions": anno_infos[image['image_id']],
}
yield id_, example
elif task_name=="detection":
instances_path = os.path.join(filepath, "annotations", "instances_" + split + "2017.json")
anno_infos, images_infos = get_instances_annotation(instances_path)
for id_, image in enumerate(images_infos):
image_path = os.path.join(filepath, split + "2017", image["image_name"])
if not os.path.exists(image_path):
continue
example = {
"id": id_,
"image": os.path.abspath(image_path),
"height": image["height"],
"width": image["width"],
"objects":[
{
"bboxes": object_info["bbox"],
"classes": object_info["classes"]
}
for object_info in anno_infos[image['image_id']]
]
}
yield id_, example
elif task_name=="segmentation":
instances_path = os.path.join(filepath, "annotations", "instances_" + split + "2017.json")
anno_infos, images_infos = get_instances_annotation(instances_path)
for id_, image in enumerate(images_infos):
image_path = os.path.join(filepath, split + "2017", image["image_name"])
if not os.path.exists(image_path):
continue
example = {
"id": id_,
"image": os.path.abspath(image_path),
"height": image["height"],
"width": image["width"],
"objects":[
{
"bboxes": object_info["bbox"],
"classes": object_info["classes"],
                            "segmentation": object_info["segmentation"],
                            "iscrowd": object_info["iscrowd"],
}
for object_info in anno_infos[image['image_id']]
]
}
yield id_, example
elif task_name=="keypoint":
keypoints_path = os.path.join(filepath, "annotations", "person_keypoints_" + split + "2017.json")
anno_infos, images_infos = get_keypoints_annotation(keypoints_path)
for id_, image in enumerate(images_infos):
image_path = os.path.join(filepath, split + "2017", image["image_name"])
if not os.path.exists(image_path):
continue
example = {
"id": id_,
"image": os.path.abspath(image_path),
"height": image["height"],
"width": image["width"],
"objects":[
{
"bboxes": object_info["bbox"],
"classes": object_info["classes"],
                            "keypoints": object_info["keypoints"],
                            "num_keypoints": object_info["num_keypoints"],
}
for object_info in anno_infos[image['image_id']]
]
}
yield id_, example
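

if __name__ == "__main__":
    # Minimal local smoke test: a sketch, not part of the loader itself.
    # Assumptions: the `minicoco.tar.gz` archive referenced in _URLS sits next to this
    # script, and the installed `datasets` version still supports script-based builders
    # (recent releases may additionally require `trust_remote_code=True`).
    dataset = datasets.load_dataset(__file__, name="minicoco2017_detection", split="train")
    print(dataset)
    first = dataset[0]
    # With Sequence-of-dict features, "objects" comes back as a dict of lists.
    print(first["image"], first["height"], first["width"])
    print(first["objects"]["classes"][:3], first["objects"]["bboxes"][:3])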