|
import itertools |
|
import logging |
|
import os.path as osp |
|
import tempfile |
|
from collections import OrderedDict |
|
|
|
import mmcv |
|
import numpy as np |
|
import pycocotools
import pycocotools.mask
|
from mmcv.utils import print_log |
|
from pycocotools.coco import COCO |
|
|
|
from .cocoeval import COCOeval |
|
from terminaltables import AsciiTable |
|
|
|
from mmdet.core import eval_recalls |
|
from .builder import DATASETS |
|
from mmdet.datasets.custom import CustomDataset |
|
|
|
import imagesize |
|
from concurrent.futures import ProcessPoolExecutor |
|
import multiprocessing as mp |
|
from copy import deepcopy |
|
from tqdm import tqdm |
|
|
|
@DATASETS.register_module() |
|
class WaltDataset(CustomDataset): |
|
|
|
CLASSES = ('vehicle', 'occluded_vehicle', 'car', 'motorcycle', 'airplane', 'bus', |
|
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', |
|
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', |
|
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', |
|
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', |
|
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', |
|
'baseball glove', 'skateboard', 'surfboard', 'tennis racket', |
|
'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', |
|
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', |
|
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', |
|
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', |
|
'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', |
|
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', |
|
'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') |
|
|
|
def load_annotations(self, ann_file): |
|
"""Load annotation from COCO style annotation file. |
|
|
|
Args: |
|
ann_file (str): Path of annotation file. |
|
|
|
Returns: |
|
list[dict]: Annotation info from COCO api. |
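
        Example entry (illustrative values)::

            {'id': 0, 'file_name': 'images/cam1_000001.jpg',
             'filename': 'images/cam1_000001.jpg',
             'width': 1920, 'height': 1080}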
|
""" |
|
if not getattr(pycocotools, '__version__', '0') >= '12.0.2': |
|
raise AssertionError( |
|
'Incompatible version of pycocotools is installed. ' |
|
'Run pip uninstall pycocotools first. Then run pip ' |
|
'install mmpycocotools to install open-mmlab forked ' |
|
'pycocotools.') |
|
        # build the cached annotation path with osp.join so the existence
        # check works whether or not ``ann_file`` carries a trailing slash
        ann_json = osp.join(ann_file, 'ann.json')
        if not osp.exists(ann_json):
            self.save_json(ann_file)

        self.coco = COCO(ann_json)
|
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) |
|
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} |
|
self.img_ids = self.coco.get_img_ids() |
|
data_infos = [] |
|
total_ann_ids = [] |
|
for i in self.img_ids: |
|
info = self.coco.load_imgs([i])[0] |
|
info['filename'] = info['file_name'] |
|
data_infos.append(info) |
|
ann_ids = self.coco.get_ann_ids(img_ids=[i]) |
|
total_ann_ids.extend(ann_ids) |
|
assert len(set(total_ann_ids)) == len( |
|
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!" |
|
return data_infos |
|
|
|
def save_json(self, ann_file): |
|
import glob |
|
|
import time |
|
data = {} |
|
|
|
data["info"] = { |
|
'url': "https://www.andrew.cmu.edu/user/dnarapur/", |
|
'year': 2018, |
|
'date_created': time.strftime("%a, %d %b %Y %H:%M:%S +0000", |
|
time.localtime()), |
|
'description': "This is a dataset for occlusion detection.", |
|
'version': '1.0', |
|
'contributor': 'CMU'} |
|
data["categories"] = [{'name': 'car','id': 0,'supercategory': 'car'}] |
|
data["licenses"] = [{'id': 1, |
|
'name': "unknown", |
|
'url': "unknown"}] |
|
data["images"] = [] |
|
data["annotations"] = [] |
|
|
|
|
|
self.data_infs = [] |
|
self.ann_file = ann_file |
|
|
|
count = 0 |
|
|
|
img_folder = ann_file |
|
|
|
        for img_name in tqdm(
                sorted(glob.glob(img_folder + '/images/*')),
                desc='Converting CWALT to COCO format'):
            cam_name = img_folder.split('/')[-1]
            width, height = imagesize.get(img_name)
            img_name = img_name.split('/')[-1]
            info = dict(
                license=3,
                height=height,
                width=width,
                file_name=img_name,
                date_captured=img_name.split('.')[0],
                id=count,
                filename=img_name,
                camname=cam_name)
            self.data_infs.append(info)

            data["images"].append({
                'flickr_url': "unknown",
                'coco_url': "unknown",
                'file_name': 'images/' + img_name,
                'id': count,
                'license': 1,
                'date_captured': "unknown",
                'width': width,
                'height': height})
            count = count + 1
|
|
|
|
|
|
|
|
|
|
|
obj_id = 0 |
|
|
|
        with ProcessPoolExecutor(
                max_workers=max(1, mp.cpu_count() - 1)) as executor:
            img_names = glob.glob(img_folder + '/images/*')
            # one task per image; ``range(len(img_names) - 1)`` in the
            # original silently dropped the last image
            for ann_in, count in executor.map(self.get_ann_info_local,
                                              range(len(img_names))):
|
|
|
|
|
|
|
                for loop in range(len(ann_in['bboxes'])):
                    bbox = ann_in['bboxes'][loop]  # [x1, y1, x2, y2]
                    segmentation = ann_in['masks'][loop]

                    data["annotations"].append({
                        'image_id': count,
                        'category_id': 0,
                        'iscrowd': 0,
                        'occ_percentage': ann_in['occ_percentage'][loop],
                        'id': obj_id,
                        # COCO stores area and bbox as width/height, while
                        # the boxes parsed above are xyxy
                        'area': int((bbox[2] - bbox[0]) * (bbox[3] - bbox[1])),
                        'bbox': [int(bbox[0]), int(bbox[1]),
                                 int(bbox[2]) - int(bbox[0]),
                                 int(bbox[3]) - int(bbox[1])],
                        'segmentation': [segmentation]
                    })
                    obj_id = obj_id + 1
|
''' |
|
|
|
|
|
coco_kins=COCO('data/parking_real/kins/update_train_2020.json') |
|
catIds = [1,2]#coco_kins.getCatIds(catNms=['car']); |
|
imgIds = coco_kins.getImgIds(catIds=catIds ); |
|
|
|
count = 0 |
|
count_obj = 0 |
|
for id_1 in imgIds: |
|
img = coco_kins.loadImgs(id_1)[0] |
|
|
|
data["images"].append({'flickr_url': "unknown", |
|
'coco_url': "unknown", |
|
#'file_name': cam_name+'/images/' +img_name, |
|
'file_name': '../kins/'+img['file_name'], |
|
'id': 1000000+count, |
|
'license':1, |
|
#'has_visible_keypoints':True, |
|
'date_captured': "unknown", |
|
'width': img['width'], |
|
'height': img['height']}) |
|
annIds = coco_kins.getAnnIds(imgIds=id_1, catIds=catIds, iscrowd=None) |
|
for id_2 in annIds: |
|
ann = coco_kins.loadAnns(id_2) |
|
data["annotations"].append({ |
|
'image_id': 1000000+count, |
|
'category_id': 0, |
|
'iscrowd': 0, |
|
'occ_percentage': ann[0]['i_area']/ann[0]['a_area']*100, |
|
'id': 1000000+count_obj, |
|
'area': ann[0]['a_area'], |
|
'bbox': ann[0]['a_bbox'], |
|
'segmentation': [{'full':ann[0]['a_segm'],'visible':ann[0]['i_segm']}] |
|
}) |
|
count_obj = count_obj+1 |
|
count= count+1 |
|
''' |
|
''' |
|
|
|
for img_folder in sorted(glob.glob(ann_file + '/*')): |
|
for img_name in sorted(glob.glob(img_folder + '/images/*')): |
|
#for img_folder in sorted(glob.glob(ann_file.replace('GT_data','images') + '/*')): |
|
# for i in sorted(glob.glob(ann_file + '*')): |
|
ann_in = self.get_ann_info_local(count) |
|
for loop in range(len(ann_in['bboxes'])): |
|
bbox = ann_in['bboxes'][loop] |
|
segmentation = ann_in['masks'][loop] |
|
|
|
data["annotations"].append({ |
|
'image_id': count, |
|
'category_id': 0, |
|
'iscrowd': 0, |
|
'id': obj_id, |
|
'area': int(bbox[2]*bbox[3]), |
|
'bbox': [int(bbox[0]), int(bbox[1]), int(bbox[2])-int(bbox[0]),int(bbox[3])-int(bbox[1])], |
|
'segmentation': [segmentation] |
|
}) |
|
obj_id = obj_id + 1 |
|
count = count+1 |
|
#if count<2 and count > 30: |
|
#if count > 5: |
|
# break |
|
#break |
|
''' |
|
        import json
        with open(osp.join(ann_file, 'ann.json'), 'w') as f:
            json.dump(data, f)
|
|
|
def get_ann_info_local(self, idx): |
|
"""Get COCO annotation by index. |
|
|
|
Args: |
|
idx (int): Index of data. |
|
|
|
Returns: |
|
dict: Annotation info of specified index. |
|
""" |
|
return self._parse_ann_info_local(idx) |
|
|
|
def _parse_ann_info_local(self, idx): |
|
"""Parse bbox and mask annotation. |
|
|
|
Args: |
|
ann_info (list[dict]): Annotation info of an image. |
|
with_mask (bool): Whether to parse mask annotations. |
|
|
|
Returns: |
|
dict: A dict containing the following keys: bboxes, bboxes_ignore,\ |
|
labels, masks, seg_map. "masks" are raw annotations and not \ |
|
decoded into binary masks. |
|
""" |
|
        try:
            img_info = self.data_infs[idx]
        except IndexError:
            # out-of-range indices (e.g. from the recursive fallback at the
            # end of this method) wrap around to the first image
            img_info = self.data_infs[0]
|
|
|
gt_bboxes = [] |
|
gt_labels = [] |
|
gt_bboxes_ignore = [] |
|
gt_masks_ann = [] |
|
gt_occ_percentage = [] |
|
|
|
|
|
|
|
|
|
|
        try:
            seg_all = np.load(
                self.ann_file + img_info['camname'] + '/Segmentation/' +
                img_info['filename'].replace('jpg', 'npz'))
            for loop in range(seg_all['mask'].shape[0]):
|
                seg_o = seg_all['mask'][loop]
                # visible (modal) mask: pixels equal to 1 in the instance map
                segmentations_original, encoded_ground_truth_original, \
                    ground_truth_binary_mask_original = \
                    self.get_segmentation(seg_o, 1)
                # full (amodal) mask: every non-zero pixel of the instance map
                seg_o[seg_o > 0] = 1
                segmentations, encoded_ground_truth, ground_truth_binary_mask = \
                    self.get_segmentation(seg_o, 1)
|
|
|
                x1, y1, w, h = pycocotools.mask.toBbox(encoded_ground_truth)
                x1_o, y1_o, w_o, h_o = pycocotools.mask.toBbox(
                    encoded_ground_truth_original)
                inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
                inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
                bbox = [x1, y1, x1 + w, y1 + h]
                if len(segmentations_original) == 0:
                    continue
                # skip instances entirely outside the image *before*
                # appending, so bboxes/labels/masks stay aligned
                if inter_w * inter_h == 0:
                    continue
                if w != w_o or h != h_o or len(
                        np.unique(ground_truth_binary_mask -
                                  ground_truth_binary_mask_original)) > 1:
                    # visible and full masks differ: the instance is occluded
                    gt_masks_ann.append({
                        'visible': segmentations_original,
                        'full': segmentations})
                    gt_bboxes.append(bbox)
                    gt_labels.append(0)
                    gt_occ_percentage.append(
                        100 - np.sum(ground_truth_binary_mask_original) /
                        np.sum(ground_truth_binary_mask) * 100)
                else:
                    gt_masks_ann.append({
                        'visible': segmentations,
                        'full': segmentations})
                    gt_bboxes.append(bbox)
                    gt_labels.append(0)
                    gt_occ_percentage.append(0)
|
|
|
|
|
|
|
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
            seg_map = img_info['filename']
        except Exception:
            print('annotations failed to load for', img_info['filename'])
            gt_bboxes = []

        if len(gt_bboxes) == 0:
            # fall back to the next image so the parallel caller always
            # receives a usable annotation record
            return self._parse_ann_info_local(idx + 1)
|
|
|
ann = dict( |
|
bboxes=gt_bboxes, |
|
labels=gt_labels, |
|
bboxes_ignore=gt_bboxes_ignore, |
|
masks=gt_masks_ann, |
|
occ_percentage=gt_occ_percentage, |
|
seg_map=seg_map, |
|
image_name=img_info['filename']) |
|
|
|
return ann, idx |
|
|
|
def get_segmentation(self, seg, idx): |
|
        """Encode one instance mask as COCO polygons and an RLE."""
        ground_truth_binary_mask = np.zeros_like(seg)
        ground_truth_binary_mask[seg == idx] = 255
        ground_truth_binary_mask = ground_truth_binary_mask[:, :, 0]
        fortran_ground_truth_binary_mask = np.asfortranarray(
            ground_truth_binary_mask)
        encoded_ground_truth = pycocotools.mask.encode(
            fortran_ground_truth_binary_mask)
        from skimage import measure

        # trace 0.5-level contours to obtain COCO-style polygons
        contours = measure.find_contours(ground_truth_binary_mask, 0.5)
        segmentations = []
        for contour in contours:
            # find_contours yields (row, col); COCO polygons expect (x, y)
            contour = np.flip(contour, axis=1)
            segmentations.append(contour.ravel().tolist())
        return segmentations, encoded_ground_truth, ground_truth_binary_mask
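
    # Sketch of how get_segmentation is used above (hypothetical shapes):
    #   seg = np.load('.../Segmentation/frame.npz')['mask'][k]  # (H, W, 3)
    #   polys, rle, binmask = self.get_segmentation(seg, 1)
    #   polys   -> list of flat [x0, y0, x1, y1, ...] COCO polygons
    #   rle     -> pycocotools RLE dict (usable with mask.toBbox / mask.area)
    #   binmask -> (H, W) array with 255 on the instance, 0 elsewhere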
|
|
|
|
|
|
|
def get_ann_info(self, idx): |
|
"""Get COCO annotation by index. |
|
|
|
Args: |
|
idx (int): Index of data. |
|
|
|
Returns: |
|
dict: Annotation info of specified index. |
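
        Example (illustrative, assuming a built dataset instance)::

            >>> ann = dataset.get_ann_info(0)
            >>> sorted(ann.keys())
            ['bboxes', 'bboxes_ignore', 'labels', 'masks', 'seg_map']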
|
""" |
|
|
|
img_id = self.data_infos[idx]['id'] |
|
ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) |
|
ann_info = self.coco.load_anns(ann_ids) |
|
return self._parse_ann_info(self.data_infos[idx], ann_info) |
|
|
|
def get_cat_ids(self, idx): |
|
"""Get COCO category ids by index. |
|
|
|
Args: |
|
idx (int): Index of data. |
|
|
|
Returns: |
|
list[int]: All categories in the image of specified index. |
|
""" |
|
|
|
img_id = self.data_infos[idx]['id'] |
|
ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) |
|
ann_info = self.coco.load_anns(ann_ids) |
|
return [ann['category_id'] for ann in ann_info] |
|
|
|
def _filter_imgs(self, min_size=32): |
|
"""Filter images too small or without ground truths.""" |
|
valid_inds = [] |
|
|
|
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) |
|
|
|
ids_in_cat = set() |
|
for i, class_id in enumerate(self.cat_ids): |
|
ids_in_cat |= set(self.coco.cat_img_map[class_id]) |
|
|
|
|
|
ids_in_cat &= ids_with_ann |
|
|
|
valid_img_ids = [] |
|
for i, img_info in enumerate(self.data_infos): |
|
img_id = self.img_ids[i] |
|
if self.filter_empty_gt and img_id not in ids_in_cat: |
|
continue |
|
if min(img_info['width'], img_info['height']) >= min_size: |
|
valid_inds.append(i) |
|
valid_img_ids.append(img_id) |
|
self.img_ids = valid_img_ids |
|
return valid_inds |
|
|
|
def _parse_ann_info(self, img_info, ann_info): |
|
"""Parse bbox and mask annotation. |
|
|
|
Args: |
|
ann_info (list[dict]): Annotation info of an image. |
|
with_mask (bool): Whether to parse mask annotations. |
|
|
|
Returns: |
|
dict: A dict containing the following keys: bboxes, bboxes_ignore,\ |
|
labels, masks, seg_map. "masks" are raw annotations and not \ |
|
decoded into binary masks. |
|
""" |
|
gt_bboxes = [] |
|
gt_labels = [] |
|
gt_bboxes_ignore = [] |
|
gt_masks_ann = [] |
|
for i, ann in enumerate(ann_info): |
|
if ann.get('ignore', False): |
|
continue |
|
x1, y1, w, h = ann['bbox'] |
|
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0)) |
|
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0)) |
|
if inter_w * inter_h == 0: |
|
continue |
|
if ann['area'] <= 0 or w < 1 or h < 1: |
|
continue |
|
if ann['category_id'] not in self.cat_ids: |
|
continue |
|
bbox = [x1, y1, x1 + w, y1 + h] |
|
|
|
if ann.get('iscrowd', False): |
|
gt_bboxes_ignore.append(bbox) |
|
else: |
|
gt_bboxes.append(bbox) |
|
gt_labels.append(self.cat2label[ann['category_id']]) |
|
|
|
|
|
            seg = ann.get('segmentation', None)
            try:
                gt_masks_ann.append({
                    'visible': seg[0]['visible'],
                    'full': seg[0]['full']})
            except (KeyError, TypeError):
                # some annotations only carry the visible (modal) mask
                gt_masks_ann.append({'visible': seg[0]['visible']})
|
|
|
|
|
|
|
if gt_bboxes: |
|
gt_bboxes = np.array(gt_bboxes, dtype=np.float32) |
|
gt_labels = np.array(gt_labels, dtype=np.int64) |
|
else: |
|
gt_bboxes = np.zeros((0, 4), dtype=np.float32) |
|
gt_labels = np.array([], dtype=np.int64) |
|
|
|
if gt_bboxes_ignore: |
|
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) |
|
else: |
|
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) |
|
|
|
seg_map = img_info['filename'].replace('jpg', 'png') |
|
|
|
ann = dict( |
|
bboxes=gt_bboxes, |
|
labels=gt_labels, |
|
bboxes_ignore=gt_bboxes_ignore, |
|
masks=gt_masks_ann, |
|
seg_map=seg_map) |
|
|
|
return ann |
|
|
|
def xyxy2xywh(self, bbox): |
|
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO |
|
evaluation. |
|
|
|
Args: |
|
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in |
|
``xyxy`` order. |
|
|
|
Returns: |
|
list[float]: The converted bounding boxes, in ``xywh`` order. |
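
        Example (illustrative)::

            >>> import numpy as np
            >>> dataset.xyxy2xywh(np.array([10., 20., 50., 80.]))
            [10.0, 20.0, 40.0, 60.0]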
|
""" |
|
|
|
_bbox = bbox.tolist() |
|
return [ |
|
_bbox[0], |
|
_bbox[1], |
|
_bbox[2] - _bbox[0], |
|
_bbox[3] - _bbox[1], |
|
] |
|
|
|
def _proposal2json(self, results): |
|
"""Convert proposal results to COCO json style.""" |
|
json_results = [] |
|
for idx in range(len(self)): |
|
img_id = self.img_ids[idx] |
|
bboxes = results[idx] |
|
for i in range(bboxes.shape[0]): |
|
data = dict() |
|
data['image_id'] = img_id |
|
data['bbox'] = self.xyxy2xywh(bboxes[i]) |
|
data['score'] = float(bboxes[i][4]) |
|
data['category_id'] = 1 |
|
json_results.append(data) |
|
return json_results |
|
|
|
def _det2json(self, results): |
|
"""Convert detection results to COCO json style.""" |
|
json_results = [] |
|
for idx in range(len(self)): |
|
img_id = self.img_ids[idx] |
|
result = results[idx] |
|
for label in range(len(result)): |
|
bboxes = result[label] |
|
for i in range(bboxes.shape[0]): |
|
data = dict() |
|
data['image_id'] = img_id |
|
data['bbox'] = self.xyxy2xywh(bboxes[i]) |
|
data['score'] = float(bboxes[i][4]) |
|
data['category_id'] = self.cat_ids[label] |
|
json_results.append(data) |
|
return json_results |
|
|
|
def _segm2json(self, results): |
|
"""Convert instance segmentation results to COCO json style.""" |
|
bbox_json_results = [] |
|
segm_json_results = [] |
|
for idx in range(len(self)): |
|
img_id = self.img_ids[idx] |
|
det, seg = results[idx] |
|
for label in range(len(det)): |
|
|
|
bboxes = det[label] |
|
for i in range(bboxes.shape[0]): |
|
data = dict() |
|
data['image_id'] = img_id |
|
data['bbox'] = self.xyxy2xywh(bboxes[i]) |
|
data['score'] = float(bboxes[i][4]) |
|
data['category_id'] = self.cat_ids[label] |
|
bbox_json_results.append(data) |
|
|
|
|
|
|
|
if isinstance(seg, tuple): |
|
segms = seg[0][label] |
|
mask_score = seg[1][label] |
|
else: |
|
segms = seg[label] |
|
mask_score = [bbox[4] for bbox in bboxes] |
|
for i in range(bboxes.shape[0]): |
|
data = dict() |
|
data['image_id'] = img_id |
|
data['bbox'] = self.xyxy2xywh(bboxes[i]) |
|
data['score'] = float(mask_score[i]) |
|
data['category_id'] = self.cat_ids[label] |
|
if isinstance(segms[i]['counts'], bytes): |
|
segms[i]['counts'] = segms[i]['counts'].decode() |
|
data['segmentation'] = segms[i] |
|
segm_json_results.append(data) |
|
return bbox_json_results, segm_json_results |
|
|
|
def results2json(self, results, outfile_prefix): |
|
"""Dump the detection results to a COCO style json file. |
|
|
|
There are 3 types of results: proposals, bbox predictions, mask |
|
predictions, and they have different data types. This method will |
|
automatically recognize the type, and dump them to json files. |
|
|
|
Args: |
|
results (list[list | tuple | ndarray]): Testing results of the |
|
dataset. |
|
outfile_prefix (str): The filename prefix of the json files. If the |
|
prefix is "somepath/xxx", the json files will be named |
|
"somepath/xxx.bbox.json", "somepath/xxx.segm.json", |
|
"somepath/xxx.proposal.json". |
|
|
|
Returns: |
|
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \ |
|
values are corresponding filenames. |
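
        Example (illustrative, with a hypothetical prefix)::

            >>> dataset.results2json(det_results, 'work_dirs/walt/results')
            {'bbox': 'work_dirs/walt/results.bbox.json',
             'proposal': 'work_dirs/walt/results.bbox.json'}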
|
""" |
|
result_files = dict() |
|
if isinstance(results[0], list): |
|
json_results = self._det2json(results) |
|
result_files['bbox'] = f'{outfile_prefix}.bbox.json' |
|
result_files['proposal'] = f'{outfile_prefix}.bbox.json' |
|
mmcv.dump(json_results, result_files['bbox']) |
|
elif isinstance(results[0], tuple): |
|
json_results = self._segm2json(results) |
|
result_files['bbox'] = f'{outfile_prefix}.bbox.json' |
|
result_files['proposal'] = f'{outfile_prefix}.bbox.json' |
|
result_files['segm'] = f'{outfile_prefix}.segm.json' |
|
mmcv.dump(json_results[0], result_files['bbox']) |
|
mmcv.dump(json_results[1], result_files['segm']) |
|
elif isinstance(results[0], np.ndarray): |
|
json_results = self._proposal2json(results) |
|
result_files['proposal'] = f'{outfile_prefix}.proposal.json' |
|
mmcv.dump(json_results, result_files['proposal']) |
|
else: |
|
raise TypeError('invalid type of results') |
|
return result_files |
|
|
|
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None): |
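        """Evaluate proposal recall directly, without json round-tripping.

        A thin wrapper over ``eval_recalls``: collects the non-crowd,
        non-ignored GT boxes of every image and computes average recall
        for each entry of ``proposal_nums`` at the given IoU thresholds.
        """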
|
gt_bboxes = [] |
|
for i in range(len(self.img_ids)): |
|
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i]) |
|
ann_info = self.coco.load_anns(ann_ids) |
|
if len(ann_info) == 0: |
|
gt_bboxes.append(np.zeros((0, 4))) |
|
continue |
|
bboxes = [] |
|
for ann in ann_info: |
|
if ann.get('ignore', False) or ann['iscrowd']: |
|
continue |
|
x1, y1, w, h = ann['bbox'] |
|
bboxes.append([x1, y1, x1 + w, y1 + h]) |
|
|
|
bboxes = np.array(bboxes, dtype=np.float32) |
|
if bboxes.shape[0] == 0: |
|
bboxes = np.zeros((0, 4)) |
|
gt_bboxes.append(bboxes) |
|
|
|
recalls = eval_recalls( |
|
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger) |
|
ar = recalls.mean(axis=1) |
|
return ar |
|
|
|
def format_results(self, results, jsonfile_prefix=None, **kwargs): |
|
"""Format the results to json (standard format for COCO evaluation). |
|
|
|
Args: |
|
results (list[tuple | numpy.ndarray]): Testing results of the |
|
dataset. |
|
jsonfile_prefix (str | None): The prefix of json files. It includes |
|
the file path and the prefix of filename, e.g., "a/b/prefix". |
|
If not specified, a temp file will be created. Default: None. |
|
|
|
Returns: |
|
tuple: (result_files, tmp_dir), result_files is a dict containing \ |
|
the json filepaths, tmp_dir is the temporal directory created \ |
|
for saving json files when jsonfile_prefix is not specified. |
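
        Example (illustrative)::

            >>> result_files, tmp_dir = dataset.format_results(
            ...     results, jsonfile_prefix='work_dirs/walt/results')
            >>> result_files['bbox']
            'work_dirs/walt/results.bbox.json'
            >>> tmp_dir is None  # no temp dir when a prefix is given
            True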
|
""" |
|
assert isinstance(results, list), 'results must be a list' |
|
assert len(results) == len(self), ( |
|
'The length of results is not equal to the dataset len: {} != {}'. |
|
format(len(results), len(self))) |
|
|
|
if jsonfile_prefix is None: |
|
tmp_dir = tempfile.TemporaryDirectory() |
|
jsonfile_prefix = osp.join(tmp_dir.name, 'results') |
|
|
|
else: |
|
tmp_dir = None |
|
result_files = self.results2json(results, jsonfile_prefix) |
|
return result_files, tmp_dir |
|
|
|
def evaluate(self, |
|
results, |
|
metric='bbox', |
|
logger=None, |
|
jsonfile_prefix=None, |
|
classwise=False, |
|
proposal_nums=(100, 300, 1000), |
|
iou_thrs=None, |
|
metric_items=None): |
|
"""Evaluation in COCO protocol. |
|
|
|
Args: |
|
results (list[list | tuple]): Testing results of the dataset. |
|
metric (str | list[str]): Metrics to be evaluated. Options are |
|
'bbox', 'segm', 'proposal', 'proposal_fast'. |
|
logger (logging.Logger | str | None): Logger used for printing |
|
related information during evaluation. Default: None. |
|
jsonfile_prefix (str | None): The prefix of json files. It includes |
|
the file path and the prefix of filename, e.g., "a/b/prefix". |
|
If not specified, a temp file will be created. Default: None. |
|
            classwise (bool): Whether to evaluate the AP for each class.
|
proposal_nums (Sequence[int]): Proposal number used for evaluating |
|
recalls, such as recall@100, recall@1000. |
|
Default: (100, 300, 1000). |
|
iou_thrs (Sequence[float], optional): IoU threshold used for |
|
evaluating recalls/mAPs. If set to a list, the average of all |
|
IoUs will also be computed. If not specified, [0.50, 0.55, |
|
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. |
|
Default: None. |
|
metric_items (list[str] | str, optional): Metric items that will |
|
be returned. If not specified, ``['AR@100', 'AR@300', |
|
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be |
|
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', |
|
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when |
|
``metric=='bbox' or metric=='segm'``. |
|
|
|
Returns: |
|
dict[str, float]: COCO style evaluation metric. |
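
        Example (illustrative values)::

            >>> dataset.evaluate(results, metric='bbox')
            OrderedDict([('bbox_mAP', 0.382), ('bbox_mAP_50', 0.601), ...])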
|
""" |
|
|
|
metrics = metric if isinstance(metric, list) else [metric] |
|
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] |
|
for metric in metrics: |
|
if metric not in allowed_metrics: |
|
raise KeyError(f'metric {metric} is not supported') |
|
if iou_thrs is None: |
|
iou_thrs = np.linspace( |
|
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) |
|
if metric_items is not None: |
|
if not isinstance(metric_items, list): |
|
metric_items = [metric_items] |
|
|
|
result_files_all, tmp_dir = self.format_results(results, jsonfile_prefix) |
|
|
|
eval_results = OrderedDict() |
|
cocoGt_all = self.coco |
|
        results_all = []
|
|
|
for metric in metrics: |
|
msg = f'Evaluating {metric}...' |
|
if logger is None: |
|
msg = '\n' + msg |
|
print_log(msg, logger=logger) |
|
|
|
if metric == 'proposal_fast': |
|
ar = self.fast_eval_recall( |
|
results, proposal_nums, iou_thrs, logger='silent') |
|
log_msg = [] |
|
for i, num in enumerate(proposal_nums): |
|
eval_results[f'AR@{num}'] = ar[i] |
|
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}') |
|
log_msg = ''.join(log_msg) |
|
print_log(log_msg, logger=logger) |
|
continue |
|
|
|
if metric not in result_files_all: |
|
raise KeyError(f'{metric} is not in results') |
|
|
|
|
|
|
iou_type = 'bbox' if metric == 'proposal' else metric |
|
|
            # bail out early when the result file carries no detections
            try:
                cocoGt_all.loadRes(result_files_all[metric])
            except IndexError:
                print_log(
                    'The testing results of the whole dataset is empty.',
                    logger=logger,
                    level=logging.ERROR)
                break

            # sweep occlusion buckets: -1 evaluates all instances, 0-9
            # restrict evaluation to increasing occlusion levels via the
            # forked COCOeval's ``percentage_occ`` switch
            for i in range(-1, 10):
                cocoGt = deepcopy(cocoGt_all)
                result_files = deepcopy(result_files_all)
                cocoDt = cocoGt.loadRes(result_files[metric])
                # COCOeval expects a plain segmentation, so substitute the
                # amodal ('full') mask into every ground-truth annotation
                for ann_id in cocoGt.anns:
                    cocoGt.anns[ann_id]['segmentation'] = \
                        cocoGt.anns[ann_id]['segmentation'][0]['full']
                cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
                cocoEval.percentage_occ = i
                cocoEval.params.useCats = 0
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                stats_str = ' '.join(f'{x:.2f}' for x in cocoEval.stats)
                results_all.append(f'{stats_str} {metric} {i}')
                # rewrite after every bucket so partial results survive
                np.savetxt('results.out', results_all, delimiter=',', fmt='%s')
|
|
|
|
cocoEval = COCOeval(cocoGt, cocoDt, iou_type) |
|
|
|
cocoEval.params.catIds = self.cat_ids |
|
cocoEval.params.imgIds = self.img_ids |
|
cocoEval.params.maxDets = list(proposal_nums) |
|
cocoEval.params.iouThrs = iou_thrs |
|
|
|
coco_metric_names = { |
|
'mAP': 0, |
|
'mAP_50': 1, |
|
'mAP_75': 2, |
|
'mAP_s': 3, |
|
'mAP_m': 4, |
|
'mAP_l': 5, |
|
'AR@100': 6, |
|
'AR@300': 7, |
|
'AR@1000': 8, |
|
'AR_s@1000': 9, |
|
'AR_m@1000': 10, |
|
'AR_l@1000': 11 |
|
} |
|
if metric_items is not None: |
|
for metric_item in metric_items: |
|
if metric_item not in coco_metric_names: |
|
raise KeyError( |
|
f'metric item {metric_item} is not supported') |
|
|
if metric == 'proposal': |
|
cocoEval.params.useCats = 0 |
|
cocoEval.evaluate() |
|
cocoEval.accumulate() |
|
cocoEval.summarize() |
|
if metric_items is None: |
|
metric_items = [ |
|
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', |
|
'AR_m@1000', 'AR_l@1000' |
|
] |
|
|
|
for item in metric_items: |
|
val = float( |
|
f'{cocoEval.stats[coco_metric_names[item]]:.3f}') |
|
eval_results[item] = val |
|
else: |
|
cocoEval.evaluate() |
|
cocoEval.accumulate() |
|
cocoEval.summarize() |
|
if classwise: |
|
|
|
|
|
precisions = cocoEval.eval['precision'] |
|
|
|
assert len(self.cat_ids) == precisions.shape[2] |
|
|
|
results_per_category = [] |
|
for idx, catId in enumerate(self.cat_ids): |
|
|
|
|
|
nm = self.coco.loadCats(catId)[0] |
|
precision = precisions[:, :, idx, 0, -1] |
|
precision = precision[precision > -1] |
|
if precision.size: |
|
ap = np.mean(precision) |
|
else: |
|
ap = float('nan') |
|
results_per_category.append( |
|
(f'{nm["name"]}', f'{float(ap):0.3f}')) |
|
|
|
num_columns = min(6, len(results_per_category) * 2) |
|
results_flatten = list( |
|
itertools.chain(*results_per_category)) |
|
headers = ['category', 'AP'] * (num_columns // 2) |
|
results_2d = itertools.zip_longest(*[ |
|
results_flatten[i::num_columns] |
|
for i in range(num_columns) |
|
]) |
|
table_data = [headers] |
|
table_data += [result for result in results_2d] |
|
table = AsciiTable(table_data) |
|
print_log('\n' + table.table, logger=logger) |
|
|
|
if metric_items is None: |
|
metric_items = [ |
|
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l' |
|
] |
|
|
|
for metric_item in metric_items: |
|
key = f'{metric}_{metric_item}' |
|
val = float( |
|
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}' |
|
) |
|
eval_results[key] = val |
|
ap = cocoEval.stats[:6] |
|
eval_results[f'{metric}_mAP_copypaste'] = ( |
|
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} ' |
|
f'{ap[4]:.3f} {ap[5]:.3f}') |
|
if tmp_dir is not None: |
|
tmp_dir.cleanup() |
|
return eval_results |
|
|