# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import tempfile

import mmcv
import numpy as np
import pytest
import torch

from mmdet.core import visualization as vis
from mmdet.datasets import (CityscapesDataset, CocoDataset,
                            CocoPanopticDataset, VOCDataset)


def test_color():
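    """Test color_val_matplotlib with valid and invalid color inputs."""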
    assert vis.color_val_matplotlib(mmcv.Color.blue) == (0., 0., 1.)
    assert vis.color_val_matplotlib('green') == (0., 1., 0.)
    assert vis.color_val_matplotlib((1, 2, 3)) == (3 / 255, 2 / 255, 1 / 255)
    assert vis.color_val_matplotlib(100) == (100 / 255, 100 / 255, 100 / 255)
    assert vis.color_val_matplotlib(np.zeros(3, dtype=int)) == (0., 0., 0.)
    # a plain list is not an accepted color type, even if it encodes white
    with pytest.raises(TypeError):
        vis.color_val_matplotlib([255, 255, 255])
    # forbid float input
    with pytest.raises(TypeError):
        vis.color_val_matplotlib(1.0)
    # forbid values outside the 0-255 range
    with pytest.raises(AssertionError):
        vis.color_val_matplotlib((0, 0, 500))


def test_imshow_det_bboxes():
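    """Test imshow_det_bboxes on color and grayscale images, empty
    detections, numpy masks and an unsupported tensor mask."""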
    tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image',
                            'image.jpg')
    image = np.ones((10, 10, 3), np.uint8)
    bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
    label = np.array([0, 1])
    out_image = vis.imshow_det_bboxes(
        image, bbox, label, out_file=tmp_filename, show=False)
    assert osp.isfile(tmp_filename)
    assert image.shape == out_image.shape
    assert not np.allclose(image, out_image)
    os.remove(tmp_filename)

    # test grayscale images
    image = np.ones((10, 10), np.uint8)
    bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
    label = np.array([0, 1])
    out_image = vis.imshow_det_bboxes(
        image, bbox, label, out_file=tmp_filename, show=False)
    assert osp.isfile(tmp_filename)
    assert image.shape == out_image.shape[:2]
    os.remove(tmp_filename)

    # test empty bboxes and labels of shape (0, 4) and (0, )
    image = np.ones((10, 10, 3), np.uint8)
    bbox = np.ones((0, 4))
    label = np.ones((0, ))
    vis.imshow_det_bboxes(
        image, bbox, label, out_file=tmp_filename, show=False)
    assert osp.isfile(tmp_filename)
    os.remove(tmp_filename)

    # test mask
    image = np.ones((10, 10, 3), np.uint8)
    bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
    label = np.array([0, 1])
    segms = np.random.random((2, 10, 10)) > 0.5
    segms = np.array(segms, np.int32)
    vis.imshow_det_bboxes(
        image, bbox, label, segms, out_file=tmp_filename, show=False)
    assert osp.isfile(tmp_filename)
    os.remove(tmp_filename)

    # a torch.Tensor mask is not supported and raises AttributeError
    with pytest.raises(AttributeError):
        segms = torch.tensor(segms)
        vis.imshow_det_bboxes(image, bbox, label, segms, show=False)


def test_imshow_gt_det_bboxes():
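    """Test imshow_gt_det_bboxes on color and grayscale images with
    numpy, tensor and unsupported ground-truth masks."""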
    tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image',
                            'image.jpg')
    image = np.ones((10, 10, 3), np.uint8)
    bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
    label = np.array([0, 1])
    annotation = dict(gt_bboxes=bbox, gt_labels=label)
    det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]])
    result = [det_result]
    out_image = vis.imshow_gt_det_bboxes(
        image, annotation, result, out_file=tmp_filename, show=False)
    assert osp.isfile(tmp_filename)
    assert image.shape == out_image.shape
    assert not np.allclose(image, out_image)
    os.remove(tmp_filename)

    # test grayscale images
    image = np.ones((10, 10), np.uint8)
    bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
    label = np.array([0, 1])
    annotation = dict(gt_bboxes=bbox, gt_labels=label)
    det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]])
    result = [det_result]
    vis.imshow_gt_det_bboxes(
        image, annotation, result, out_file=tmp_filename, show=False)
    assert osp.isfile(tmp_filename)
    os.remove(tmp_filename)

    # test numpy mask
    gt_mask = np.ones((2, 10, 10))
    annotation['gt_masks'] = gt_mask
    vis.imshow_gt_det_bboxes(
        image, annotation, result, out_file=tmp_filename, show=False)
    assert osp.isfile(tmp_filename)
    os.remove(tmp_filename)

    # test tensor mask
    gt_mask = torch.ones((2, 10, 10))
    annotation['gt_masks'] = gt_mask
    vis.imshow_gt_det_bboxes(
        image, annotation, result, out_file=tmp_filename, show=False)
    assert osp.isfile(tmp_filename)
    os.remove(tmp_filename)

    # test unsupported type
    annotation['gt_masks'] = []
    with pytest.raises(TypeError):
        vis.imshow_gt_det_bboxes(image, annotation, result, show=False)


def test_palette():
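    """Test palette_val and get_palette with various palette inputs."""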
    assert vis.palette_val([(1, 2, 3)])[0] == (1 / 255, 2 / 255, 3 / 255)

    # test list
    palette = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
    palette_ = vis.get_palette(palette, 3)
    for color, color_ in zip(palette, palette_):
        assert color == color_

    # test tuple
    palette = vis.get_palette((1, 2, 3), 3)
    assert len(palette) == 3
    for color in palette:
        assert color == (1, 2, 3)

    # test color str
    palette = vis.get_palette('red', 3)
    assert len(palette) == 3
    for color in palette:
        assert color == (255, 0, 0)

    # test dataset str
    palette = vis.get_palette('coco', len(CocoDataset.CLASSES))
    assert len(palette) == len(CocoDataset.CLASSES)
    assert palette[0] == (220, 20, 60)
    palette = vis.get_palette('coco', len(CocoPanopticDataset.CLASSES))
    assert len(palette) == len(CocoPanopticDataset.CLASSES)
    assert palette[-1] == (250, 141, 255)
    palette = vis.get_palette('voc', len(VOCDataset.CLASSES))
    assert len(palette) == len(VOCDataset.CLASSES)
    assert palette[0] == (106, 0, 228)
    palette = vis.get_palette('citys', len(CityscapesDataset.CLASSES))
    assert len(palette) == len(CityscapesDataset.CLASSES)
    assert palette[0] == (220, 20, 60)

    # test random
    palette1 = vis.get_palette('random', 3)
    palette2 = vis.get_palette(None, 3)
    for color1, color2 in zip(palette1, palette2):
        assert isinstance(color1, tuple)
        assert isinstance(color2, tuple)
        assert color1 == color2