# test/geometry/test_bbox.py
import torch
from torch.autograd import gradcheck
from torch.testing import assert_allclose
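# NOTE: torch.testing.assert_allclose is deprecated in newer PyTorch releases;
# torch.testing.assert_close is its replacement.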
import kornia
import kornia.testing as utils
from kornia.geometry.bbox import infer_bbox_shape, infer_bbox_shape3d, transform_bbox, validate_bbox, validate_bbox3d


class TestBbox2D:
    def test_smoke(self, device, dtype):
        # Sample two opposite corners of the rectangle
        points = torch.rand(1, 4, device=device, dtype=dtype)

        # Fill in the remaining corner points
        bbox = torch.zeros(1, 4, 2, device=device, dtype=dtype)
        bbox[0, 0] = points[0][:2]
        bbox[0, 1, 0] = points[0][2]
        bbox[0, 1, 1] = points[0][1]
        bbox[0, 2] = points[0][2:]
        bbox[0, 3, 0] = points[0][0]
        bbox[0, 3, 1] = points[0][3]

        # Validate
        assert validate_bbox(bbox)
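
    # NOTE: rows 0..3 above are the four corners of the rectangle spanned by
    # (x1, y1) = points[0][:2] and (x2, y2) = points[0][2:], so validate_bbox is
    # exercised on an arbitrary axis-aligned box in the (B, 4, 2) layout.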

    def test_bounding_boxes_dim_inferring(self, device, dtype):
        boxes = torch.tensor([[[1.0, 1.0], [3.0, 1.0], [3.0, 2.0], [1.0, 2.0]]], device=device, dtype=dtype)

        h, w = infer_bbox_shape(boxes)
        assert (h.item(), w.item()) == (2, 3)
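
    # The expected (h, w) == (2, 3) for a box spanning x in [1, 3] and y in [1, 2]
    # is consistent with an inclusive end-point convention, i.e. w = 3 - 1 + 1 and
    # h = 2 - 1 + 1.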

    def test_bounding_boxes_dim_inferring_batch(self, device, dtype):
        boxes = torch.tensor(
            [[[1.0, 1.0], [3.0, 1.0], [3.0, 2.0], [1.0, 2.0]], [[2.0, 2.0], [4.0, 2.0], [4.0, 3.0], [2.0, 3.0]]],
            device=device,
            dtype=dtype,
        )

        h, w = infer_bbox_shape(boxes)
        assert (h.unique().item(), w.unique().item()) == (2, 3)

    def test_gradcheck(self, device, dtype):
        boxes = torch.tensor([[[1.0, 1.0], [3.0, 1.0], [3.0, 2.0], [1.0, 2.0]]], device=device, dtype=dtype)
        boxes = utils.tensor_to_gradcheck_var(boxes)
        assert gradcheck(infer_bbox_shape, (boxes,), raise_exception=True)
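
    # gradcheck compares analytic gradients against finite differences, which is only
    # reliable in double precision; tensor_to_gradcheck_var presumably casts the input
    # to float64 and enables requires_grad for that purpose.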

    def test_jit(self, device, dtype):
        # Define script
        op = infer_bbox_shape
        op_script = torch.jit.script(op)

        # Define input
        boxes = torch.tensor([[[1.0, 1.0], [3.0, 1.0], [3.0, 2.0], [1.0, 2.0]]], device=device, dtype=dtype)

        # Run
        expected = op(boxes)
        actual = op_script(boxes)

        # Compare
        assert_allclose(actual, expected)
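
    # Scripting the op with torch.jit.script and checking it against the eager result
    # guards against TorchScript-incompatible constructs in infer_bbox_shape.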


class TestTransformBoxes2D:
    def test_transform_boxes(self, device, dtype):
        boxes = torch.tensor([[139.2640, 103.0150, 397.3120, 410.5225]], device=device, dtype=dtype)
        expected = torch.tensor([[372.7360, 103.0150, 114.6880, 410.5225]], device=device, dtype=dtype)

        trans_mat = torch.tensor([[[-1.0, 0.0, 512.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], device=device, dtype=dtype)

        out = transform_bbox(trans_mat, boxes)
        assert_allclose(out, expected, atol=1e-4, rtol=1e-4)
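
    # trans_mat maps x to 512 - x (a horizontal flip over a 512-wide image) and leaves
    # y untouched, hence x_min 139.2640 -> 372.7360 and x_max 397.3120 -> 114.6880 in
    # the expected box, while both y values are unchanged.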

    def test_transform_multiple_boxes(self, device, dtype):
        boxes = torch.tensor(
            [
                [139.2640, 103.0150, 397.3120, 410.5225],
                [1.0240, 80.5547, 512.0000, 512.0000],
                [165.2053, 262.1440, 510.6347, 508.9280],
                [119.8080, 144.2067, 257.0240, 410.1292],
            ],
            device=device,
            dtype=dtype,
        )
        boxes = boxes.repeat(2, 1, 1)  # 2 x 4 x 4: two images, 4 boxes each

        expected = torch.tensor(
            [
                [
                    [372.7360, 103.0150, 114.6880, 410.5225],
                    [510.9760, 80.5547, 0.0000, 512.0000],
                    [346.7947, 262.1440, 1.3653, 508.9280],
                    [392.1920, 144.2067, 254.9760, 410.1292],
                ],
                [
                    [139.2640, 103.0150, 397.3120, 410.5225],
                    [1.0240, 80.5547, 512.0000, 512.0000],
                    [165.2053, 262.1440, 510.6347, 508.9280],
                    [119.8080, 144.2067, 257.0240, 410.1292],
                ],
            ],
            device=device,
            dtype=dtype,
        )

        trans_mat = torch.tensor(
            [
                [[-1.0, 0.0, 512.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
                [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
            ],
            device=device,
            dtype=dtype,
        )

        out = transform_bbox(trans_mat, boxes)
        assert_allclose(out, expected, atol=1e-4, rtol=1e-4)
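
    # The first 3x3 matrix is the same horizontal flip as above, while the second is
    # the identity, so the second image's boxes are expected to come back unchanged.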

    def test_transform_boxes_wh(self, device, dtype):
        boxes = torch.tensor(
            [
                [139.2640, 103.0150, 258.0480, 307.5075],
                [1.0240, 80.5547, 510.9760, 431.4453],
                [165.2053, 262.1440, 345.4293, 246.7840],
                [119.8080, 144.2067, 137.2160, 265.9225],
            ],
            device=device,
            dtype=dtype,
        )

        expected = torch.tensor(
            [
                [372.7360, 103.0150, -258.0480, 307.5075],
                [510.9760, 80.5547, -510.9760, 431.4453],
                [346.7947, 262.1440, -345.4293, 246.7840],
                [392.1920, 144.2067, -137.2160, 265.9225],
            ],
            device=device,
            dtype=dtype,
        )

        trans_mat = torch.tensor([[[-1.0, 0.0, 512.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], device=device, dtype=dtype)

        out = transform_bbox(trans_mat, boxes, mode='xywh')
        assert_allclose(out, expected, atol=1e-4, rtol=1e-4)
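
    # In 'xywh' mode the boxes are (x, y, width, height); after the flip the x offset
    # becomes 512 - x (e.g. 139.2640 -> 372.7360) and the widths change sign because
    # the flip reverses the x direction, while y and height are untouched.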

    def test_gradcheck(self, device, dtype):
        boxes = torch.tensor(
            [
                [139.2640, 103.0150, 258.0480, 307.5075],
                [1.0240, 80.5547, 510.9760, 431.4453],
                [165.2053, 262.1440, 345.4293, 246.7840],
                [119.8080, 144.2067, 137.2160, 265.9225],
            ],
            device=device,
            dtype=dtype,
        )

        trans_mat = torch.tensor([[[-1.0, 0.0, 512.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], device=device, dtype=dtype)

        trans_mat = utils.tensor_to_gradcheck_var(trans_mat)
        boxes = utils.tensor_to_gradcheck_var(boxes)

        assert gradcheck(transform_bbox, (trans_mat, boxes), raise_exception=True)

    def test_jit(self, device, dtype):
        boxes = torch.tensor([[139.2640, 103.0150, 258.0480, 307.5075]], device=device, dtype=dtype)
        trans_mat = torch.tensor([[[-1.0, 0.0, 512.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], device=device, dtype=dtype)
        args = (trans_mat, boxes)

        op = kornia.geometry.bbox.transform_bbox
        op_jit = torch.jit.script(op)
        assert_allclose(op(*args), op_jit(*args))


class TestBbox3D:
    def test_smoke(self, device, dtype):
        # Sample two opposite corners of the 3d box
        points = torch.rand(1, 6, device=device, dtype=dtype)

        # Fill in the remaining corner points
        bbox = torch.zeros(1, 8, 3, device=device, dtype=dtype)
        bbox[0, 0] = points[0][:3]
        bbox[0, 1, 0] = points[0][3]
        bbox[0, 1, 1] = points[0][1]
        bbox[0, 1, 2] = points[0][2]
        bbox[0, 2, 0] = points[0][3]
        bbox[0, 2, 1] = points[0][4]
        bbox[0, 2, 2] = points[0][2]
        bbox[0, 3, 0] = points[0][0]
        bbox[0, 3, 1] = points[0][4]
        bbox[0, 3, 2] = points[0][2]
        bbox[0, 4, 0] = points[0][0]
        bbox[0, 4, 1] = points[0][1]
        bbox[0, 4, 2] = points[0][5]
        bbox[0, 5, 0] = points[0][3]
        bbox[0, 5, 1] = points[0][1]
        bbox[0, 5, 2] = points[0][5]
        bbox[0, 6] = points[0][3:]
        bbox[0, 7, 0] = points[0][0]
        bbox[0, 7, 1] = points[0][4]
        bbox[0, 7, 2] = points[0][5]

        # Validate
        assert validate_bbox3d(bbox)
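
    # NOTE: rows 0..3 are the box face at z = points[0][2] and rows 4..7 the face at
    # z = points[0][5], each built from the corner points[0][:3] and the opposite
    # corner points[0][3:], matching the (B, 8, 3) layout exercised by validate_bbox3d.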

    def test_bounding_boxes_dim_inferring(self, device, dtype):
        boxes = torch.tensor(
            [
                [[0, 1, 2], [10, 1, 2], [10, 21, 2], [0, 21, 2], [0, 1, 32], [10, 1, 32], [10, 21, 32], [0, 21, 32]],
                [[3, 4, 5], [43, 4, 5], [43, 54, 5], [3, 54, 5], [3, 4, 65], [43, 4, 65], [43, 54, 65], [3, 54, 65]],
            ],
            device=device,
            dtype=dtype,
        )  # 2x8x3

        d, h, w = infer_bbox_shape3d(boxes)
        assert_allclose(d, torch.tensor([31.0, 61.0], device=device, dtype=dtype))
        assert_allclose(h, torch.tensor([21.0, 51.0], device=device, dtype=dtype))
        assert_allclose(w, torch.tensor([11.0, 41.0], device=device, dtype=dtype))
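
    # The first box spans x in [0, 10], y in [1, 21] and z in [2, 32], so the expected
    # (d, h, w) = (31, 21, 11) follows the same inclusive end-point convention as in
    # the 2d case (extent + 1 along each axis).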

    def test_gradcheck(self, device, dtype):
        boxes = torch.tensor(
            [[[0, 1, 2], [10, 1, 2], [10, 21, 2], [0, 21, 2], [0, 1, 32], [10, 1, 32], [10, 21, 32], [0, 21, 32]]],
            device=device,
            dtype=dtype,
        )
        boxes = utils.tensor_to_gradcheck_var(boxes)
        assert gradcheck(infer_bbox_shape3d, (boxes,), raise_exception=True)

    def test_jit(self, device, dtype):
        # Define script
        op = infer_bbox_shape3d
        op_script = torch.jit.script(op)

        boxes = torch.tensor(
            [[[0, 0, 1], [3, 0, 1], [3, 2, 1], [0, 2, 1], [0, 0, 3], [3, 0, 3], [3, 2, 3], [0, 2, 3]]],
            device=device,
            dtype=dtype,
        )  # 1x8x3

        actual = op_script(boxes)
        expected = op(boxes)
        assert_allclose(actual, expected)