import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm

from mmpose.models.backbones.utils import (InvertedResidual, SELayer,
                                           channel_shuffle, make_divisible)


def is_norm(modules):
    """Check if the given module is one of the normalization layers."""
    return isinstance(modules, (GroupNorm, _BatchNorm))


def test_make_divisible():
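    # 34 rounds to 32, the nearest multiple of the divisor 8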
    result = make_divisible(34, 8, None)
    assert result == 32
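
    # 10 rounds down to 8, below min_ratio * 10, so it is bumped up to 16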
    result = make_divisible(10, 8, min_ratio=0.9)
    assert result == 16
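
    # 33 rounds down to 32, which is not below min_ratio * 33, so 32 is kept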
    result = make_divisible(33, 8, min_ratio=0.8)
    assert result == 32


def test_channel_shuffle():
    x = torch.randn(1, 24, 56, 56)
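    # 24 channels cannot be evenly divided into 7 groups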
    with pytest.raises(AssertionError):
        channel_shuffle(x, 7)
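
    # shuffle the 24 channels into 3 groups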
    groups = 3
    batch_size, num_channels, height, width = x.size()
    channels_per_group = num_channels // groups
    out = channel_shuffle(x, groups)
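
    # each input channel c should land at output channel
    # (c % channels_per_group) * groups + c // channels_per_group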
    for b in range(batch_size):
        for c in range(num_channels):
            c_out = c % channels_per_group * groups + c // channels_per_group
            for i in range(height):
                for j in range(width):
                    assert x[b, c, i, j] == out[b, c_out, i, j]


def test_inverted_residual():
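    # stride must be 1 or 2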
    with pytest.raises(AssertionError):
        InvertedResidual(16, 16, 32, stride=3)
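
    # se_cfg must be None or a dict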
    with pytest.raises(AssertionError):
        InvertedResidual(16, 16, 32, se_cfg=list())
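
    # in_channels must equal mid_channels when with_expand_conv is False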
    with pytest.raises(AssertionError):
        InvertedResidual(16, 16, 32, with_expand_conv=False)
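
    # stride=1 with in_channels == out_channels keeps the residual shortcut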
    block = InvertedResidual(16, 16, 32, stride=1)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert getattr(block, 'se', None) is None
    assert block.with_res_shortcut
    assert x_out.shape == torch.Size((1, 16, 56, 56))
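
    # stride=2 drops the residual shortcut and halves the spatial resolution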
    block = InvertedResidual(16, 16, 32, stride=2)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert not block.with_res_shortcut
    assert x_out.shape == torch.Size((1, 16, 28, 28))
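
    # passing an se_cfg dict attaches an SELayer to the block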
    se_cfg = dict(channels=32)
    block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert isinstance(block.se, SELayer)
    assert x_out.shape == torch.Size((1, 16, 56, 56))
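
    # with_expand_conv=False (in_channels == mid_channels) creates no
    # expand_conv module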
    block = InvertedResidual(32, 16, 32, with_expand_conv=False)
    x = torch.randn(1, 32, 56, 56)
    x_out = block(x)
    assert getattr(block, 'expand_conv', None) is None
    assert x_out.shape == torch.Size((1, 16, 56, 56))
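
    # with a GN norm_cfg, every norm layer in the block should be GroupNorm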
    block = InvertedResidual(
        16, 16, 32, norm_cfg=dict(type='GN', num_groups=2))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    for m in block.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    assert x_out.shape == torch.Size((1, 16, 56, 56))
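
    # a custom act_cfg (HSigmoid) should leave the output shape unchanged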
    block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid'))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size((1, 16, 56, 56))
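
    # with_cp=True runs the forward pass with gradient checkpointing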
    block = InvertedResidual(16, 16, 32, with_cp=True)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert block.with_cp
    assert x_out.shape == torch.Size((1, 16, 56, 56))