code
stringlengths
17
6.64M
def test_build_activation_layer(): with pytest.raises(TypeError): cfg = 'ReLU' build_activation_layer(cfg) with pytest.raises(KeyError): cfg = dict() build_activation_layer(cfg) with pytest.raises(KeyError): cfg = dict(type='FancyReLU') build_activation_laye...
def test_build_padding_layer(): with pytest.raises(TypeError): cfg = 'reflect' build_padding_layer(cfg) with pytest.raises(KeyError): cfg = dict() build_padding_layer(cfg) with pytest.raises(KeyError): cfg = dict(type='FancyPad') build_padding_layer(cfg) ...
def test_upsample_layer(): with pytest.raises(TypeError): cfg = 'bilinear' build_upsample_layer(cfg) with pytest.raises(KeyError): cfg = dict() build_upsample_layer(cfg) with pytest.raises(KeyError): cfg = dict(type='FancyUpsample') build_upsample_layer(cfg)...
def test_pixel_shuffle_pack():
    """PixelShufflePack with scale_factor=2 doubles the spatial size."""
    inputs = torch.rand(2, 3, 10, 10)
    shuffle = PixelShufflePack(3, 3, scale_factor=2, upsample_kernel=3)
    # upsample_kernel=3 must translate into a 3x3 conv kernel
    assert shuffle.upsample_conv.kernel_size == (3, 3)
    out = shuffle(inputs)
    assert out.shape == (2, 3, 20, 20)
def test_is_norm(): norm_set1 = [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d, nn.LayerNorm] norm_set2 = [nn.GroupNorm] for norm_type in norm_set1: layer = norm_type(3) assert is_norm(layer) assert (not is_norm(layer, exclu...
def test_infer_plugin_abbr():
    """infer_plugin_abbr honours an explicit ``_abbr_`` and otherwise
    derives a snake_case abbreviation from the class name."""
    # non-class input is rejected
    with pytest.raises(TypeError):
        infer_plugin_abbr(0)

    # NOTE: class names below are load-bearing — the fallback abbreviation
    # is derived from them, so only the declaration style is restyled here.
    class MyPlugin:
        _abbr_ = 'mp'

    assert infer_plugin_abbr(MyPlugin) == 'mp'

    class FancyPlugin:
        pass

    assert infer_plugin_abbr(FancyPlugin) == 'fancy_plugin'
def test_build_plugin_layer(): with pytest.raises(TypeError): cfg = 'Plugin' build_plugin_layer(cfg) with pytest.raises(KeyError): cfg = dict() build_plugin_layer(cfg) with pytest.raises(KeyError): cfg = dict(type='FancyPlugin') build_plugin_layer(cfg) w...
def test_context_block(): with pytest.raises(AssertionError): ContextBlock(16, (1.0 / 4), pooling_type='unsupport_type') with pytest.raises(AssertionError): ContextBlock(16, (1.0 / 4), fusion_types='unsupport_type') with pytest.raises(AssertionError): ContextBlock(16, (1.0 / 4), fu...
def test_conv2d_samepadding(): inputs = torch.rand((1, 3, 28, 28)) conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=1) output = conv(inputs) assert (output.shape == inputs.shape) inputs = torch.rand((1, 3, 13, 13)) conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=1) output ...
@CONV_LAYERS.register_module() class ExampleConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, norm_cfg=None): super(ExampleConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels...
def test_conv_module(): with pytest.raises(AssertionError): conv_cfg = 'conv' ConvModule(3, 8, 2, conv_cfg=conv_cfg) with pytest.raises(AssertionError): norm_cfg = 'norm' ConvModule(3, 8, 2, norm_cfg=norm_cfg) with pytest.raises(KeyError): act_cfg = dict(type='softm...
def test_bias(): conv = ConvModule(3, 8, 2) assert (conv.conv.bias is not None) conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN')) assert (conv.conv.bias is None) conv = ConvModule(3, 8, 2, bias=False) assert (conv.conv.bias is None) with pytest.warns(UserWarning) as record: Conv...
def conv_forward(self, x):
    """Patch target for Conv2d.forward: tag the input instead of convolving."""
    return x + '_conv'
def bn_forward(self, x):
    """Patch target for BatchNorm2d.forward: tag the input instead of normalizing."""
    return x + '_bn'
def relu_forward(self, x):
    """Patch target for ReLU.forward: tag the input instead of activating."""
    return x + '_relu'
@patch('torch.nn.ReLU.forward', relu_forward) @patch('torch.nn.BatchNorm2d.forward', bn_forward) @patch('torch.nn.Conv2d.forward', conv_forward) def test_order(): with pytest.raises(AssertionError): order = ['conv', 'norm', 'act'] ConvModule(3, 8, 2, order=order) with pytest.raises(AssertionEr...
def test_depthwise_separable_conv(): with pytest.raises(AssertionError): DepthwiseSeparableConvModule(4, 8, 2, groups=2) conv = DepthwiseSeparableConvModule(3, 8, 2) assert (conv.depthwise_conv.conv.groups == 3) assert (conv.pointwise_conv.conv.kernel_size == (1, 1)) assert (not conv.depth...
class ExampleModel(nn.Module):
    """Toy model: synthesizes a random batch of shape ``(1, *imgs)`` and
    passes it through a single 3x3 convolution."""

    def __init__(self):
        super().__init__()
        self.conv2d = nn.Conv2d(3, 8, 3)

    def forward(self, imgs):
        # ``imgs`` is a shape tuple, not a tensor; the actual input is random
        batch = torch.randn((1, *imgs))
        return self.conv2d(batch)
def input_constructor(x):
    """Wrap a resolution tuple into the kwargs dict the model's forward expects."""
    return {'imgs': x}
def test_flops_counter(): with pytest.raises(AssertionError): model = nn.Conv2d(3, 8, 3) input_res = [1, 3, 16, 16] get_model_complexity_info(model, input_res) with pytest.raises(AssertionError): model = nn.Conv2d(3, 8, 3) input_res = tuple() get_model_complexit...
def test_flops_to_string(): flops = (6.54321 * (10.0 ** 9)) assert (flops_to_string(flops) == '6.54 GFLOPs') assert (flops_to_string(flops, 'MFLOPs') == '6543.21 MFLOPs') assert (flops_to_string(flops, 'KFLOPs') == '6543210.0 KFLOPs') assert (flops_to_string(flops, 'FLOPs') == '6543210000.0 FLOPs'...
def test_params_to_string(): num_params = (3.21 * (10.0 ** 7)) assert (params_to_string(num_params) == '32.1 M') num_params = (4.56 * (10.0 ** 5)) assert (params_to_string(num_params) == '456.0 k') num_params = (7.89 * (10.0 ** 2)) assert (params_to_string(num_params) == '789.0') num_param...
def test_fuse_conv_bn(): inputs = torch.rand((1, 3, 5, 5)) modules = nn.ModuleList() modules.append(nn.BatchNorm2d(3)) modules.append(ConvModule(3, 5, 3, norm_cfg=dict(type='BN'))) modules.append(ConvModule(5, 5, 3, norm_cfg=dict(type='BN'))) modules = nn.Sequential(*modules) fused_modules...
def test_context_block(): imgs = torch.randn(2, 16, 20, 20) gen_attention_block = GeneralizedAttention(16, attention_type='1000') assert (gen_attention_block.query_conv.in_channels == 16) assert (gen_attention_block.key_conv.in_channels == 16) assert (gen_attention_block.key_conv.in_channels == 16...
def test_hsigmoid(): with pytest.raises(AssertionError): HSigmoid(divisor=0) act = HSigmoid() input_shape = torch.Size([1, 3, 64, 64]) input = torch.randn(input_shape) output = act(input) expected_output = torch.min(torch.max(((input + 3) / 6), torch.zeros(input_shape)), torch.ones(inp...
def test_hswish(): act = HSwish(inplace=True) assert act.act.inplace act = HSwish() assert (not act.act.inplace) input = torch.randn(1, 3, 64, 64) expected_output = ((input * relu6((input + 3))) / 6) output = act(input) assert (output.shape == expected_output.shape) assert torch.eq...
def test_build_model_from_cfg(): BACKBONES = mmcv.Registry('backbone', build_func=build_model_from_cfg) @BACKBONES.register_module() class ResNet(nn.Module): def __init__(self, depth, stages=4): super().__init__() self.depth = depth self.stages = stages ...
def test_nonlocal():
    """_NonLocalNd rejects unknown modes and accepts norm/zeros_init variants."""
    with pytest.raises(ValueError):
        _NonLocalNd(3, mode='unsupport_mode')
    # every remaining constructor variant must build without raising
    for kwargs in (dict(),
                   dict(norm_cfg=dict(type='BN')),
                   dict(zeros_init=False),
                   dict(norm_cfg=dict(type='BN'), zeros_init=False)):
        _NonLocalNd(3, **kwargs)
def test_nonlocal3d(): imgs = torch.randn(2, 3, 10, 20, 20) nonlocal_3d = NonLocal3d(3) if (torch.__version__ == 'parrots'): if torch.cuda.is_available(): imgs = imgs.cuda() nonlocal_3d.cuda() out = nonlocal_3d(imgs) assert (out.shape == imgs.shape) nonlocal_3d ...
def test_nonlocal2d(): imgs = torch.randn(2, 3, 20, 20) nonlocal_2d = NonLocal2d(3) if (torch.__version__ == 'parrots'): if torch.cuda.is_available(): imgs = imgs.cuda() nonlocal_2d.cuda() out = nonlocal_2d(imgs) assert (out.shape == imgs.shape) imgs = torch.ran...
def test_nonlocal1d(): imgs = torch.randn(2, 3, 20) nonlocal_1d = NonLocal1d(3) if (torch.__version__ == 'parrots'): if torch.cuda.is_available(): imgs = imgs.cuda() nonlocal_1d.cuda() out = nonlocal_1d(imgs) assert (out.shape == imgs.shape) imgs = torch.randn(2...
def test_revert_syncbn():
    """revert_sync_batchnorm lets a SyncBN-normed module run single-process."""
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
    inputs = torch.randn(1, 3, 10, 10)
    # SyncBN cannot run without a distributed process group
    with pytest.raises(ValueError):
        out = conv(inputs)
    conv = revert_sync_batchnorm(conv)
    out = conv(inputs)
    assert out.shape == (1, 8, 9, 9)
def test_revert_mmsyncbn(): if (('SLURM_NTASKS' not in os.environ) or (int(os.environ['SLURM_NTASKS']) < 2)): print('Must run on slurm with more than 1 process!\nsrun -p test --gres=gpu:2 -n2') return rank = int(os.environ['SLURM_PROCID']) world_size = int(os.environ['SLURM_NTASKS']) l...
def test_scale(): scale = Scale() assert (scale.scale.data == 1.0) assert (scale.scale.dtype == torch.float) x = torch.rand(1, 3, 64, 64) output = scale(x) assert (output.shape == (1, 3, 64, 64)) scale = Scale(10.0) assert (scale.scale.data == 10.0) assert (scale.scale.dtype == tor...
def test_swish():
    """Swish(x) must equal x * sigmoid(x) elementwise.

    Fix: ``F.sigmoid`` is a long-deprecated alias that emits a warning;
    ``torch.sigmoid`` is the supported, numerically identical call.
    """
    act = Swish()
    inputs = torch.randn(1, 3, 64, 64)
    expected_output = inputs * torch.sigmoid(inputs)
    output = act(inputs)
    assert output.shape == expected_output.shape
    assert torch.equal(output, expected_output)
def test_adaptive_padding(): for padding in ('same', 'corner'): kernel_size = 16 stride = 16 dilation = 1 input = torch.rand(1, 1, 15, 17) adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) out = adap_pad(input) ...
def test_patch_embed(): B = 2 H = 3 W = 4 C = 3 embed_dims = 10 kernel_size = 3 stride = 1 dummy_input = torch.rand(B, C, H, W) patch_merge_1 = PatchEmbed(in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=1, norm_cfg=None) (x1, sh...
def test_patch_merging(): in_c = 3 out_c = 4 kernel_size = 3 stride = 3 padding = 1 dilation = 1 bias = False patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) (B, L, C) = (1, 100,...
def test_multiheadattention(): MultiheadAttention(embed_dims=5, num_heads=5, attn_drop=0, proj_drop=0, dropout_layer=dict(type='Dropout', drop_prob=0.0), batch_first=True) batch_dim = 2 embed_dim = 5 num_query = 100 attn_batch_first = MultiheadAttention(embed_dims=5, num_heads=5, attn_drop=0, proj...
def test_ffn(): with pytest.raises(AssertionError): FFN(num_fcs=1) FFN(dropout=0, add_residual=True) ffn = FFN(dropout=0, add_identity=True) input_tensor = torch.rand(2, 20, 256) input_tensor_nbc = input_tensor.transpose(0, 1) assert torch.allclose(ffn(input_tensor).sum(), ffn(input_te...
@pytest.mark.skipif((not torch.cuda.is_available()), reason='Cuda not available') def test_basetransformerlayer_cuda(): operation_order = ('self_attn', 'ffn') baselayer = BaseTransformerLayer(operation_order=operation_order, batch_first=True, attn_cfgs=dict(type='MultiheadAttention', embed_dims=256, num_heads...
@pytest.mark.parametrize('embed_dims', [False, 256]) def test_basetransformerlayer(embed_dims): attn_cfgs = (dict(type='MultiheadAttention', embed_dims=256, num_heads=8),) if embed_dims: ffn_cfgs = dict(type='FFN', embed_dims=embed_dims, feedforward_channels=1024, num_fcs=2, ffn_drop=0.0, act_cfg=dict...
def test_transformerlayersequence(): squeue = TransformerLayerSequence(num_layers=6, transformerlayers=dict(type='BaseTransformerLayer', attn_cfgs=[dict(type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1), dict(type='MultiheadAttention', embed_dims=256, num_heads=4)], feedforward_channels=1024, f...
def test_drop_path(): drop_path = DropPath(drop_prob=0) test_in = torch.rand(2, 3, 4, 5) assert (test_in is drop_path(test_in)) drop_path = DropPath(drop_prob=0.1) drop_path.training = False test_in = torch.rand(2, 3, 4, 5) assert (test_in is drop_path(test_in)) drop_path.training = Tr...
def test_constant_init(): conv_module = nn.Conv2d(3, 16, 3) constant_init(conv_module, 0.1) assert conv_module.weight.allclose(torch.full_like(conv_module.weight, 0.1)) assert conv_module.bias.allclose(torch.zeros_like(conv_module.bias)) conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False) co...
def test_xavier_init(): conv_module = nn.Conv2d(3, 16, 3) xavier_init(conv_module, bias=0.1) assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1)) xavier_init(conv_module, distribution='uniform') with pytest.raises(AssertionError): xavier_init(conv_module, distribution='...
def test_normal_init():
    """normal_init fills the bias with the given constant and must also
    work on modules that have no bias at all."""
    conv = nn.Conv2d(3, 16, 3)
    normal_init(conv, bias=0.1)
    assert conv.bias.allclose(torch.full_like(conv.bias, 0.1))
    # bias-less module: must not raise
    normal_init(nn.Conv2d(3, 16, 3, bias=False))
def test_trunc_normal_init(): def _random_float(a, b): return (((b - a) * random.random()) + a) def _is_trunc_normal(tensor, mean, std, a, b): z_samples = ((tensor.view((- 1)) - mean) / std) z_samples = z_samples.tolist() a0 = ((a - mean) / std) b0 = ((b - mean) / std...
def test_uniform_init():
    """uniform_init fills the bias with the given constant and must also
    work on modules that have no bias at all."""
    conv = nn.Conv2d(3, 16, 3)
    uniform_init(conv, bias=0.1)
    assert conv.bias.allclose(torch.full_like(conv.bias, 0.1))
    # bias-less module: must not raise
    uniform_init(nn.Conv2d(3, 16, 3, bias=False))
def test_kaiming_init(): conv_module = nn.Conv2d(3, 16, 3) kaiming_init(conv_module, bias=0.1) assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1)) kaiming_init(conv_module, distribution='uniform') with pytest.raises(AssertionError): kaiming_init(conv_module, distributi...
def test_caffe_xavier_init():
    """caffe2_xavier_init must run on a plain Conv2d without raising."""
    caffe2_xavier_init(nn.Conv2d(3, 16, 3))
def test_bias_init_with_prob():
    """bias_init_with_prob(p) should equal -log((1 - p) / p).

    Fix: the original passed the literal ``0.1`` to ``bias_init_with_prob``
    while computing the expectation from ``prior_prob`` — use the variable
    in both places so they cannot drift apart.
    """
    conv_module = nn.Conv2d(3, 16, 3)
    prior_prob = 0.1
    normal_init(conv_module, bias=bias_init_with_prob(prior_prob))
    bias = float(-np.log((1 - prior_prob) / prior_prob))
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, bias))
def test_constaninit(): 'test ConstantInit class.' model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2)) func = ConstantInit(val=1, bias=2, layer='Conv2d') func(model) assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0)) assert torch.equal(model[0].bias, ...
def test_xavierinit(): 'test XavierInit class.' model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2)) func = XavierInit(bias=0.1, layer='Conv2d') func(model) assert model[0].bias.allclose(torch.full_like(model[2].bias, 0.1)) assert (not model[2].bias.allclose(torch.full_like(mo...
def test_normalinit(): 'test Normalinit class.' model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2)) func = NormalInit(mean=100, std=1e-05, bias=200, layer=['Conv2d', 'Linear']) func(model) assert model[0].weight.allclose(torch.tensor(100.0)) assert model[2].weight.allclose(to...
def test_truncnormalinit(): 'test TruncNormalInit class.' model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2)) func = TruncNormalInit(mean=100, std=1e-05, bias=200, a=0, b=200, layer=['Conv2d', 'Linear']) func(model) assert model[0].weight.allclose(torch.tensor(100.0)) assert ...
def test_uniforminit(): '"test UniformInit class.' model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2)) func = UniformInit(a=1, b=1, bias=2, layer=['Conv2d', 'Linear']) func(model) assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0)) assert torch.equal(m...
def test_kaiminginit(): 'test KaimingInit class.' model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2)) func = KaimingInit(bias=0.1, layer='Conv2d') func(model) assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1)) assert (not torch.equal(model[2].bias, torch....
def test_caffe2xavierinit(): 'test Caffe2XavierInit.' model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2)) func = Caffe2XavierInit(bias=0.1, layer='Conv2d') func(model) assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1)) assert (not torch.equal(model[2].bia...
class FooModule(nn.Module):
    """Small fixture network: one linear layer and two conv layers, used by
    the initializer tests below."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 2)       # 1 -> 2 features
        self.conv2d = nn.Conv2d(3, 1, 3)    # 3 -> 1 channels, 3x3 kernel
        self.conv2d_2 = nn.Conv2d(3, 2, 3)  # 3 -> 2 channels, 3x3 kernel
def test_pretrainedinit(): 'test PretrainedInit class.' modelA = FooModule() constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear']) modelA.apply(constant_func) modelB = FooModule() funcB = PretrainedInit(checkpoint='modelA.pth') modelC = nn.Linear(1, 2) funcC = Pretrai...
def test_initialize(): model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2)) foonet = FooModule() init_cfg = dict(type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2) initialize(model, init_cfg) assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0)) ...
@patch('torch.__version__', torch_version) @pytest.mark.parametrize('in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)]) def test_conv2d(in_w, in_h, in_channel, out_channel, kernel_size, stride, padding, dilation): '\n CommandLine:\n ...
@patch('torch.__version__', torch_version) @pytest.mark.parametrize('in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)]) def test_conv3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size, stride, padding, dilation): '\n...
@patch('torch.__version__', torch_version) @pytest.mark.parametrize('in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)]) def test_conv_transposed_2d(in_w, in_h, in_channel, out_channel, kernel_size, stride, padding, dilation): x_empty = ...
@patch('torch.__version__', torch_version) @pytest.mark.parametrize('in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)]) def test_conv_transposed_3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size, stride, padding, dilati...
@patch('torch.__version__', torch_version) @pytest.mark.parametrize('in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)]) def test_max_pool_2d(in_w, in_h, in_channel, out_channel, kernel_size, stride, padding, dilation): x_empty = torch.r...
@patch('torch.__version__', torch_version) @pytest.mark.parametrize('in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)]) @pytest.mark.skipif(((torch.__version__ == 'parrots') and (not torch.cuda.is_available())), reason='parrots...
@patch('torch.__version__', torch_version) @pytest.mark.parametrize('in_w,in_h,in_feature,out_feature', [(10, 10, 1, 1), (20, 20, 3, 3)]) def test_linear(in_w, in_h, in_feature, out_feature): x_empty = torch.randn(0, in_feature, requires_grad=True) torch.manual_seed(0) wrapper = Linear(in_feature, out_fea...
@patch('mmcv.cnn.bricks.wrappers.TORCH_VERSION', (1, 10)) def test_nn_op_forward_called(): for m in ['Conv2d', 'ConvTranspose2d', 'MaxPool2d']: with patch(f'torch.nn.{m}.forward') as nn_module_forward: x_empty = torch.randn(0, 3, 10, 10) wrapper = eval(m)(3, 2, 1) wrapp...
@contextmanager def build_temporary_directory(): 'Build a temporary directory containing many files to test\n ``FileClient.list_dir_or_file``.\n\n . \n\n | -- dir1 \n\n | -- | -- text3.txt \n\n | -- dir2 \n\n | -- | -- dir3 \n\n | -- | -- | -- text4.txt \n\n | -- | -- img.jpg \n\n | -- ...
@contextmanager
def delete_and_reset_method(obj, method):
    """Temporarily remove ``method`` from ``type(obj)``, restoring it on exit.

    The original attribute is deep-copied up front and reinstalled in a
    ``finally`` block, so restoration happens even if the body raises.
    """
    saved = deepcopy(getattr(type(obj), method))
    try:
        delattr(type(obj), method)
        yield
    finally:
        setattr(type(obj), method, saved)
class MockS3Client:
    """Minimal stand-in for an S3 client that serves files from local disk."""

    def __init__(self, enable_mc=True):
        self.enable_mc = enable_mc

    def Get(self, filepath):
        # return the raw bytes of the local file at ``filepath``
        with open(filepath, 'rb') as f:
            return f.read()
class MockPetrelClient(): def __init__(self, enable_mc=True, enable_multi_cluster=False): self.enable_mc = enable_mc self.enable_multi_cluster = enable_multi_cluster def Get(self, filepath): with open(filepath, 'rb') as f: content = f.read() return content de...
class MockMemcachedClient:
    """Fake memcached client: fills ``buffer.content`` from a local file."""

    def __init__(self, server_list_cfg, client_cfg):
        # configs are accepted for API compatibility only; nothing uses them
        pass

    def Get(self, filepath, buffer):
        with open(filepath, 'rb') as f:
            buffer.content = f.read()
class TestFileClient(): @classmethod def setup_class(cls): cls.test_data_dir = (Path(__file__).parent / 'data') cls.img_path = (cls.test_data_dir / 'color.jpg') cls.img_shape = (300, 400, 3) cls.text_path = (cls.test_data_dir / 'filelist.txt') def test_error(self): ...
def _test_handler(file_format, test_obj, str_checker, mode='r+'): dump_str = mmcv.dump(test_obj, file_format=file_format) str_checker(dump_str) tmp_filename = osp.join(tempfile.gettempdir(), 'mmcv_test_dump') mmcv.dump(test_obj, tmp_filename, file_format=file_format) assert osp.isfile(tmp_filename...
def test_json():
    """Round-trip the shared test object through the json handler."""

    def json_checker(dump_str):
        # dict key order is not guaranteed, so accept either serialization
        assert dump_str in ('[{"a": "abc", "b": 1}, 2, "c"]',
                            '[{"b": 1, "a": "abc"}, 2, "c"]')

    _test_handler('json', obj_for_test, json_checker)
def test_yaml():
    """Round-trip the shared test object through the yaml handler."""

    def yaml_checker(dump_str):
        # pyyaml may emit flow or block style, with either key order
        valid = ('- {a: abc, b: 1}\n- 2\n- c\n',
                 '- {b: 1, a: abc}\n- 2\n- c\n',
                 '- a: abc\n b: 1\n- 2\n- c\n',
                 '- b: 1\n a: abc\n- 2\n- c\n')
        assert dump_str in valid

    _test_handler('yaml', obj_for_test, yaml_checker)
def test_pickle():
    """Round-trip the shared test object through the pickle handler."""

    def pickle_checker(dump_str):
        import pickle
        # the pickled bytes must load back to an equal object
        assert pickle.loads(dump_str) == obj_for_test

    _test_handler('pickle', obj_for_test, pickle_checker, mode='rb+')
def test_exception():
    """dump() needs a file format, inferred from the filename or explicit."""
    test_obj = [{'a': 'abc', 'b': 1}, 2, 'c']
    # no filename and no file_format -> the handler cannot be inferred
    with pytest.raises(ValueError):
        mmcv.dump(test_obj)
    # '.txt' has no registered handler
    with pytest.raises(TypeError):
        mmcv.dump(test_obj, 'tmp.txt')
def test_register_handler(): @mmcv.register_handler('txt') class TxtHandler1(mmcv.BaseFileHandler): def load_from_fileobj(self, file): return file.read() def dump_to_fileobj(self, obj, file): file.write(str(obj)) def dump_to_str(self, obj, **kwargs): ...
def test_list_from_file(): filename = osp.join(osp.dirname(__file__), 'data/filelist.txt') filelist = mmcv.list_from_file(filename) assert (filelist == ['1.jpg', '2.jpg', '3.jpg', '4.jpg', '5.jpg']) filelist = mmcv.list_from_file(filename, prefix='a/') assert (filelist == ['a/1.jpg', 'a/2.jpg', 'a...
def test_dict_from_file(): filename = osp.join(osp.dirname(__file__), 'data/mapping.txt') mapping = mmcv.dict_from_file(filename) assert (mapping == {'1': 'cat', '2': ['dog', 'cow'], '3': 'panda'}) mapping = mmcv.dict_from_file(filename, key_type=int) assert (mapping == {1: 'cat', 2: ['dog', 'cow'...
@pytest.mark.skipif((torch is None), reason='requires torch library') def test_tensor2imgs(): with pytest.raises(AssertionError): tensor = np.random.rand(2, 3, 3) mmcv.tensor2imgs(tensor) with pytest.raises(AssertionError): tensor = torch.randn(2, 3, 3) mmcv.tensor2imgs(tensor)...
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_set_mmcv_home():
    """_get_mmcv_home must honour an explicitly set MMCV_HOME env var."""
    # clear any stale value before setting our own
    os.environ.pop(ENV_MMCV_HOME, None)
    mmcv_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
    os.environ[ENV_MMCV_HOME] = mmcv_home
    assert _get_mmcv_home() == mmcv_home
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')]) def test_default_mmcv_home(): os.environ.pop(ENV_MMCV_HOME, None) os.environ.pop(ENV_XDG_CACHE_HOME, None) assert (_get_mmcv_home() == os.path.expanduser(os.path.join(DEFAULT_CACHE_DIR, 'mmcv'))) model_urls = get_external_models() ...
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')]) def test_get_external_models(): os.environ.pop(ENV_MMCV_HOME, None) mmcv_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/') os.environ[ENV_MMCV_HOME] = mmcv_home ext_urls = get_external_models() assert (ext_urls =...
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')]) def test_get_deprecated_models(): os.environ.pop(ENV_MMCV_HOME, None) mmcv_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/') os.environ[ENV_MMCV_HOME] = mmcv_home dep_urls = get_deprecated_model_names() assert (d...
def load_from_http(url, map_location=None):
    """Stub loader: tag the URL so tests can tell which loader ran."""
    return 'url:' + url
def load_url(url, map_location=None, model_dir=None):
    """Stub for torch's load_url that simply delegates to the HTTP stub."""
    return load_from_http(url)
def load(filepath, map_location=None):
    """Stub for torch.load: tag the path to mark a local-file load."""
    return 'local:' + filepath
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')]) @patch('mmcv.runner.checkpoint.load_from_http', load_from_http) @patch('mmcv.runner.checkpoint.load_url', load_url) @patch('torch.load', load) def test_load_external_url(): url = _load_checkpoint('modelzoo://resnet50') if (TORCH_VERSION < '1....
@pytest.mark.parametrize('device', ['cpu', pytest.param('cuda', marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support'))]) def test_active_rotated_filter(device): feature = torch.tensor(np_feature, dtype=torch.float, device=device, requires_grad=True) indices = torch.tensor(n...
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') def test_paconv_assign_scores(): scores = torch.tensor([[[[0.06947571, 0.6065746], [0.28462553, 0.8378516], [0.7595994, 0.97220325], [0.519155, 0.766185]], [[0.15348864, 0.6051019], [0.21510637, 0.31916398], [0.00236845, 0.584259...
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') def test_ball_query(): new_xyz = torch.tensor([[[(- 0.074), 1.3147, (- 1.3625)], [(- 2.2769), 2.7817, (- 0.2334)], [(- 0.4003), 2.4666, (- 0.5116)], [(- 0.074), 1.3147, (- 1.3625)], [(- 0.074), 1.3147, (- 1.3625)]], [[(- 2.0289),...
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') class TestBBox(object): def _test_bbox_overlaps(self, dtype=torch.float): from mmcv.ops import bbox_overlaps b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0], [2.0, 2.0, 3.0, 4.0], [7.0, 7.0, 8.0, 8.0]]).cuda().type(dtype...
class TestBilinearGridSample(object): def _test_bilinear_grid_sample(self, dtype=torch.float, align_corners=False, multiplier=1, precision=0.001): from mmcv.ops.point_sample import bilinear_grid_sample input = torch.rand(1, 1, 20, 20, dtype=dtype) grid = torch.Tensor([[[1, 0, 0], [0, 1, 0...
def _test_border_align_allclose(device, dtype, pool_size): if ((not torch.cuda.is_available()) and (device == 'cuda')): pytest.skip('test requires GPU') try: from mmcv.ops import BorderAlign, border_align except ModuleNotFoundError: pytest.skip('BorderAlign op is not successfully c...
# Exercise the allclose check across every device/dtype/pool_size combination.
@pytest.mark.parametrize('device', ['cuda'])
@pytest.mark.parametrize('dtype', [torch.float, torch.half, torch.double])
@pytest.mark.parametrize('pool_size', [1, 2])
def test_border_align(device, dtype, pool_size):
    """Delegate to the shared BorderAlign correctness helper."""
    _test_border_align_allclose(device, dtype, pool_size)