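# Tests for build_dataloader: batch size, number of batches, sampler type and
# worker counts under distributed and non-distributed settings.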
def test_build_dataloader():
dataset = ToyDataset()
samples_per_gpu = 3
dataloader = build_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=2)
assert (dataloader.batch_size == samples_per_gpu)
assert (len(dataloader) == int(math.ceil((len(dataset) / samples_per_gpu))))
assert isinstance(dataloader.sampler, DistributedSampler)
assert dataloader.sampler.shuffle
dataloader = build_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=2, shuffle=False)
assert (dataloader.batch_size == samples_per_gpu)
assert (len(dataloader) == int(math.ceil((len(dataset) / samples_per_gpu))))
assert isinstance(dataloader.sampler, DistributedSampler)
assert (not dataloader.sampler.shuffle)
dataloader = build_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=2, num_gpus=8)
assert (dataloader.batch_size == samples_per_gpu)
assert (len(dataloader) == int(math.ceil((len(dataset) / samples_per_gpu))))
assert (dataloader.num_workers == 2)
dataloader = build_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=2, dist=False)
assert (dataloader.batch_size == samples_per_gpu)
assert (len(dataloader) == int(math.ceil((len(dataset) / samples_per_gpu))))
assert isinstance(dataloader.sampler, RandomSampler)
assert (dataloader.num_workers == 2)
dataloader = build_dataloader(dataset, samples_per_gpu=3, workers_per_gpu=2, shuffle=False, dist=False)
assert (dataloader.batch_size == samples_per_gpu)
assert (len(dataloader) == int(math.ceil((len(dataset) / samples_per_gpu))))
assert isinstance(dataloader.sampler, SequentialSampler)
assert (dataloader.num_workers == 2)
dataloader = build_dataloader(dataset, samples_per_gpu=3, workers_per_gpu=2, num_gpus=8, dist=False)
assert (dataloader.batch_size == (samples_per_gpu * 8))
assert (len(dataloader) == int(math.ceil(((len(dataset) / samples_per_gpu) / 8))))
assert isinstance(dataloader.sampler, RandomSampler)
assert (dataloader.num_workers == 16)
|
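# Tests for the data-loading transforms (LoadImageFromFile, LoadAnnotations)
# against the small sample images under tests/data.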
class TestLoading(object):
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../data')
def test_load_img(self):
results = dict(img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert (results['filename'] == osp.join(self.data_prefix, 'color.jpg'))
assert (results['ori_filename'] == 'color.jpg')
assert (results['img'].shape == (288, 512, 3))
assert (results['img'].dtype == np.uint8)
assert (results['img_shape'] == (288, 512, 3))
assert (results['ori_shape'] == (288, 512, 3))
assert (results['pad_shape'] == (288, 512, 3))
assert (results['scale_factor'] == 1.0)
np.testing.assert_equal(results['img_norm_cfg']['mean'], np.zeros(3, dtype=np.float32))
assert (repr(transform) == (transform.__class__.__name__ + "(to_float32=False,color_type='color',imdecode_backend='cv2')"))
results = dict(img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert (results['filename'] == 'tests/data/color.jpg')
assert (results['ori_filename'] == 'tests/data/color.jpg')
assert (results['img'].shape == (288, 512, 3))
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert (results['img'].dtype == np.float32)
results = dict(img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert (results['img'].shape == (288, 512, 3))
assert (results['img'].dtype == np.uint8)
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert (results['img'].shape == (288, 512))
assert (results['img'].dtype == np.uint8)
np.testing.assert_equal(results['img_norm_cfg']['mean'], np.zeros(1, dtype=np.float32))
def test_load_seg(self):
results = dict(seg_prefix=self.data_prefix, ann_info=dict(seg_map='seg.png'), seg_fields=[])
transform = LoadAnnotations()
results = transform(copy.deepcopy(results))
assert (results['seg_fields'] == ['gt_semantic_seg'])
assert (results['gt_semantic_seg'].shape == (288, 512))
assert (results['gt_semantic_seg'].dtype == np.uint8)
assert (repr(transform) == (transform.__class__.__name__ + "(reduce_zero_label=False,imdecode_backend='pillow')"))
results = dict(seg_prefix=None, ann_info=dict(seg_map='tests/data/seg.png'), seg_fields=[])
transform = LoadAnnotations()
results = transform(copy.deepcopy(results))
assert (results['gt_semantic_seg'].shape == (288, 512))
assert (results['gt_semantic_seg'].dtype == np.uint8)
transform = LoadAnnotations(reduce_zero_label=True)
results = transform(copy.deepcopy(results))
assert (results['gt_semantic_seg'].shape == (288, 512))
assert (results['gt_semantic_seg'].dtype == np.uint8)
results = dict(seg_prefix=self.data_prefix, ann_info=dict(seg_map='seg.png'), seg_fields=[])
transform = LoadAnnotations(imdecode_backend='pillow')
results = transform(copy.deepcopy(results))
assert (results['gt_semantic_seg'].shape == (288, 512))
assert (results['gt_semantic_seg'].dtype == np.uint8)
|
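# Minimal Dataset/Model stubs used by the EvalHook tests below.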
class ExampleDataset(Dataset):
def __getitem__(self, idx):
results = dict(img=torch.tensor([1]), img_metas=dict())
return results
def __len__(self):
return 1
|
class ExampleModel(nn.Module):
def __init__(self):
super(ExampleModel, self).__init__()
self.test_cfg = None
self.conv = nn.Conv2d(3, 3, 3)
def forward(self, img, img_metas, test_mode=False, **kwargs):
return img
def train_step(self, data_batch, optimizer):
loss = self.forward(**data_batch)
return dict(loss=loss)
|
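# EvalHook should raise TypeError for anything that is not a DataLoader, then
# call dataset.evaluate once per evaluation run.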
def test_eval_hook():
with pytest.raises(TypeError):
test_dataset = ExampleModel()
data_loader = [DataLoader(test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)]
EvalHook(data_loader)
test_dataset = ExampleDataset()
test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
loader = DataLoader(test_dataset, batch_size=1)
model = ExampleModel()
data_loader = DataLoader(test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer = obj_from_dict(optim_cfg, torch.optim, dict(params=model.parameters()))
with tempfile.TemporaryDirectory() as tmpdir:
eval_hook = EvalHook(data_loader)
runner = mmcv.runner.IterBasedRunner(model=model, optimizer=optimizer, work_dir=tmpdir, logger=logging.getLogger())
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 1)
test_dataset.evaluate.assert_called_with([torch.tensor([1])], logger=runner.logger)
|
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
results = single_gpu_test(model, data_loader)
return results
|
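# DistEvalHook test; mmseg.apis.multi_gpu_test is patched with the
# single-GPU stub above so the distributed hook can run locally.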
@patch('mmseg.apis.multi_gpu_test', multi_gpu_test)
def test_dist_eval_hook():
with pytest.raises(TypeError):
test_dataset = ExampleModel()
data_loader = [DataLoader(test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)]
DistEvalHook(data_loader)
test_dataset = ExampleDataset()
test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
loader = DataLoader(test_dataset, batch_size=1)
model = ExampleModel()
data_loader = DataLoader(test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer = obj_from_dict(optim_cfg, torch.optim, dict(params=model.parameters()))
with tempfile.TemporaryDirectory() as tmpdir:
eval_hook = DistEvalHook(data_loader)
runner = mmcv.runner.IterBasedRunner(model=model, optimizer=optimizer, work_dir=tmpdir, logger=logging.getLogger())
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 1)
test_dataset.evaluate.assert_called_with([torch.tensor([1])], logger=runner.logger)
|
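# Helper predicates shared by the backbone tests below.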
def is_block(modules):
'Check whether the module is a ResNet building block.'
return isinstance(modules, (BasicBlock, Bottleneck, BottleneckX))
|
def is_norm(modules):
'Check whether the module is one of the supported norm layers.'
return isinstance(modules, (GroupNorm, _BatchNorm))
|
def all_zeros(modules):
'Check whether the weight (and bias, if present) is all zeros.'
weight_zero = torch.allclose(modules.weight.data, torch.zeros_like(modules.weight.data))
if hasattr(modules, 'bias') and (modules.bias is not None):
bias_zero = torch.allclose(modules.bias.data, torch.zeros_like(modules.bias.data))
else:
bias_zero = True
return (weight_zero and bias_zero)
|
def check_norm_state(modules, train_state):
'Check whether all norm layers are in the expected train state.'
for mod in modules:
if isinstance(mod, _BatchNorm):
if (mod.training != train_state):
return False
return True
|
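# BasicBlock: DCN and plugins are unsupported and must assert; then check
# conv shapes and forward output with and without checkpointing.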
def test_resnet_basic_block():
with pytest.raises(AssertionError):
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
BasicBlock(64, 64, dcn=dcn)
with pytest.raises(AssertionError):
plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3')]
BasicBlock(64, 64, plugins=plugins)
with pytest.raises(AssertionError):
plugins = [dict(cfg=dict(type='GeneralizedAttention', spatial_range=(- 1), num_heads=8, attention_type='0010', kv_stride=2), position='after_conv2')]
BasicBlock(64, 64, plugins=plugins)
block = BasicBlock(16, 16, with_cp=True)
assert block.with_cp
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert (x_out.shape == torch.Size([1, 16, 56, 56]))
block = BasicBlock(64, 64)
assert (block.conv1.in_channels == 64)
assert (block.conv1.out_channels == 64)
assert (block.conv1.kernel_size == (3, 3))
assert (block.conv2.in_channels == 64)
assert (block.conv2.out_channels == 64)
assert (block.conv2.kernel_size == (3, 3))
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert (x_out.shape == torch.Size([1, 64, 56, 56]))
|
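# Bottleneck: style/plugin validation, caffe vs pytorch stride placement,
# DCN conv2 replacement, and plugin channel bookkeeping.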
def test_resnet_bottleneck():
with pytest.raises(AssertionError):
Bottleneck(64, 64, style='tensorflow')
with pytest.raises(AssertionError):
plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv4')]
Bottleneck(64, 16, plugins=plugins)
with pytest.raises(AssertionError):
plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3')]
Bottleneck(64, 16, plugins=plugins)
with pytest.raises(KeyError):
plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
Bottleneck(64, 16, plugins=plugins)
block = Bottleneck(64, 16, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert (x_out.shape == torch.Size([1, 64, 56, 56]))
block = Bottleneck(64, 64, stride=2, style='pytorch')
assert (block.conv1.stride == (1, 1))
assert (block.conv2.stride == (2, 2))
block = Bottleneck(64, 64, stride=2, style='caffe')
assert (block.conv1.stride == (2, 2))
assert (block.conv2.stride == (1, 1))
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))
block = Bottleneck(64, 64, dcn=dcn)
assert isinstance(block.conv2, DeformConv2dPack)
block = Bottleneck(64, 16)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert (x_out.shape == torch.Size([1, 64, 56, 56]))
plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3')]
block = Bottleneck(64, 16, plugins=plugins)
assert (block.context_block.in_channels == 64)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert (x_out.shape == torch.Size([1, 64, 56, 56]))
plugins = [dict(cfg=dict(type='GeneralizedAttention', spatial_range=(- 1), num_heads=8, attention_type='0010', kv_stride=2), position='after_conv2')]
block = Bottleneck(64, 16, plugins=plugins)
assert (block.gen_attention_block.in_channels == 16)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert (x_out.shape == torch.Size([1, 64, 56, 56]))
plugins = [dict(cfg=dict(type='GeneralizedAttention', spatial_range=(- 1), num_heads=8, attention_type='0010', kv_stride=2), position='after_conv2'), dict(cfg=dict(type='NonLocal2d'), position='after_conv2'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3')]
block = Bottleneck(64, 16, plugins=plugins)
assert (block.gen_attention_block.in_channels == 16)
assert (block.nonlocal_block.in_channels == 16)
assert (block.context_block.in_channels == 64)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert (x_out.shape == torch.Size([1, 64, 56, 56]))
plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=1), position='after_conv2'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=2), position='after_conv3'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=3), position='after_conv3')]
block = Bottleneck(64, 16, plugins=plugins)
assert (block.context_block1.in_channels == 16)
assert (block.context_block2.in_channels == 64)
assert (block.context_block3.in_channels == 64)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert (x_out.shape == torch.Size([1, 64, 56, 56]))
|
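# ResLayer: downsample creation, stride/avg_down behavior, and dilation
# options (contract_dilation, multi_grid).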
def test_resnet_res_layer():
layer = ResLayer(Bottleneck, 64, 16, 3)
assert (len(layer) == 3)
assert (layer[0].conv1.in_channels == 64)
assert (layer[0].conv1.out_channels == 16)
for i in range(1, len(layer)):
assert (layer[i].conv1.in_channels == 64)
assert (layer[i].conv1.out_channels == 16)
for i in range(len(layer)):
assert (layer[i].downsample is None)
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert (x_out.shape == torch.Size([1, 64, 56, 56]))
layer = ResLayer(Bottleneck, 64, 64, 3)
assert (layer[0].downsample[0].out_channels == 256)
for i in range(1, len(layer)):
assert (layer[i].downsample is None)
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert (x_out.shape == torch.Size([1, 256, 56, 56]))
layer = ResLayer(Bottleneck, 64, 64, 3, stride=2)
assert (layer[0].downsample[0].out_channels == 256)
assert (layer[0].downsample[0].stride == (2, 2))
for i in range(1, len(layer)):
assert (layer[i].downsample is None)
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert (x_out.shape == torch.Size([1, 256, 28, 28]))
layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True)
assert isinstance(layer[0].downsample[0], AvgPool2d)
assert (layer[0].downsample[1].out_channels == 256)
assert (layer[0].downsample[1].stride == (1, 1))
for i in range(1, len(layer)):
assert (layer[i].downsample is None)
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert (x_out.shape == torch.Size([1, 256, 28, 28]))
layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2)
for i in range(len(layer)):
assert (layer[i].conv2.dilation == (2, 2))
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert (x_out.shape == torch.Size([1, 64, 56, 56]))
layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2, contract_dilation=True)
assert (layer[0].conv2.dilation == (1, 1))
for i in range(1, len(layer)):
assert (layer[i].conv2.dilation == (2, 2))
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert (x_out.shape == torch.Size([1, 64, 56, 56]))
layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2, multi_grid=(1, 2, 4))
assert (layer[0].conv2.dilation == (1, 1))
assert (layer[1].conv2.dilation == (2, 2))
assert (layer[2].conv2.dilation == (4, 4))
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert (x_out.shape == torch.Size([1, 64, 56, 56]))
|
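# Full ResNet/ResNetV1d backbone tests: invalid configs, norm_eval, frozen
# stages, checkpointing, GN, plugins, and zero-init residuals.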
def test_resnet_backbone():
'Test ResNet backbone.'
with pytest.raises(KeyError):
ResNet(20)
with pytest.raises(AssertionError):
ResNet(50, num_stages=0)
with pytest.raises(AssertionError):
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
ResNet(50, dcn=dcn, stage_with_dcn=(True,))
with pytest.raises(AssertionError):
plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), stages=(False, True, True), position='after_conv3')]
ResNet(50, plugins=plugins)
with pytest.raises(AssertionError):
ResNet(50, num_stages=5)
with pytest.raises(AssertionError):
ResNet(50, strides=(1,), dilations=(1, 1), num_stages=3)
with pytest.raises(TypeError):
model = ResNet(50)
model.init_weights(pretrained=0)
with pytest.raises(AssertionError):
ResNet(50, style='tensorflow')
model = ResNet(50, norm_eval=True)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
model = ResNet(depth=50, norm_eval=True)
model.init_weights('torchvision://resnet50')
model.train()
assert check_norm_state(model.modules(), False)
frozen_stages = 1
model = ResNet(50, frozen_stages=frozen_stages)
model.init_weights()
model.train()
assert (model.norm1.training is False)
for layer in [model.conv1, model.norm1]:
for param in layer.parameters():
assert (param.requires_grad is False)
for i in range(1, (frozen_stages + 1)):
layer = getattr(model, 'layer{}'.format(i))
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert (mod.training is False)
for param in layer.parameters():
assert (param.requires_grad is False)
model = ResNetV1d(depth=50, frozen_stages=frozen_stages)
assert (len(model.stem) == 9)
model.init_weights()
model.train()
assert check_norm_state(model.stem, False)
for param in model.stem.parameters():
assert (param.requires_grad is False)
for i in range(1, (frozen_stages + 1)):
layer = getattr(model, 'layer{}'.format(i))
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert (mod.training is False)
for param in layer.parameters():
assert (param.requires_grad is False)
model = ResNet(18)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert (len(feat) == 4)
assert (feat[0].shape == torch.Size([1, 64, 56, 56]))
assert (feat[1].shape == torch.Size([1, 128, 28, 28]))
assert (feat[2].shape == torch.Size([1, 256, 14, 14]))
assert (feat[3].shape == torch.Size([1, 512, 7, 7]))
model = ResNet(50)
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert (len(feat) == 4)
assert (feat[0].shape == torch.Size([1, 256, 56, 56]))
assert (feat[1].shape == torch.Size([1, 512, 28, 28]))
assert (feat[2].shape == torch.Size([1, 1024, 14, 14]))
assert (feat[3].shape == torch.Size([1, 2048, 7, 7]))
model = ResNet(50, out_indices=(0, 1, 2))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert (len(feat) == 3)
assert (feat[0].shape == torch.Size([1, 256, 56, 56]))
assert (feat[1].shape == torch.Size([1, 512, 28, 28]))
assert (feat[2].shape == torch.Size([1, 1024, 14, 14]))
model = ResNet(18, with_cp=True)
for m in model.modules():
if is_block(m):
assert m.with_cp
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert (len(feat) == 4)
assert (feat[0].shape == torch.Size([1, 64, 56, 56]))
assert (feat[1].shape == torch.Size([1, 128, 28, 28]))
assert (feat[2].shape == torch.Size([1, 256, 14, 14]))
assert (feat[3].shape == torch.Size([1, 512, 7, 7]))
model = ResNet(50, with_cp=True)
for m in model.modules():
if is_block(m):
assert m.with_cp
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert (len(feat) == 4)
assert (feat[0].shape == torch.Size([1, 256, 56, 56]))
assert (feat[1].shape == torch.Size([1, 512, 28, 28]))
assert (feat[2].shape == torch.Size([1, 1024, 14, 14]))
assert (feat[3].shape == torch.Size([1, 2048, 7, 7]))
model = ResNet(50, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
for m in model.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert (len(feat) == 4)
assert (feat[0].shape == torch.Size([1, 256, 56, 56]))
assert (feat[1].shape == torch.Size([1, 512, 28, 28]))
assert (feat[2].shape == torch.Size([1, 1024, 14, 14]))
assert (feat[3].shape == torch.Size([1, 2048, 7, 7]))
plugins = [dict(cfg=dict(type='GeneralizedAttention', spatial_range=(- 1), num_heads=8, attention_type='0010', kv_stride=2), stages=(False, True, True, True), position='after_conv2'), dict(cfg=dict(type='NonLocal2d'), position='after_conv2'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), stages=(False, True, True, False), position='after_conv3')]
model = ResNet(50, plugins=plugins)
for m in model.layer1.modules():
if is_block(m):
assert (not hasattr(m, 'context_block'))
assert (not hasattr(m, 'gen_attention_block'))
assert (m.nonlocal_block.in_channels == 64)
for m in model.layer2.modules():
if is_block(m):
assert (m.nonlocal_block.in_channels == 128)
assert (m.gen_attention_block.in_channels == 128)
assert (m.context_block.in_channels == 512)
for m in model.layer3.modules():
if is_block(m):
assert (m.nonlocal_block.in_channels == 256)
assert (m.gen_attention_block.in_channels == 256)
assert (m.context_block.in_channels == 1024)
for m in model.layer4.modules():
if is_block(m):
assert (m.nonlocal_block.in_channels == 512)
assert (m.gen_attention_block.in_channels == 512)
assert (not hasattr(m, 'context_block'))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert (len(feat) == 4)
assert (feat[0].shape == torch.Size([1, 256, 56, 56]))
assert (feat[1].shape == torch.Size([1, 512, 28, 28]))
assert (feat[2].shape == torch.Size([1, 1024, 14, 14]))
assert (feat[3].shape == torch.Size([1, 2048, 7, 7]))
plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=1), stages=(False, True, True, False), position='after_conv3'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=2), stages=(False, True, True, False), position='after_conv3')]
model = ResNet(50, plugins=plugins)
for m in model.layer1.modules():
if is_block(m):
assert (not hasattr(m, 'context_block'))
assert (not hasattr(m, 'context_block1'))
assert (not hasattr(m, 'context_block2'))
for m in model.layer2.modules():
if is_block(m):
assert (not hasattr(m, 'context_block'))
assert (m.context_block1.in_channels == 512)
assert (m.context_block2.in_channels == 512)
for m in model.layer3.modules():
if is_block(m):
assert (not hasattr(m, 'context_block'))
assert (m.context_block1.in_channels == 1024)
assert (m.context_block2.in_channels == 1024)
for m in model.layer4.modules():
if is_block(m):
assert (not hasattr(m, 'context_block'))
assert (not hasattr(m, 'context_block1'))
assert (not hasattr(m, 'context_block2'))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert (len(feat) == 4)
assert (feat[0].shape == torch.Size([1, 256, 56, 56]))
assert (feat[1].shape == torch.Size([1, 512, 28, 28]))
assert (feat[2].shape == torch.Size([1, 1024, 14, 14]))
assert (feat[3].shape == torch.Size([1, 2048, 7, 7]))
model = ResNet(50, zero_init_residual=True)
model.init_weights()
for m in model.modules():
if isinstance(m, Bottleneck):
assert all_zeros(m.norm3)
elif isinstance(m, BasicBlock):
assert all_zeros(m.norm2)
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert (len(feat) == 4)
assert (feat[0].shape == torch.Size([1, 256, 56, 56]))
assert (feat[1].shape == torch.Size([1, 512, 28, 28]))
assert (feat[2].shape == torch.Size([1, 1024, 14, 14]))
assert (feat[3].shape == torch.Size([1, 2048, 7, 7]))
model = ResNetV1d(depth=50)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert (len(feat) == 4)
assert (feat[0].shape == torch.Size([1, 256, 56, 56]))
assert (feat[1].shape == torch.Size([1, 512, 28, 28]))
assert (feat[2].shape == torch.Size([1, 1024, 14, 14]))
assert (feat[3].shape == torch.Size([1, 2048, 7, 7]))
|
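# ResNeXt bottleneck: grouped conv2 with base_width-scaled channels.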
def test_resnext_bottleneck():
with pytest.raises(AssertionError):
BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')
block = BottleneckX(64, 64, groups=32, base_width=4, stride=2, style='pytorch')
assert (block.conv2.stride == (2, 2))
assert (block.conv2.groups == 32)
assert (block.conv2.out_channels == 128)
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
BottleneckX(64, 64, groups=32, base_width=4, dcn=dcn, conv_cfg=dict(type='Conv'))
BottleneckX(64, 64, dcn=dcn)
block = BottleneckX(64, 16, groups=32, base_width=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert (x_out.shape == torch.Size([1, 64, 56, 56]))
|
def test_resnext_backbone():
with pytest.raises(KeyError):
ResNeXt(depth=18)
model = ResNeXt(depth=50, groups=32, base_width=4)
print(model)
for m in model.modules():
if is_block(m):
assert (m.conv2.groups == 32)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert (len(feat) == 4)
assert (feat[0].shape == torch.Size([1, 256, 56, 56]))
assert (feat[1].shape == torch.Size([1, 512, 28, 28]))
assert (feat[2].shape == torch.Size([1, 1024, 14, 14]))
assert (feat[3].shape == torch.Size([1, 2048, 7, 7]))
|
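# Fast-SCNN backbone: returns higher-resolution, global and fusion features.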
def test_fastscnn_backbone():
with pytest.raises(AssertionError):
FastSCNN(3, (32, 48), 64, (64, 96, 128), (2, 2, 1), global_out_channels=127, higher_in_channels=64, lower_in_channels=128)
model = FastSCNN()
model.init_weights()
model.train()
batch_size = 4
imgs = torch.randn(batch_size, 3, 512, 1024)
feat = model(imgs)
assert (len(feat) == 3)
assert (feat[0].shape == torch.Size([batch_size, 64, 64, 128]))
assert (feat[1].shape == torch.Size([batch_size, 128, 16, 32]))
assert (feat[2].shape == torch.Size([batch_size, 128, 64, 128]))
|
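# ResNeSt: split-attention bottleneck and backbone smoke tests.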
def test_resnest_bottleneck():
with pytest.raises(AssertionError):
BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')
block = BottleneckS(64, 256, radix=2, reduction_factor=4, stride=2, style='pytorch')
assert (block.avd_layer.stride == 2)
assert (block.conv2.channels == 256)
block = BottleneckS(64, 16, radix=2, reduction_factor=4)
x = torch.randn(2, 64, 56, 56)
x_out = block(x)
assert (x_out.shape == torch.Size([2, 64, 56, 56]))
|
def test_resnest_backbone():
with pytest.raises(KeyError):
ResNeSt(depth=18)
model = ResNeSt(depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
imgs = torch.randn(2, 3, 224, 224)
feat = model(imgs)
assert (len(feat) == 4)
assert (feat[0].shape == torch.Size([2, 256, 56, 56]))
assert (feat[1].shape == torch.Size([2, 512, 28, 28]))
assert (feat[2].shape == torch.Size([2, 1024, 14, 14]))
assert (feat[3].shape == torch.Size([2, 2048, 7, 7]))
|
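# Helpers for the decode-head tests: norm checking inside ConvModules and
# moving a module plus its inputs to GPU when available.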
def _conv_has_norm(module, sync_bn):
for m in module.modules():
if isinstance(m, ConvModule):
if (not m.with_norm):
return False
if sync_bn:
if (not isinstance(m.bn, SyncBatchNorm)):
return False
return True
|
def to_cuda(module, data):
module = module.cuda()
if isinstance(data, list):
for i in range(len(data)):
data[i] = data[i].cuda()
return (module, data)
|
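# BaseDecodeHead: input_transform validation and _transform_inputs behavior.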
@patch.multiple(BaseDecodeHead, __abstractmethods__=set())
def test_decode_head():
with pytest.raises(AssertionError):
BaseDecodeHead([32, 16], 16, num_classes=19)
with pytest.raises(AssertionError):
BaseDecodeHead(32, 16, num_classes=19, in_index=[(- 1), (- 2)])
with pytest.raises(AssertionError):
BaseDecodeHead(32, 16, num_classes=19, input_transform='concat')
with pytest.raises(AssertionError):
BaseDecodeHead(32, 16, num_classes=19, input_transform='resize_concat')
with pytest.raises(AssertionError):
BaseDecodeHead([32], 16, in_index=(- 1), num_classes=19, input_transform='resize_concat')
with pytest.raises(AssertionError):
BaseDecodeHead([32, 16], 16, num_classes=19, in_index=[(- 1)], input_transform='resize_concat')
head = BaseDecodeHead(32, 16, num_classes=19)
assert (hasattr(head, 'dropout') and (head.dropout.p == 0.1))
head = BaseDecodeHead(32, 16, num_classes=19, dropout_ratio=0.2)
assert (hasattr(head, 'dropout') and (head.dropout.p == 0.2))
inputs = [torch.randn(1, 32, 45, 45)]
head = BaseDecodeHead(32, 16, num_classes=19)
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
assert (head.in_channels == 32)
assert (head.input_transform is None)
transformed_inputs = head._transform_inputs(inputs)
assert (transformed_inputs.shape == (1, 32, 45, 45))
inputs = [torch.randn(1, 32, 45, 45), torch.randn(1, 16, 21, 21)]
head = BaseDecodeHead([32, 16], 16, num_classes=19, in_index=[0, 1], input_transform='resize_concat')
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
assert (head.in_channels == 48)
assert (head.input_transform == 'resize_concat')
transformed_inputs = head._transform_inputs(inputs)
assert (transformed_inputs.shape == (1, 48, 45, 45))
|
def test_fcn_head():
with pytest.raises(AssertionError):
FCNHead(num_classes=19, num_convs=0)
head = FCNHead(in_channels=32, channels=16, num_classes=19)
for m in head.modules():
if isinstance(m, ConvModule):
assert (not m.with_norm)
head = FCNHead(in_channels=32, channels=16, num_classes=19, norm_cfg=dict(type='SyncBN'))
for m in head.modules():
if isinstance(m, ConvModule):
assert (m.with_norm and isinstance(m.bn, SyncBatchNorm))
inputs = [torch.randn(1, 32, 45, 45)]
head = FCNHead(in_channels=32, channels=16, num_classes=19, concat_input=False)
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
assert (len(head.convs) == 2)
assert ((not head.concat_input) and (not hasattr(head, 'conv_cat')))
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
inputs = [torch.randn(1, 32, 45, 45)]
head = FCNHead(in_channels=32, channels=16, num_classes=19, concat_input=True)
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
assert (len(head.convs) == 2)
assert head.concat_input
assert (head.conv_cat.in_channels == 48)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
inputs = [torch.randn(1, 32, 45, 45)]
head = FCNHead(in_channels=32, channels=16, num_classes=19)
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
for i in range(len(head.convs)):
assert (head.convs[i].kernel_size == (3, 3))
assert (head.convs[i].padding == 1)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
inputs = [torch.randn(1, 32, 45, 45)]
head = FCNHead(in_channels=32, channels=16, num_classes=19, kernel_size=1)
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
for i in range(len(head.convs)):
assert (head.convs[i].kernel_size == (1, 1))
assert (head.convs[i].padding == 0)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
inputs = [torch.randn(1, 32, 45, 45)]
head = FCNHead(in_channels=32, channels=16, num_classes=19, num_convs=1)
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
assert (len(head.convs) == 1)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
|
def test_psp_head():
with pytest.raises(AssertionError):
PSPHead(in_channels=32, channels=16, num_classes=19, pool_scales=1)
head = PSPHead(in_channels=32, channels=16, num_classes=19)
assert (not _conv_has_norm(head, sync_bn=False))
head = PSPHead(in_channels=32, channels=16, num_classes=19, norm_cfg=dict(type='SyncBN'))
assert _conv_has_norm(head, sync_bn=True)
inputs = [torch.randn(1, 32, 45, 45)]
head = PSPHead(in_channels=32, channels=16, num_classes=19, pool_scales=(1, 2, 3))
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
assert (head.psp_modules[0][0].output_size == 1)
assert (head.psp_modules[1][0].output_size == 2)
assert (head.psp_modules[2][0].output_size == 3)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
|
def test_aspp_head():
with pytest.raises(AssertionError):
ASPPHead(in_channels=32, channels=16, num_classes=19, dilations=1)
head = ASPPHead(in_channels=32, channels=16, num_classes=19)
assert (not _conv_has_norm(head, sync_bn=False))
head = ASPPHead(in_channels=32, channels=16, num_classes=19, norm_cfg=dict(type='SyncBN'))
assert _conv_has_norm(head, sync_bn=True)
inputs = [torch.randn(1, 32, 45, 45)]
head = ASPPHead(in_channels=32, channels=16, num_classes=19, dilations=(1, 12, 24))
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
assert (head.aspp_modules[0].conv.dilation == (1, 1))
assert (head.aspp_modules[1].conv.dilation == (12, 12))
assert (head.aspp_modules[2].conv.dilation == (24, 24))
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
|
def test_psa_head():
with pytest.raises(AssertionError):
PSAHead(in_channels=32, channels=16, num_classes=19, mask_size=(39, 39), psa_type='gather')
head = PSAHead(in_channels=32, channels=16, num_classes=19, mask_size=(39, 39))
assert (not _conv_has_norm(head, sync_bn=False))
head = PSAHead(in_channels=32, channels=16, num_classes=19, mask_size=(39, 39), norm_cfg=dict(type='SyncBN'))
assert _conv_has_norm(head, sync_bn=True)
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(in_channels=32, channels=16, num_classes=19, mask_size=(39, 39))
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 39, 39))
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(in_channels=32, channels=16, num_classes=19, mask_size=(39, 39), shrink_factor=1)
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 39, 39))
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(in_channels=32, channels=16, num_classes=19, mask_size=(39, 39), psa_softmax=True)
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 39, 39))
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(in_channels=32, channels=16, num_classes=19, mask_size=(39, 39), psa_type='collect')
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 39, 39))
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(in_channels=32, channels=16, num_classes=19, mask_size=(39, 39), shrink_factor=1, psa_type='collect')
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 39, 39))
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(in_channels=32, channels=16, num_classes=19, mask_size=(39, 39), psa_type='collect', shrink_factor=1, compact=True)
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 39, 39))
inputs = [torch.randn(1, 32, 39, 39)]
head = PSAHead(in_channels=32, channels=16, num_classes=19, mask_size=(39, 39), psa_type='distribute')
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 39, 39))
|
def test_gc_head():
head = GCHead(in_channels=32, channels=16, num_classes=19)
assert (len(head.convs) == 2)
assert hasattr(head, 'gc_block')
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
|
def test_nl_head():
head = NLHead(in_channels=32, channels=16, num_classes=19)
assert (len(head.convs) == 2)
assert hasattr(head, 'nl_block')
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
|
def test_cc_head():
head = CCHead(in_channels=32, channels=16, num_classes=19)
assert (len(head.convs) == 2)
assert hasattr(head, 'cca')
if (not torch.cuda.is_available()):
pytest.skip('CCHead requires CUDA')
inputs = [torch.randn(1, 32, 45, 45)]
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
|
def test_uper_head():
with pytest.raises(AssertionError):
UPerHead(in_channels=32, channels=16, num_classes=19)
head = UPerHead(in_channels=[32, 16], channels=16, num_classes=19, in_index=[(- 2), (- 1)])
assert (not _conv_has_norm(head, sync_bn=False))
head = UPerHead(in_channels=[32, 16], channels=16, num_classes=19, norm_cfg=dict(type='SyncBN'), in_index=[(- 2), (- 1)])
assert _conv_has_norm(head, sync_bn=True)
inputs = [torch.randn(1, 32, 45, 45), torch.randn(1, 16, 21, 21)]
head = UPerHead(in_channels=[32, 16], channels=16, num_classes=19, in_index=[(- 2), (- 1)])
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
|
def test_ann_head():
inputs = [torch.randn(1, 16, 45, 45), torch.randn(1, 32, 21, 21)]
head = ANNHead(in_channels=[16, 32], channels=16, num_classes=19, in_index=[(- 2), (- 1)], project_channels=8)
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 21, 21))
|
def test_da_head():
inputs = [torch.randn(1, 32, 45, 45)]
head = DAHead(in_channels=32, channels=16, num_classes=19, pam_channels=8)
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (isinstance(outputs, tuple) and (len(outputs) == 3))
for output in outputs:
assert (output.shape == (1, head.num_classes, 45, 45))
test_output = head.forward_test(inputs, None, None)
assert (test_output.shape == (1, head.num_classes, 45, 45))
|
def test_ocr_head():
inputs = [torch.randn(1, 32, 45, 45)]
ocr_head = OCRHead(in_channels=32, channels=16, num_classes=19, ocr_channels=8)
fcn_head = FCNHead(in_channels=32, channels=16, num_classes=19)
if torch.cuda.is_available():
(ocr_head, inputs) = to_cuda(ocr_head, inputs)
(fcn_head, inputs) = to_cuda(fcn_head, inputs)
prev_output = fcn_head(inputs)
output = ocr_head(inputs, prev_output)
assert (output.shape == (1, ocr_head.num_classes, 45, 45))
|
def test_enc_head():
inputs = [torch.randn(1, 32, 21, 21)]
head = EncHead(in_channels=[32], channels=16, num_classes=19, in_index=[(- 1)])
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (isinstance(outputs, tuple) and (len(outputs) == 2))
assert (outputs[0].shape == (1, head.num_classes, 21, 21))
assert (outputs[1].shape == (1, head.num_classes))
inputs = [torch.randn(1, 32, 21, 21)]
head = EncHead(in_channels=[32], channels=16, use_se_loss=False, num_classes=19, in_index=[(- 1)])
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 21, 21))
inputs = [torch.randn(1, 16, 45, 45), torch.randn(1, 32, 21, 21)]
head = EncHead(in_channels=[16, 32], channels=16, add_lateral=True, num_classes=19, in_index=[(- 2), (- 1)])
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (isinstance(outputs, tuple) and (len(outputs) == 2))
assert (outputs[0].shape == (1, head.num_classes, 21, 21))
assert (outputs[1].shape == (1, head.num_classes))
test_output = head.forward_test(inputs, None, None)
assert (test_output.shape == (1, head.num_classes, 21, 21))
|
def test_dw_aspp_head():
inputs = [torch.randn(1, 32, 45, 45)]
head = DepthwiseSeparableASPPHead(c1_in_channels=0, c1_channels=0, in_channels=32, channels=16, num_classes=19, dilations=(1, 12, 24))
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
assert (head.c1_bottleneck is None)
assert (head.aspp_modules[0].conv.dilation == (1, 1))
assert (head.aspp_modules[1].depthwise_conv.dilation == (12, 12))
assert (head.aspp_modules[2].depthwise_conv.dilation == (24, 24))
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
inputs = [torch.randn(1, 8, 45, 45), torch.randn(1, 32, 21, 21)]
head = DepthwiseSeparableASPPHead(c1_in_channels=8, c1_channels=4, in_channels=32, channels=16, num_classes=19, dilations=(1, 12, 24))
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
assert (head.c1_bottleneck.in_channels == 8)
assert (head.c1_bottleneck.out_channels == 4)
assert (head.aspp_modules[0].conv.dilation == (1, 1))
assert (head.aspp_modules[1].depthwise_conv.dilation == (12, 12))
assert (head.aspp_modules[2].depthwise_conv.dilation == (24, 24))
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
|
def test_sep_fcn_head():
head = DepthwiseSeparableFCNHead(in_channels=128, channels=128, concat_input=False, num_classes=19, in_index=(- 1), norm_cfg=dict(type='BN', requires_grad=True, momentum=0.01))
x = [torch.rand(2, 128, 32, 32)]
output = head(x)
assert (output.shape == (2, head.num_classes, 32, 32))
assert (not head.concat_input)
from mmseg.ops.separable_conv_module import DepthwiseSeparableConvModule
assert isinstance(head.convs[0], DepthwiseSeparableConvModule)
assert isinstance(head.convs[1], DepthwiseSeparableConvModule)
assert (head.conv_seg.kernel_size == (1, 1))
head = DepthwiseSeparableFCNHead(in_channels=64, channels=64, concat_input=True, num_classes=19, in_index=(- 1), norm_cfg=dict(type='BN', requires_grad=True, momentum=0.01))
x = [torch.rand(3, 64, 32, 32)]
output = head(x)
assert (output.shape == (3, head.num_classes, 32, 32))
assert head.concat_input
assert isinstance(head.convs[0], DepthwiseSeparableConvModule)
assert isinstance(head.convs[1], DepthwiseSeparableConvModule)
|
def test_dnl_head():
head = DNLHead(in_channels=32, channels=16, num_classes=19)
assert (len(head.convs) == 2)
assert hasattr(head, 'dnl_block')
assert (head.dnl_block.temperature == 0.05)
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
head = DNLHead(in_channels=32, channels=16, num_classes=19, mode='dot_product')
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
head = DNLHead(in_channels=32, channels=16, num_classes=19, mode='gaussian')
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
head = DNLHead(in_channels=32, channels=16, num_classes=19, mode='concatenation')
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
|
def test_emanet_head():
head = EMAHead(in_channels=32, ema_channels=24, channels=16, num_stages=3, num_bases=16, num_classes=19)
for param in head.ema_mid_conv.parameters():
assert (not param.requires_grad)
assert hasattr(head, 'ema_module')
inputs = [torch.randn(1, 32, 45, 45)]
if torch.cuda.is_available():
(head, inputs) = to_cuda(head, inputs)
outputs = head(inputs)
assert (outputs.shape == (1, head.num_classes, 45, 45))
|
def test_point_head():
inputs = [torch.randn(1, 32, 45, 45)]
point_head = PointHead(in_channels=[32], in_index=[0], channels=16, num_classes=19)
assert (len(point_head.fcs) == 3)
fcn_head = FCNHead(in_channels=32, channels=16, num_classes=19)
if torch.cuda.is_available():
(point_head, inputs) = to_cuda(point_head, inputs)
(fcn_head, inputs) = to_cuda(fcn_head, inputs)
prev_output = fcn_head(inputs)
test_cfg = ConfigDict(subdivision_steps=2, subdivision_num_points=8196, scale_factor=2)
output = point_head.forward_test(inputs, prev_output, None, test_cfg)
assert (output.shape == (1, point_head.num_classes, 180, 180))
|
def test_fpn():
in_channels = [256, 512, 1024, 2048]
inputs = [torch.randn(1, c, (56 // (2 ** i)), (56 // (2 ** i))) for (i, c) in enumerate(in_channels)]
fpn = FPN(in_channels, 256, len(in_channels))
outputs = fpn(inputs)
assert (outputs[0].shape == torch.Size([1, 256, 56, 56]))
assert (outputs[1].shape == torch.Size([1, 256, 28, 28]))
assert (outputs[2].shape == torch.Size([1, 256, 14, 14]))
assert (outputs[3].shape == torch.Size([1, 256, 7, 7]))
|
def test_depthwise_separable_conv():
with pytest.raises(AssertionError):
DepthwiseSeparableConvModule(4, 8, 2, groups=2)
conv = DepthwiseSeparableConvModule(3, 8, 2)
assert (conv.depthwise_conv.conv.groups == 3)
assert (conv.pointwise_conv.conv.kernel_size == (1, 1))
assert (not conv.depthwise_conv.with_norm)
assert (not conv.pointwise_conv.with_norm)
assert (conv.depthwise_conv.activate.__class__.__name__ == 'ReLU')
assert (conv.pointwise_conv.activate.__class__.__name__ == 'ReLU')
x = torch.rand(1, 3, 256, 256)
output = conv(x)
assert (output.shape == (1, 8, 255, 255))
conv = DepthwiseSeparableConvModule(3, 8, 2, dw_norm_cfg=dict(type='BN'))
assert (conv.depthwise_conv.norm_name == 'bn')
assert (not conv.pointwise_conv.with_norm)
x = torch.rand(1, 3, 256, 256)
output = conv(x)
assert (output.shape == (1, 8, 255, 255))
conv = DepthwiseSeparableConvModule(3, 8, 2, pw_norm_cfg=dict(type='BN'))
assert (not conv.depthwise_conv.with_norm)
assert (conv.pointwise_conv.norm_name == 'bn')
x = torch.rand(1, 3, 256, 256)
output = conv(x)
assert (output.shape == (1, 8, 255, 255))
conv = DepthwiseSeparableConvModule(3, 8, 2, order=('norm', 'conv', 'act'))
x = torch.rand(1, 3, 256, 256)
output = conv(x)
assert (output.shape == (1, 8, 255, 255))
conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, with_spectral_norm=True)
assert hasattr(conv.depthwise_conv.conv, 'weight_orig')
assert hasattr(conv.pointwise_conv.conv, 'weight_orig')
output = conv(x)
assert (output.shape == (1, 8, 256, 256))
conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, padding_mode='reflect')
assert isinstance(conv.depthwise_conv.padding_layer, nn.ReflectionPad2d)
output = conv(x)
assert (output.shape == (1, 8, 256, 256))
conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, dw_act_cfg=dict(type='LeakyReLU'))
assert (conv.depthwise_conv.activate.__class__.__name__ == 'LeakyReLU')
assert (conv.pointwise_conv.activate.__class__.__name__ == 'ReLU')
output = conv(x)
assert (output.shape == (1, 8, 256, 256))
conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, pw_act_cfg=dict(type='LeakyReLU'))
assert (conv.depthwise_conv.activate.__class__.__name__ == 'ReLU')
assert (conv.pointwise_conv.activate.__class__.__name__ == 'LeakyReLU')
output = conv(x)
assert (output.shape == (1, 8, 256, 256))
|
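# OHEM sampler: label shape must match logits; with thresh set, at least
# min_kept pixels are kept, otherwise exactly min_kept.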
def _context_for_ohem():
return FCNHead(in_channels=32, channels=16, num_classes=19)
|
def test_ohem_sampler():
with pytest.raises(AssertionError):
sampler = OHEMPixelSampler(context=_context_for_ohem())
seg_logit = torch.randn(1, 19, 45, 45)
seg_label = torch.randint(0, 19, size=(1, 1, 89, 89))
sampler.sample(seg_logit, seg_label)
sampler = OHEMPixelSampler(context=_context_for_ohem(), thresh=0.7, min_kept=200)
seg_logit = torch.randn(1, 19, 45, 45)
seg_label = torch.randint(0, 19, size=(1, 1, 45, 45))
seg_weight = sampler.sample(seg_logit, seg_label)
assert (seg_weight.shape[0] == seg_logit.shape[0])
assert (seg_weight.shape[1:] == seg_logit.shape[2:])
assert (seg_weight.sum() > 200)
sampler = OHEMPixelSampler(context=_context_for_ohem(), min_kept=200)
seg_logit = torch.randn(1, 19, 45, 45)
seg_label = torch.randint(0, 19, size=(1, 1, 45, 45))
seg_weight = sampler.sample(seg_logit, seg_label)
assert (seg_weight.shape[0] == seg_logit.shape[0])
assert (seg_weight.shape[1:] == seg_logit.shape[2:])
assert (seg_weight.sum() == 200)
|
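# InvertedResidual (MobileNetV2-style): residual connection only at stride 1
# with matching channels; expand_ratio 1 skips the expansion conv.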
def test_inv_residual():
with pytest.raises(AssertionError):
InvertedResidual(32, 32, 3, 4)
inv_module = InvertedResidual(32, 32, 1, 4)
assert inv_module.use_res_connect
assert (inv_module.conv[0].kernel_size == (1, 1))
assert (inv_module.conv[0].padding == 0)
assert (inv_module.conv[1].kernel_size == (3, 3))
assert (inv_module.conv[1].padding == 1)
assert inv_module.conv[0].with_norm
assert inv_module.conv[1].with_norm
x = torch.rand(1, 32, 64, 64)
output = inv_module(x)
assert (output.shape == (1, 32, 64, 64))
inv_module = InvertedResidual(32, 32, 2, 4)
assert (not inv_module.use_res_connect)
assert (inv_module.conv[0].kernel_size == (1, 1))
x = torch.rand(1, 32, 64, 64)
output = inv_module(x)
assert (output.shape == (1, 32, 32, 32))
inv_module = InvertedResidual(32, 32, 1, 1)
assert (inv_module.conv[0].kernel_size == (3, 3))
x = torch.rand(1, 32, 64, 64)
output = inv_module(x)
assert (output.shape == (1, 32, 64, 64))
|
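# Standalone benchmark script: measures pure inference FPS of a trained
# segmentor over the first 200 test iterations (5 warm-up iterations).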
def parse_args():
parser = argparse.ArgumentParser(description='Benchmark an MMSeg model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--log-interval', type=int, default=50, help='interval of logging')
args = parser.parse_args()
return args
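# Example invocation (hypothetical config/checkpoint paths):
#   python benchmark.py configs/pspnet_r50.py work_dirs/latest.pth --log-interval 50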
|
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
torch.backends.cudnn.benchmark = False
cfg.model.pretrained = None
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=False, shuffle=False)
model = build_segmentor(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
load_checkpoint(model, args.checkpoint, map_location='cpu')
model = MMDataParallel(model, device_ids=[0])
model.eval()
num_warmup = 5
pure_inf_time = 0
total_iters = 200
for (i, data) in enumerate(data_loader):
torch.cuda.synchronize()
start_time = time.perf_counter()
with torch.no_grad():
model(return_loss=False, rescale=True, **data)
torch.cuda.synchronize()
elapsed = (time.perf_counter() - start_time)
if (i >= num_warmup):
pure_inf_time += elapsed
if (((i + 1) % args.log_interval) == 0):
fps = (((i + 1) - num_warmup) / pure_inf_time)
print(f'Done image [{(i + 1):<3}/ {total_iters}], fps: {fps:.2f} img / s')
if ((i + 1) == total_iters):
fps = (((i + 1) - num_warmup) / pure_inf_time)
print(f'Overall fps: {fps:.2f} img / s')
break
|
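# Cityscapes preparation: convert *_polygons.json annotations to
# *_labelTrainIds.png and write train/val/test split files.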
def convert_json_to_label(json_file):
label_file = json_file.replace('_polygons.json', '_labelTrainIds.png')
json2labelImg(json_file, label_file, 'trainIds')
|
def parse_args():
parser = argparse.ArgumentParser(description='Convert Cityscapes annotations to TrainIds')
parser.add_argument('cityscapes_path', help='cityscapes data path')
parser.add_argument('--gt-dir', default='gtFine', type=str)
parser.add_argument('-o', '--out-dir', help='output path')
parser.add_argument('--nproc', default=1, type=int, help='number of processes')
args = parser.parse_args()
return args
|
def main():
args = parse_args()
cityscapes_path = args.cityscapes_path
out_dir = (args.out_dir if args.out_dir else cityscapes_path)
mmcv.mkdir_or_exist(out_dir)
gt_dir = osp.join(cityscapes_path, args.gt_dir)
poly_files = []
for poly in mmcv.scandir(gt_dir, '_polygons.json', recursive=True):
poly_file = osp.join(gt_dir, poly)
poly_files.append(poly_file)
if (args.nproc > 1):
mmcv.track_parallel_progress(convert_json_to_label, poly_files, args.nproc)
else:
mmcv.track_progress(convert_json_to_label, poly_files)
split_names = ['train', 'val', 'test']
for split in split_names:
filenames = []
for poly in mmcv.scandir(osp.join(gt_dir, split), '_polygons.json', recursive=True):
filenames.append(poly.replace('_gtFine_polygons.json', ''))
with open(osp.join(out_dir, f'{split}.txt'), 'w') as f:
f.writelines((name + '\n') for name in filenames)
|
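# PASCAL VOC: convert SBD .mat segmentations to .png and build the
# trainaug/aug split lists.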
def convert_mat(mat_file, in_dir, out_dir):
data = loadmat(osp.join(in_dir, mat_file))
mask = data['GTcls'][0]['Segmentation'][0].astype(np.uint8)
seg_filename = osp.join(out_dir, mat_file.replace('.mat', '.png'))
Image.fromarray(mask).save(seg_filename, 'PNG')
|
def generate_aug_list(merged_list, excluded_list):
return list((set(merged_list) - set(excluded_list)))
|
def parse_args():
parser = argparse.ArgumentParser(description='Convert PASCAL VOC annotations to mmsegmentation format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
parser.add_argument('aug_path', help='pascal voc aug path')
parser.add_argument('-o', '--out_dir', help='output path')
parser.add_argument('--nproc', default=1, type=int, help='number of processes')
args = parser.parse_args()
return args
|
def main():
args = parse_args()
devkit_path = args.devkit_path
aug_path = args.aug_path
nproc = args.nproc
if (args.out_dir is None):
out_dir = osp.join(devkit_path, 'VOC2012', 'SegmentationClassAug')
else:
out_dir = args.out_dir
mmcv.mkdir_or_exist(out_dir)
in_dir = osp.join(aug_path, 'dataset', 'cls')
mmcv.track_parallel_progress(partial(convert_mat, in_dir=in_dir, out_dir=out_dir), list(mmcv.scandir(in_dir, suffix='.mat')), nproc=nproc)
full_aug_list = []
with open(osp.join(aug_path, 'dataset', 'train.txt')) as f:
full_aug_list += [line.strip() for line in f]
with open(osp.join(aug_path, 'dataset', 'val.txt')) as f:
full_aug_list += [line.strip() for line in f]
with open(osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'train.txt')) as f:
ori_train_list = [line.strip() for line in f]
with open(osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'val.txt')) as f:
val_list = [line.strip() for line in f]
aug_train_list = generate_aug_list((ori_train_list + full_aug_list), val_list)
assert (len(aug_train_list) == AUG_LEN), 'len(aug_train_list) != {}'.format(AUG_LEN)
with open(osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'trainaug.txt'), 'w') as f:
f.writelines(((line + '\n') for line in aug_train_list))
aug_list = generate_aug_list(full_aug_list, (ori_train_list + val_list))
assert (len(aug_list) == (AUG_LEN - len(ori_train_list))), 'len(aug_list) != {}'.format((AUG_LEN - len(ori_train_list)))
with open(osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'aug.txt'), 'w') as f:
f.writelines(((line + '\n') for line in aug_list))
print('Done!')
|
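# FLOPs/params counter: runs mmcv's get_model_complexity_info on the model's
# forward_dummy at the given input shape.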
def parse_args():
parser = argparse.ArgumentParser(description='Get the FLOPs of a segmentor')
parser.add_argument('config', help='train config file path')
parser.add_argument('--shape', type=int, nargs='+', default=[2048, 1024], help='input image size')
args = parser.parse_args()
return args
|
def main():
args = parse_args()
if (len(args.shape) == 1):
input_shape = (3, args.shape[0], args.shape[0])
elif (len(args.shape) == 2):
input_shape = ((3,) + tuple(args.shape))
else:
raise ValueError('invalid input shape')
cfg = Config.fromfile(args.config)
cfg.model.pretrained = None
model = build_segmentor(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg).cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError('FLOPs counter is currently not supported with {}'.format(model.__class__.__name__))
(flops, params) = get_model_complexity_info(model, input_shape)
split_line = ('=' * 30)
print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format(split_line, input_shape, flops, params))
print('Please be cautious if you use the results in papers. You may need to check whether all ops are supported and verify that the FLOPs computation is correct.')
|
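# Utility: print the fully merged config and dump it to a file.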
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument('--options', nargs='+', action=DictAction, help='arguments in dict')
args = parser.parse_args()
return args
|
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if (args.options is not None):
cfg.merge_from_dict(args.options)
print(f'Config:\n{cfg.pretty_text}')
cfg.dump('example.py')
|
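# Checkpoint publishing: strip optimizer state and append a SHA-256 prefix to
# the filename. Example invocation (hypothetical paths):
#   python publish_model.py work_dirs/latest.pth pspnet_r50.pth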
def parse_args():
parser = argparse.ArgumentParser(description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
|
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
if ('optimizer' in checkpoint):
del checkpoint['optimizer']
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
stem = out_file[:-len('.pth')] if out_file.endswith('.pth') else out_file
final_file = '{}-{}.pth'.format(stem, sha[:8])
subprocess.Popen(['mv', out_file, final_file])
|
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
|
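# Test entry point: single- or multi-GPU evaluation of a trained segmentor,
# with optional result dumping, formatting and visualization.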
def parse_args():
parser = argparse.ArgumentParser(description='mmseg test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--aug-test', action='store_true', help='Use Flip and Multi scale aug')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument('--format-only', action='store_true', help='Format the output results without performing evaluation. It is useful when you want to format the result to a specific format and submit it to the test server')
parser.add_argument('--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "mIoU" for generic datasets, and "cityscapes" for Cityscapes')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--show-dir', help='directory where painted images will be saved')
parser.add_argument('--gpu-collect', action='store_true', help='whether to use gpu to collect results.')
parser.add_argument('--tmpdir', help='tmp directory used for collecting results from multiple workers, available when gpu_collect is not specified')
parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument('--eval-options', nargs='+', action=DictAction, help='custom options for evaluation')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if ('LOCAL_RANK' not in os.environ):
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
|
def main():
args = parse_args()
    assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save, eval, format or show the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"'
if (args.eval and args.format_only):
raise ValueError('--eval and --format_only cannot be both specified')
if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
if (args.options is not None):
cfg.merge_from_dict(args.options)
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if args.aug_test:
cfg.data.test.pipeline[1].img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
cfg.data.test.pipeline[1].flip = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
if (args.launcher == 'none'):
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
model = build_segmentor(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
print('model={}'.format(model))
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
model.CLASSES = checkpoint['meta']['CLASSES']
model.PALETTE = checkpoint['meta']['PALETTE']
if (not distributed):
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)
else:
model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
(rank, _) = get_dist_info()
if (rank == 0):
if args.out:
            print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = ({} if (args.eval_options is None) else args.eval_options)
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
dataset.evaluate(outputs, args.eval, **kwargs)
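
# For reference, the pipeline entry mutated by --aug-test above is typically
# an MMSeg MultiScaleFlipAug step; the override amounts to configuring
# (img_scale is dataset-specific and assumed here):
aug_test_pipeline_entry = dict(
    type='MultiScaleFlipAug',
    img_scale=(2048, 512),
    img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
    flip=True,
)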
|
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--load-from', help='the checkpoint file to load weights from')
parser.add_argument('--resume-from', help='the checkpoint file to resume from')
parser.add_argument('--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use (only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if ('LOCAL_RANK' not in os.environ):
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
|
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if (args.options is not None):
cfg.merge_from_dict(args.options)
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if (args.work_dir is not None):
cfg.work_dir = args.work_dir
elif (cfg.get('work_dir', None) is None):
cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
if (args.load_from is not None):
cfg.load_from = args.load_from
if (args.resume_from is not None):
cfg.resume_from = args.resume_from
if (args.gpu_ids is not None):
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = (range(1) if (args.gpus is None) else range(args.gpus))
if (args.launcher == 'none'):
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
meta = dict()
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
dash_line = (('-' * 60) + '\n')
logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
meta['env_info'] = env_info
logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')
if (args.seed is not None):
logger.info(f'Set random seed to {args.seed}, deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_segmentor(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
logger.info(model)
datasets = [build_dataset(cfg.data.train)]
if (len(cfg.workflow) == 2):
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if (cfg.checkpoint_config is not None):
cfg.checkpoint_config.meta = dict(mmseg_version=f'{__version__}+{get_git_hash()[:7]}', config=cfg.pretty_text, CLASSES=datasets[0].CLASSES, PALETTE=datasets[0].PALETTE)
model.CLASSES = datasets[0].CLASSES
train_segmentor(model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
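
# Sketch of what set_random_seed(seed, deterministic=True) amounts to
# (mirrors the mmseg helper used in main() above; names assumed).
import random

import numpy as np
import torch

def set_random_seed_sketch(seed, deterministic=False):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False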
|
class Mesh():
def __init__(self, name, geometry=None, geometry_path='', placement=None, color=(1.0, 0.0, 0.0, 1.0), scale=(1.0, 1.0, 1.0)):
super().__init__()
if (placement is None):
placement = pin.SE3.Identity()
assert isinstance(placement, pin.SE3), 'Use pin.SE3(R, t) with R 3x3, t 3 to define a placement'
self.name = name
if geometry:
self.geometry = geometry
if geometry_path:
current_dir = os.path.dirname(__file__)
geometry_path = os.path.realpath(os.path.join(current_dir, geometry_path))
meshloader = hppfcl.MeshLoader()
self.geometry = meshloader.load(geometry_path, scale=np.array(scale, dtype=float))
self.geometry_path = geometry_path
self.placement = placement
self.color = np.array(color)
self.scale = np.array(scale)
def geom_obj(self, parent_frame=0, parent_joint=0, name=None):
if (name is None):
name = self.name
geom_obj = pin.GeometryObject(name=name, parent_joint=parent_joint, parent_frame=parent_frame, collision_geometry=self.geometry, placement=self.placement, mesh_path=self.geometry_path, mesh_scale=self.scale, override_material=True, mesh_color=self.color)
return geom_obj
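
# Usage sketch: a box mesh wrapped into a pinocchio GeometryObject (assumes
# hppfcl, pinocchio (pin) and numpy (np) imported as in this module).
crate = Mesh(name='crate', geometry=hppfcl.Box(0.2, 0.2, 0.2),
             placement=pin.SE3(np.eye(3), np.array([0.0, 0.0, 0.1])),
             color=(0.0, 1.0, 0.0, 1.0))
crate_geom_obj = crate.geom_obj()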
|
class Open3DVisualizer():
def __init__(self):
self.viz = None
self.pcd = o3d.geometry.PointCloud()
def __del__(self):
if (self.viz is not None):
self.viz.destroy_window()
def _create_viz(self):
self.viz = o3d.visualization.Visualizer()
self.viz.create_window()
self.viz.add_geometry(self.pcd)
def show_pcd(self, points, normals, colors=None, blocking=False):
new_instance = (self.viz is None)
if (new_instance and (not blocking)):
self._create_viz()
if new_instance:
self.pcd = o3d.geometry.PointCloud()
self.pcd.points = o3d.utility.Vector3dVector(points)
self.pcd.normals = o3d.utility.Vector3dVector(normals)
if (colors is not None):
self.pcd.colors = o3d.utility.Vector3dVector(colors)
if (not blocking):
if new_instance:
self.viz.add_geometry(self.pcd)
else:
self.viz.update_geometry(self.pcd)
self.viz.poll_events()
self.viz.update_renderer()
else:
o3d.visualization.draw_geometries([self.pcd])
def show_voxels(self, voxels, blocking=True):
if (not blocking):
raise NotImplementedError
o3d.visualization.draw_geometries([voxels])
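
# Usage sketch: streaming a random point cloud into the non-blocking viewer
# (points and normals are (N, 3) float arrays; assumes open3d is installed
# and numpy is imported as np, as in this module).
viewer = Open3DVisualizer()
pts = np.random.rand(1000, 3)
nrm = np.tile(np.array([0.0, 0.0, 1.0]), (1000, 1))
viewer.show_pcd(pts, nrm, blocking=False)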
|
class Visualizer():
def __init__(self, name, model_wrapper):
self.name = name
if (self.name == 'meshcat'):
self.viz_class = MeshcatVisualizer
elif (self.name == 'gepetto'):
self.viz_class = GepettoVisualizer
else:
raise ValueError(f'Unknown visualizer: {self.name}')
self.model_wrapper = model_wrapper
self.model_wrapper.create_data()
self.node_name = 'core'
self._create_viz()
def _create_viz(self):
model = self.model_wrapper.model
geom_model = self.model_wrapper.geom_model
self.viz = self.viz_class(model, geom_model, geom_model)
self.viz.initViewer()
node_name = self.node_name
if (self.name == 'gepetto'):
self.gui = self.viz.viewer.gui
gui = self.gui
self.window_id = gui.getWindowList()[0]
if gui.nodeExists(f'world/{node_name}'):
gui.deleteNode(f'world/{node_name}', True)
gui.setBackgroundColor1(self.window_id, (1, 1, 1, 1))
gui.setBackgroundColor2(self.window_id, (1, 1, 1, 0.5))
self.viz.loadViewerModel(node_name)
def _update_data(self):
self.model_wrapper.create_data()
self._create_viz()
def display(self, qw=None):
if (qw is None):
qw = self.model_wrapper.neutral_configuration()
q = qw.q
self.viz.display(q)
def add_geom_obj(self, geom_obj, update_data=True):
geom_model = self.model_wrapper.geom_model
geom_model.addGeometryObject(geom_obj)
if update_data:
self._update_data()
def show_bounds(self, bounds):
pos = bounds.mean(axis=0)
size = (bounds[1] - bounds[0]).astype(float)
workspace = Mesh(name='workspace', geometry=hppfcl.Box(*size), placement=pin.SE3(np.eye(3), pos), color=(0, 0, 1, 0.5))
self.add_mesh(workspace, update_data=True)
    def show_joints(self):
        raise NotImplementedError('To be reimplemented')
        # Unreachable legacy code kept for reference; it relies on attributes
        # (self.model, self.data, self.goal_state) this class no longer defines.
        goal_jts = utils.get_oMi(self.model, self.data, self.goal_state['q'])
        for (i, jt_se3) in enumerate(goal_jts):
            self.add_mesh(f'jt{i}', geom=hppfcl.Sphere(0.07), placement=jt_se3, check_collision=False, color=(0, 0, 1, 1.0))
        self._create_data()
        self._create_viz()
def show_robot_aabb(self):
def hex_to_rgb(value):
value = value.lstrip('#')
lv = len(value)
return tuple((int(value[i:(i + (lv // 3))], 16) for i in range(0, lv, (lv // 3))))
geom_objs = self.geom_model.geometryObjects
geometries = [geom_obj.geometry for geom_obj in geom_objs]
parents = [geom_obj.parentJoint for geom_obj in geom_objs]
i = 0
colors = cycle(['#377eb8', '#e41a1c', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf'])
for (geom, parent_id, color) in zip(geometries, parents, colors):
aabb = geom.aabb_local
(w, h, d) = (aabb.width(), aabb.height(), aabb.depth())
box = hppfcl.Box(w, h, d)
placement = pin.SE3(np.eye(3), aabb.center())
geom_obj = pin.GeometryObject(f'aabb{i}', 0, parent_id, box, placement)
color = (np.array(hex_to_rgb(color)) / 255)
geom_obj.meshColor = np.array((color[0], color[1], color[2], 0.5))
self.viz_model.addGeometryObject(geom_obj)
i += 1
print('show aabb')
self._create_data()
self._create_viz()
def show_obstacles_pin(self, obstacles):
for (i, sample) in enumerate(obstacles):
rot = eigenpy.Quaternion.FromTwoVectors(np.array((0, 0, 1)), sample[3:])
rot = rot.toRotationMatrix()
cone = Mesh(name=f'surf{i}', geometry=hppfcl.Cone(0.03, 0.1), placement=pin.SE3(rot, sample[:3]), color=(0, 0, 0, 0.8))
self.add_mesh(cone, update_data=False)
self._update_data()
def create_roadmap(self, name, color):
if (not (self.name == 'gepetto')):
raise ValueError('Only implemented for gepetto-gui')
roadmap_name = f'world/{self.node_name}/{name}'
self.gui.createRoadmap(roadmap_name, (0, 0, 0, 1), 1, 1, color)
def add_edge_to_roadmap(self, name, start, end):
roadmap_name = f'world/{self.node_name}/{name}'
self.gui.addEdgeToRoadmap(roadmap_name, list(start), list(end))
def display_tree(self, nodes, name, color, create_roadmap=True):
if create_roadmap:
self.create_roadmap(name, color=color)
drawn_edges = []
for node in nodes:
edge = None
if node.parent:
edge = (node.parent.point[:3], node.point[:3])
edge = tuple(map(tuple, edge))
if (edge not in drawn_edges):
self.add_edge_to_roadmap(name, edge[0], edge[1])
drawn_edges.append(edge)
|
def load_dataset_geoms(filename):
with open(filename, 'rb') as f:
geoms_pkl = pkl.load(f)
dataset_geoms = []
for geoms_dict in geoms_pkl['geoms_dicts']:
geoms = Geometries()
geoms.from_dict(geoms_dict)
dataset_geoms.append(geoms)
return dataset_geoms
|
def display_start_goal(viz, robot, state, goal_state, dist_goal, start_color, goal_color):
if (viz is None):
raise ValueError('No visualizer instantiated.')
start_oMg = state.oMg
goal_oMg = goal_state.oMg
start_oMg_np = robot.get_oMg_np(start_oMg)
goal_oMg_np = robot.get_oMg_np(goal_oMg)
for (i, (start_i, goal_i)) in enumerate(zip(start_oMg_np, goal_oMg_np)):
start_robot = robot.make_geom_obj(f'start{i}', i)
goal_robot = robot.make_geom_obj(f'goal{i}', i)
start_robot.placement = (pin.SE3(start_i) * start_robot.placement)
start_robot.meshColor = start_color
goal_robot.placement = (pin.SE3(goal_i) * goal_robot.placement)
goal_robot.meshColor = goal_color
for geom_obj in [start_robot, goal_robot]:
viz.add_geom_obj(geom_obj)
|
def get_bounds_geom_objs(pos_bounds):
    """Generate the 6 faces corresponding to the agent displacement bounds."""
size = (pos_bounds[1] - pos_bounds[0])
center = np.mean(pos_bounds, axis=0)
thickness = 0.05
color = (1, 1, 1, 0.3)
geom_objs = []
aas = [eigenpy.AngleAxis(0, np.array([1, 0, 0])), eigenpy.AngleAxis((np.pi / 2), np.array([0, 1, 0])), eigenpy.AngleAxis((np.pi / 2), np.array([0, 0, 1]))]
placement = pin.SE3.Identity()
for (i, angle_axis) in enumerate(aas):
placement.rotation = angle_axis.matrix()
size_bound = size.copy()
translation = np.zeros(3)
size_bound[i] = thickness
translation[i] = (((- size[i]) / 2) - (thickness / 2.1))
for j in range(2):
geom = hppfcl.Box(*size_bound)
placement.translation = translation
mesh = Mesh(name=f'bound{i}{j}', geometry=geom, placement=placement, color=color)
geom_objs.append(mesh.geom_obj())
translation *= (- 1)
return geom_objs
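
# Usage sketch: six translucent faces enclosing a unit cube workspace.
unit_bounds = np.array([[-0.5, -0.5, 0.0], [0.5, 0.5, 1.0]])
bound_geom_objs = get_bounds_geom_objs(unit_bounds)
assert len(bound_geom_objs) == 6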
|
class BaseObserver(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
self.obs_shape = self.env.obs_shape
self.obs_indices = self.env.obs_indices
self.observation_space = self.env.observation_space
def set_eval(self):
self.env.set_eval()
def observe(self):
observation = self.env.observe()
return self.observation(observation)
def add_observation(self, name, obs_size):
        """
        See the obs_indices definition in __init__.
        Handles the rlkit concatenation logic: if the goal dimension is
        passed, it should not count in the global observation shape.
        Given (o, g) a tuple of (observation, goal), rlkit computes
        a = policy((o, g)); g is thus always put at the end.
        """
        if (name == 'goal'):
self.obs_indices[name] = slice((- obs_size), None)
else:
self.obs_indices[name] = slice(self.obs_shape, (self.obs_shape + obs_size))
self.obs_shape += obs_size
self.update_observation_box('observation', self.obs_shape)
def update_observation_box(self, name, shape):
box = self.observation_space[name]
box.low = (- np.ones(shape))
box.high = np.ones(shape)
box.shape = (shape,)
|
class Node():
def __init__(self, point, parent):
if (not ((parent is None) or isinstance(parent, Node))):
raise ValueError('Parent should be None or Node type')
self.parent = parent
self.point = point
def path_from_root(self):
node = self
path = []
while (node is not None):
path.append(node.point)
node = node.parent
return path[::(- 1)]
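
# Usage sketch: a three-node chain and the root-to-leaf path it yields.
import numpy as np

root = Node(np.array([0.0, 0.0]), parent=None)
mid = Node(np.array([0.5, 0.0]), parent=root)
leaf = Node(np.array([1.0, 0.0]), parent=mid)
assert len(leaf.path_from_root()) == 3  # ordered root -> leaf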
|
def nearest_neighbor(x, nodes, distance_fn):
dist = [distance_fn(x, n.point) for n in nodes]
idx = np.argmin(dist)
return nodes[idx]
|
def rrt_bidir(start, goal, sample_fn, expand_fn, distance_fn, close_fn, iterations):
nodes_ab = [[], []]
for (i, x) in enumerate((start, goal)):
node = Node(x, parent=None)
nodes_ab[i].append(node)
solution = {'points': [], 'collisions': [], 'n_samples': 0, 'n_collisions': 0}
growing_index = 0
for i in range(iterations):
(nodes_a, nodes_b) = (nodes_ab[growing_index], nodes_ab[(1 - growing_index)])
x_rand = sample_fn()
node_a = nearest_neighbor(x_rand, nodes_a, distance_fn)
solution['n_samples'] += 1
x_a = node_a.point
(x_a_new, col_free_a) = expand_fn(x_a, x_rand)
if (col_free_a and (not close_fn(x_a, x_a_new))):
node_a_new = Node(x_a_new, parent=node_a)
nodes_ab[growing_index].append(node_a_new)
node_b = nearest_neighbor(x_a_new, nodes_b, distance_fn)
solution['n_samples'] += 1
x_b = node_b.point
(x_b_new, col_free_b) = expand_fn(x_b, x_a_new)
if (col_free_b and (not close_fn(x_b, x_b_new))):
node_b_new = Node(x_b_new, parent=node_b)
nodes_ab[(1 - growing_index)].append(node_b_new)
if close_fn(x_a_new, x_b_new):
if (growing_index == 1):
(node_a_new, node_b_new) = (node_b_new, node_a_new)
seq_start_a = node_a_new.path_from_root()
seq_b_goal = node_b_new.path_from_root()[::(- 1)]
seq = (seq_start_a + seq_b_goal[1:])
solution['points'] = seq
return (True, solution, nodes_ab, (2 * i))
if (len(nodes_ab[0]) == len(nodes_ab[1])):
growing_index = np.random.binomial(1, 0.5)
elif (len(nodes_ab[0]) > len(nodes_ab[1])):
growing_index = (1 - growing_index)
return (False, {'collisions': solution['collisions']}, nodes_ab, (2 * iterations))
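
# Usage sketch: rrt_bidir on an obstacle-free 2D square with straight-line
# expansion capped at 0.2 per step (all helpers are toy stand-ins; assumes
# numpy imported as np, as in this module).
def toy_sample():
    return np.random.uniform(-1.0, 1.0, size=2)

def toy_distance(a, b):
    return np.linalg.norm(a - b)

def toy_expand(a, b):
    step = b - a
    dist = np.linalg.norm(step)
    if dist > 0.2:
        step = (0.2 / dist) * step
    return (a + step, True)  # no obstacles: expansion is always collision-free

def toy_close(a, b):
    return toy_distance(a, b) < 0.05

success, sol, trees, n_iters = rrt_bidir(
    np.array([-0.9, -0.9]), np.array([0.9, 0.9]),
    toy_sample, toy_expand, toy_distance, toy_close, iterations=2000)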
|
def solve(env, delta_growth, iterations, simplify):
    """
    collision_fn : maps x to True (free) / False (collision)
    sample_fn : returns a configuration
    """
algo = rrt_bidir.rrt_bidir
model_wrapper = env.model_wrapper
delta_collision_check = env.delta_collision_check
action_range = env.robot_props['action_range']
def collision_fn(q):
return (not model_wrapper.collision(q))
def sample_fn():
return model_wrapper.random_configuration()
def distance_fn(q0, q1):
return model_wrapper.distance(q0, q1)
def interpolate_fn(q0, q1, t):
return model_wrapper.interpolate(q0, q1, t)
def arange_fn(q0, q1, resolution):
return model_wrapper.arange(q0, q1, resolution)
def expand_fn(q0, q1, limit_growth=True):
if limit_growth:
dist = distance_fn(q0, q1)
t1 = (min(dist, delta_growth) / (dist + EPSILON))
q1 = interpolate_fn(q0, q1, t1)
path = arange_fn(q0, q1, delta_collision_check)
(q_stop, collide) = env.stopping_configuration(path)
return (q_stop, (not collide))
def close_fn(qw0, qw1):
return np.allclose(qw0.q, qw1.q)
start = env.state
goal = env.goal_state
(success, path, trees, iterations) = algo(start, goal, sample_fn, expand_fn, distance_fn, close_fn, iterations=iterations)
iterations_simplify = 0
if success:
if simplify:
(path['points'], iterations_simplify) = utils.shorten(path['points'], expand_fn, interpolate_fn, distance_fn)
path['points'] = utils.limit_step_size(path['points'], arange_fn, action_range)
else:
path['points'] = np.array(path['points'])
path['collisions'] = np.array(path['collisions'])
path['start'] = path['points'][0]
path['goal'] = path['points'][(- 1)]
return (success, path, trees, (iterations + iterations_simplify))
|
def shorten(path, expand_fn, interpolate_fn, distance_fn):
path = list(path)
current_idx = 0
target_idx = (len(path) - 1)
it = 0
while (current_idx < target_idx):
point = path[current_idx]
target = path[target_idx]
(q_stop, free) = expand_fn(point, target, limit_growth=False)
if free:
path = (path[:(current_idx + 1)] + path[target_idx:])
target_idx = current_idx
current_idx = 0
else:
current_idx += 1
it += 1
return (path, it)
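
# Usage sketch: shortcutting a dog-leg path in free space; the middle
# waypoint is dropped because the toy expand_fn never reports a collision
# (interpolate_fn and distance_fn are unused by shorten, hence None).
import numpy as np

def free_expand(q0, q1, limit_growth=True):
    return (q1, True)

dogleg = [np.array([0.0, 0.0]), np.array([0.0, 1.0]), np.array([1.0, 1.0])]
short_path, n_its = shorten(dogleg, free_expand, None, None)
assert len(short_path) == 2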
|
def limit_step_size(path, arange_fn, step_size):
n = len(path)
new_path = []
for i in range((n - 1)):
new_path += arange_fn(path[i], path[(i + 1)], step_size)
return new_path
|
class Robot():
def __init__(self):
self.link_dim = None
def get_neutral(self):
return pin.neutral(self.model)
def _set_collision_pairs(self, model, geom_model):
raise NotImplementedError
def _build_from_urdf(self, model_wrapper, urdf_path, package_path):
model = model_wrapper.model
geom_model = model_wrapper.geom_model
pin.buildModelFromUrdf(urdf_path, model)
pin.buildGeomFromUrdf(model, urdf_path, pin.GeometryType.COLLISION, geom_model, package_path)
model_data = model.createData()
geom_model_data = geom_model.createData()
def _build_from_mesh(self, model_wrapper, mesh, bounds=None):
model = model_wrapper.model
geom_model = model_wrapper.geom_model
geom_obj = mesh.geom_obj()
placement = geom_obj.placement.copy()
geom_obj.placement = pin.SE3.Identity()
geom_obj.q_placement = placement
q_freeflyer = pin.SE3ToXYZQUAT(placement).copy()
free_flyer = pin.JointModelFreeFlyer()
universe_joint = 0
joint_name = f'freeflyer_{mesh.name}'
if (bounds is not None):
parent_joint = model.addJoint(universe_joint, free_flyer, pin.SE3.Identity(), joint_name=joint_name, max_effort=np.array([1000]), max_velocity=np.array([1000]), min_config=bounds[0], max_config=bounds[1])
else:
parent_joint = model.addJoint(universe_joint, free_flyer, pin.SE3.Identity(), joint_name)
geom_obj.parentJoint = parent_joint
geom_model.addGeometryObject(geom_obj)
def project_q(self, q):
return q
def make_geom_obj(self):
raise NotImplementedError
|
def get_replay_buffer(variant, expl_env):
    """Define the replay buffer specific to the mode."""
mode = variant['mode']
if (mode == 'vanilla'):
replay_buffer = EnvReplayBuffer(env=expl_env, **variant['replay_buffer_kwargs'])
    elif (mode == 'her'):
        replay_buffer = ObsDictRelabelingBuffer(env=expl_env, **variant['her'], **variant['replay_buffer_kwargs'])
    else:
        raise ValueError(f'Unknown mode: {mode}')
    return replay_buffer
|
def get_networks(variant, expl_env):
    """Define the Q networks and the policy network."""
qf_kwargs = variant['qf_kwargs']
policy_kwargs = variant['policy_kwargs']
shared_base = None
(qf_class, qf_kwargs) = utils.get_q_network(variant['archi'], qf_kwargs, expl_env)
(policy_class, policy_kwargs) = utils.get_policy_network(variant['archi'], policy_kwargs, expl_env, 'tanhgaussian')
qf1 = qf_class(**qf_kwargs)
qf2 = qf_class(**qf_kwargs)
target_qf1 = qf_class(**qf_kwargs)
target_qf2 = qf_class(**qf_kwargs)
policy = policy_class(**policy_kwargs)
print('Policy:')
print(policy)
nets = [qf1, qf2, target_qf1, target_qf2, policy, shared_base]
print(f'Q function num parameters: {qf1.num_params()}')
print(f'Policy num parameters: {policy.num_params()}')
return nets
|
def get_path_collector(variant, expl_env, eval_env, policy, eval_policy):
    """Define the exploration and evaluation path collectors."""
mode = variant['mode']
if (mode == 'vanilla'):
expl_path_collector = MdpPathCollector(expl_env, policy)
eval_path_collector = MdpPathCollector(eval_env, eval_policy)
elif (mode == 'her'):
expl_path_collector = GoalConditionedPathCollector(expl_env, policy, observation_key=variant['her']['observation_key'], desired_goal_key=variant['her']['desired_goal_key'], representation_goal_key=variant['her']['representation_goal_key'])
eval_path_collector = GoalConditionedPathCollector(eval_env, eval_policy, observation_key=variant['her']['observation_key'], desired_goal_key=variant['her']['desired_goal_key'], representation_goal_key=variant['her']['representation_goal_key'])
return (expl_path_collector, eval_path_collector)
|
def sac(variant):
expl_env = gym.make(variant['env_name'])
eval_env = gym.make(variant['env_name'])
expl_env.seed(variant['seed'])
eval_env.set_eval()
mode = variant['mode']
archi = variant['archi']
if (mode == 'her'):
variant['her'] = dict(observation_key='observation', desired_goal_key='desired_goal', achieved_goal_key='achieved_goal', representation_goal_key='representation_goal')
replay_buffer = get_replay_buffer(variant, expl_env)
(qf1, qf2, target_qf1, target_qf2, policy, shared_base) = get_networks(variant, expl_env)
expl_policy = policy
eval_policy = MakeDeterministic(policy)
(expl_path_collector, eval_path_collector) = get_path_collector(variant, expl_env, eval_env, expl_policy, eval_policy)
mode = variant['mode']
trainer = SACTrainer(env=eval_env, policy=policy, qf1=qf1, qf2=qf2, target_qf1=target_qf1, target_qf2=target_qf2, **variant['trainer_kwargs'])
if (mode == 'her'):
trainer = HERTrainer(trainer)
algorithm = TorchBatchRLAlgorithm(trainer=trainer, exploration_env=expl_env, evaluation_env=eval_env, exploration_data_collector=expl_path_collector, evaluation_data_collector=eval_path_collector, replay_buffer=replay_buffer, **variant['algorithm_kwargs'])
algorithm.to(ptu.device)
algorithm.train()
|
def archi_to_network(archi_name, function_type):
allowed_function_type = ['vanilla', 'tanhgaussian']
if (function_type not in allowed_function_type):
raise ValueError(f'Function name should be in {allowed_function_type}')
return ARCHI[archi_name][function_type]
|
def get_policy_network(archi, kwargs, env, policy_type):
action_dim = env.action_space.low.size
obs_dim = env.observation_space.spaces['observation'].low.size
goal_dim = env.observation_space.spaces['representation_goal'].low.size
if (policy_type == 'tanhgaussian'):
kwargs['obs_dim'] = (obs_dim + goal_dim)
kwargs['action_dim'] = action_dim
else:
kwargs['output_size'] = action_dim
if (archi != 'kinnet'):
kwargs['hidden_sizes'] = ([kwargs.pop('hidden_dim')] * kwargs.pop('n_layers'))
if (archi != 'mlp'):
robot_props = env.robot_props
obs_indices = env.obs_indices
obstacles_dim = env.obstacles_dim
coordinate_frame = env.coordinate_frame
policy_class = archi_to_network(archi, policy_type)
if (archi == 'mlp'):
if (policy_type == 'vanilla'):
kwargs['input_size'] = (obs_dim + goal_dim)
elif ('pointnet' in archi):
obstacle_point_dim = env.obstacle_point_dim
kwargs['q_action_dim'] = 0
kwargs['robot_props'] = robot_props
kwargs['obstacle_point_dim'] = obstacle_point_dim
kwargs['input_indices'] = obs_indices
kwargs['hidden_activation'] = F.elu
kwargs['coordinate_frame'] = coordinate_frame
elif (archi == 'cnn'):
kwargs.pop('obs_dim', None)
kwargs['q_action_dim'] = 0
kwargs['conv_sizes'] = (1, 16, 32, 64)
kwargs['fc_sizes'] = (256, 256)
kwargs['input_indices'] = obs_indices
kwargs['robot_props'] = robot_props
kwargs['coordinate_frame'] = coordinate_frame
kwargs.pop('hidden_sizes')
elif (archi == 'voxnet'):
kwargs['q_action_dim'] = 0
kwargs['input_indices'] = obs_indices
kwargs['robot_props'] = robot_props
kwargs['coordinate_frame'] = coordinate_frame
else:
raise ValueError(f'Unknown network archi: {archi}')
return (policy_class, kwargs)
|
def get_q_network(archi, kwargs, env, classification=False):
action_dim = env.action_space.low.size
obs_dim = env.observation_space.spaces['observation'].low.size
goal_dim = env.observation_space.spaces['representation_goal'].low.size
kwargs['output_size'] = 1
q_action_dim = action_dim
if (archi != 'kinnet'):
kwargs['hidden_sizes'] = ([kwargs.pop('hidden_dim')] * kwargs.pop('n_layers'))
if (archi != 'mlp'):
robot_props = env.robot_props
obs_indices = env.obs_indices
obstacles_dim = env.obstacles_dim
coordinate_frame = env.coordinate_frame
qf_class = archi_to_network(archi, 'vanilla')
if (archi == 'mlp'):
kwargs['input_size'] = ((obs_dim + goal_dim) + q_action_dim)
elif ('pointnet' in archi):
obstacle_point_dim = env.obstacle_point_dim
kwargs['q_action_dim'] = q_action_dim
kwargs['robot_props'] = robot_props
kwargs['obstacle_point_dim'] = obstacle_point_dim
kwargs['input_indices'] = obs_indices
kwargs['hidden_activation'] = F.elu
kwargs['coordinate_frame'] = coordinate_frame
elif (archi == 'cnn'):
kwargs['q_action_dim'] = q_action_dim
kwargs['conv_sizes'] = (1, 16, 32, 64)
kwargs['fc_sizes'] = (256, 256)
kwargs['input_indices'] = obs_indices
kwargs['robot_props'] = robot_props
kwargs['coordinate_frame'] = coordinate_frame
kwargs.pop('hidden_sizes', None)
elif (archi == 'voxnet'):
kwargs.pop('obs_dim', None)
kwargs['q_action_dim'] = q_action_dim
kwargs['input_indices'] = obs_indices
kwargs['robot_props'] = robot_props
kwargs['coordinate_frame'] = coordinate_frame
else:
raise ValueError(f'Unknown network archi: {archi}')
return (qf_class, kwargs)
|
class MLPBlock(nn.Module):
def __init__(self, sizes, output_activation, hidden_activation=F.elu, hidden_init=ptu.fanin_init, b_init_value=0.1):
super().__init__()
self.output_activation = output_activation
self.hidden_activation = hidden_activation
self.hidden_init = hidden_init
self.b_init_value = b_init_value
self.sizes = sizes
self.fcs = []
in_size = sizes[0]
for (i, next_size) in enumerate(sizes[1:]):
fc = nn.Linear(in_size, next_size)
in_size = next_size
self.hidden_init(fc.weight, scale=1.0)
fc.bias.data.fill_(self.b_init_value)
fc_name = f'fc{i}'
self.__setattr__(fc_name, fc)
self.fcs.append(fc)
def forward(self, x):
for fc in self.fcs[:(- 1)]:
res = x
x = fc(x)
if (x.size() == res.size()):
x += res
x = self.hidden_activation(x)
x = self.fcs[(- 1)](x)
x = self.output_activation(x)
return x
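
# Usage sketch: a (32 -> 64 -> 64 -> 10) block; only the 64 -> 64 hidden
# layer gets the residual add, since input/output sizes match there
# (assumes torch and rlkit's ptu are importable as in this module).
import torch

mlp = MLPBlock(sizes=(32, 64, 64, 10), output_activation=torch.tanh)
out = mlp(torch.randn(4, 32))
assert out.shape == (4, 10)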
|
class RandomPolicy():
def __init__(self, env):
self.action_space = env.action_space
def reset(self):
pass
def get_action(self, obs):
low = np.array(self.action_space.low, ndmin=1)
dim = low.shape[0]
action = np.random.normal(size=(dim,))
action = np.tanh(action)
return (action, {})
|
class StraightLinePolicy():
def __init__(self, env):
self.action_space = env.action_space
self.env = env
def reset(self):
pass
def get_action(self, obs):
current = self.env.state.q
goal = self.env.goal_state.q
action = (goal - current)[:self.action_space.low.shape[0]]
norm = np.linalg.norm(action)
norm = max(0.07, norm)
action /= norm
return (action, {})
|
@click.command()
@click.argument('env_name', type=str)
@click.option('-exp', '--exp-name', default='', type=str)
@click.option('-s', '--seed', default=None, type=int)
@click.option('-h', '--horizon', default=50, type=int, help='max steps allowed')
@click.option('-e', '--episodes', default=0, type=int, help='number of episodes to evaluate')
@click.option('-cpu', '--cpu/--no-cpu', default=False, is_flag=True, help='use cpu')
@click.option('-stoch', '--stochastic/--no-stochastic', default=False, is_flag=True, help='stochastic mode')
def main(env_name, exp_name, seed, horizon, episodes, cpu, stochastic):
if (not cpu):
set_gpu_mode(True)
set_seed(seed)
env = gym.make(env_name)
env.seed(seed)
env.set_eval()
log_dir = settings.log_dir()
if exp_name:
policy = utils.load(log_dir, exp_name, cpu, stochastic)
if stochastic:
num_params = policy.num_params()
else:
num_params = policy.stochastic_policy.num_params()
print(f'num params: {num_params}')
else:
policy = RandomPolicy(env)
render = (episodes == 0)
reset_kwargs = {}
def rollout_fn():
return multitask_rollout(env, policy, horizon, render, observation_key='observation', desired_goal_key='desired_goal', representation_goal_key='representation_goal', **reset_kwargs)
if render:
paths = utils.render(env, rollout_fn)
else:
(success_rate, n_col, paths_states) = utils.evaluate(rollout_fn, episodes)
print(f'Success rate: {success_rate} - Collisions: {n_col}')
|
def check_os_environ(key, use):
if (key not in os.environ):
        print(f'{key} is not defined in the os variables; it is required for {use}.')
        print('Using the home directory by default.')
return os.path.expanduser('~')
return os.environ[key]
|
def log_dir():
checkpoint = check_os_environ('CHECKPOINT', 'model checkpointing')
return checkpoint
|
@click.command(help='nmp.train env_name exp_name')
@click.argument('env-name', type=str)
@click.argument('exp-dir', type=str)
@click.option('-s', '--seed', default=None, type=int)
@click.option('-resume', '--resume/--no-resume', is_flag=True, default=False)
@click.option('-mode', '--mode', default='her')
@click.option('-archi', '--archi', default='pointnet')
@click.option('-epochs', '--epochs', default=3000, type=int)
@click.option('-rscale', '--reward-scale', default=1, type=float)
@click.option('-h-dim', '--hidden-dim', default=256, type=int)
@click.option('-bs', '--batch-size', default=256, type=int)
@click.option('-lr', '--learning-rate', default=0.0003, type=float)
@click.option('-n-layers', '--n-layers', default=3, type=int)
@click.option('-tau', '--soft-target-tau', default=0.005, type=float)
@click.option('-auto-alpha', '--auto-alpha/--no-auto-alpha', is_flag=True, default=True)
@click.option('-alpha', '--alpha', default=0.1, type=float)
@click.option('-frac-goal-replay', '--frac-goal-replay', default=0.8, type=float)
@click.option('-horizon', '--horizon', default=80, type=int)
@click.option('-rbs', '--replay-buffer-size', default=int(1000000.0), type=int)
@click.option('-cpu', '--cpu/--no-cpu', is_flag=True, default=False)
@click.option('-snap-mode', '--snapshot-mode', default='last', type=str, help='all, last, gap, gap_and_last, none')
@click.option('-snap-gap', '--snapshot-gap', default=10, type=int)
def main(env_name, exp_dir, seed, resume, mode, archi, epochs, reward_scale, hidden_dim, batch_size, learning_rate, n_layers, soft_target_tau, auto_alpha, alpha, frac_goal_replay, horizon, replay_buffer_size, snapshot_mode, snapshot_gap, cpu):
valid_modes = ['vanilla', 'her']
valid_archi = ['mlp', 'cnn', 'pointnet']
if (mode not in valid_modes):
raise ValueError(f'Unknown mode: {mode}')
if (archi not in valid_archi):
raise ValueError(f'Unknown network archi: {archi}')
machine_log_dir = settings.log_dir()
exp_dir = os.path.join(machine_log_dir, exp_dir, f'seed{seed}')
num_expl_steps_per_train_loop = 1000
num_eval_steps_per_epoch = 1000
min_num_steps_before_training = 1000
num_trains_per_train_loop = 1000
policy_lr = learning_rate
qf_lr = learning_rate
variant = dict(env_name=env_name, algorithm='sac', version='normal', seed=seed, resume=resume, mode=mode, archi=archi, replay_buffer_kwargs=dict(max_replay_buffer_size=replay_buffer_size), algorithm_kwargs=dict(batch_size=batch_size, num_epochs=epochs, num_eval_steps_per_epoch=num_eval_steps_per_epoch, num_expl_steps_per_train_loop=num_expl_steps_per_train_loop, num_trains_per_train_loop=num_trains_per_train_loop, min_num_steps_before_training=min_num_steps_before_training, max_path_length=horizon), trainer_kwargs=dict(discount=0.99, soft_target_tau=soft_target_tau, target_update_period=1, policy_lr=policy_lr, qf_lr=qf_lr, reward_scale=reward_scale, use_automatic_entropy_tuning=auto_alpha, alpha=alpha), qf_kwargs=dict(hidden_dim=hidden_dim, n_layers=n_layers), policy_kwargs=dict(hidden_dim=hidden_dim, n_layers=n_layers), log_dir=exp_dir)
if (mode == 'her'):
variant['replay_buffer_kwargs'].update(dict(fraction_goals_rollout_goals=(1 - frac_goal_replay), fraction_goals_env_goals=0))
set_seed(seed)
setup_logger_kwargs = {'exp_prefix': exp_dir, 'variant': variant, 'log_dir': exp_dir, 'snapshot_mode': snapshot_mode, 'snapshot_gap': snapshot_gap}
setup_logger(**setup_logger_kwargs)
ptu.set_gpu_mode((not cpu), distributed_mode=False)
    print('Start training...')
sac(variant)
|
def find_datafiles(path):
return [(os.path.join('etc', d), [os.path.join(d, f) for f in files]) for (d, folders, files) in os.walk(path)]
|
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
'3x3 convolution with padding'
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation)
|
def conv1x1(in_planes, out_planes, stride=1):
'1x1 convolution'
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
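
# Usage sketch: both helpers preserve spatial size at stride 1.
import torch

feat = torch.randn(1, 16, 32, 32)
assert conv3x3(16, 32)(feat).shape == (1, 32, 32, 32)
assert conv1x1(16, 8)(feat).shape == (1, 8, 32, 32)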
|
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
if ((groups != 1) or (base_width != 64)):
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if (dilation > 1):
raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
width = (int((planes * (base_width / 64.0))) * groups)
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, (planes * self.expansion))
self.bn3 = norm_layer((planes * self.expansion))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
class ResNet(nn.Module):
def __init__(self, in_channels, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
super(ResNet, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if (replace_stride_with_dilation is None):
replace_stride_with_dilation = [False, False, False]
if (len(replace_stride_with_dilation) != 3):
raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear((512 * block.expansion), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
self.inplanes = (planes * block.expansion)
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
out = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
out.append(x)
x = self.maxpool(x)
x = self.layer1(x)
out.append(x)
x = self.layer2(x)
out.append(x)
x = self.layer3(x)
out.append(x)
x = self.layer4(x)
out.append(x)
x = self.avgpool(x)
out.append(x)
return out
|
def _resnet(in_channels, arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(in_channels, block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
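
# Usage sketch: this ResNet's forward pass returns a feature pyramid (six
# tensors) rather than class logits; shapes below assume a 224x224 RGB input
# and a resnet18-style configuration.
import torch

backbone = ResNet(3, BasicBlock, [2, 2, 2, 2])
pyramid = backbone(torch.randn(1, 3, 224, 224))
print([tuple(f.shape) for f in pyramid])
# [(1, 64, 112, 112), (1, 64, 56, 56), (1, 128, 28, 28),
#  (1, 256, 14, 14), (1, 512, 7, 7), (1, 512, 1, 1)]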
|