def get_same_padding_maxPool2d(image_size=None):
    """Chooses static padding if you have specified an image size, and dynamic padding otherwise.
    Static padding is necessary for ONNX exporting of models.

    Args:
        image_size (int or tuple): Size of the image.

    Returns:
        MaxPool2dDynamicSamePadding or MaxPool2dStaticSamePadding.
    """
if (image_size is None):
return MaxPool2dDynamicSamePadding
else:
return partial(MaxPool2dStaticSamePadding, image_size=image_size)
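# Usage sketch (illustrative, not part of the original source; the _demo name is hypothetical):
def _same_padding_pool_demo():
    # Dynamic padding: pad sizes are computed per input inside forward().
    MaxPool = get_same_padding_maxPool2d()
    dynamic_pool = MaxPool(kernel_size=3, stride=2)
    # Static padding: pad sizes are fixed at construction time, which ONNX export requires.
    MaxPool = get_same_padding_maxPool2d(image_size=224)
    static_pool = MaxPool(kernel_size=3, stride=2)
    return dynamic_pool, static_pool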
|
class MaxPool2dDynamicSamePadding(nn.MaxPool2d):
"2D MaxPooling like TensorFlow's 'SAME' mode, with a dynamic image size.\n The padding is operated in forward function by calculating dynamically.\n "
def __init__(self, kernel_size, stride, padding=0, dilation=1, return_indices=False, ceil_mode=False):
super().__init__(kernel_size, stride, padding, dilation, return_indices, ceil_mode)
self.stride = (([self.stride] * 2) if isinstance(self.stride, int) else self.stride)
self.kernel_size = (([self.kernel_size] * 2) if isinstance(self.kernel_size, int) else self.kernel_size)
self.dilation = (([self.dilation] * 2) if isinstance(self.dilation, int) else self.dilation)
def forward(self, x):
(ih, iw) = x.size()[(- 2):]
(kh, kw) = self.kernel_size
(sh, sw) = self.stride
(oh, ow) = (math.ceil((ih / sh)), math.ceil((iw / sw)))
        pad_h = max(((((oh - 1) * sh) + ((kh - 1) * self.dilation[0])) + 1) - ih, 0)
        pad_w = max(((((ow - 1) * sw) + ((kw - 1) * self.dilation[1])) + 1) - iw, 0)
if ((pad_h > 0) or (pad_w > 0)):
x = F.pad(x, [(pad_w // 2), (pad_w - (pad_w // 2)), (pad_h // 2), (pad_h - (pad_h // 2))])
return F.max_pool2d(x, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode, self.return_indices)
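# Worked example of the 'SAME' padding arithmetic above, for ih = 7, kh = 3, sh = 2, dilation = 1:
#   oh    = ceil(7 / 2) = 4
#   pad_h = max((4 - 1) * 2 + (3 - 1) * 1 + 1 - 7, 0) = 2  -> 1 row on top, 1 on bottom
# pooling the padded 9-row input then gives floor((9 - 3) / 2) + 1 = 4 rows, matching oh.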
|
class MaxPool2dStaticSamePadding(nn.MaxPool2d):
"2D MaxPooling like TensorFlow's 'SAME' mode, with the given input image size.\n The padding mudule is calculated in construction function, then used in forward.\n "
def __init__(self, kernel_size, stride, image_size=None, **kwargs):
super().__init__(kernel_size, stride, **kwargs)
self.stride = (([self.stride] * 2) if isinstance(self.stride, int) else self.stride)
self.kernel_size = (([self.kernel_size] * 2) if isinstance(self.kernel_size, int) else self.kernel_size)
self.dilation = (([self.dilation] * 2) if isinstance(self.dilation, int) else self.dilation)
assert (image_size is not None)
(ih, iw) = ((image_size, image_size) if isinstance(image_size, int) else image_size)
(kh, kw) = self.kernel_size
(sh, sw) = self.stride
(oh, ow) = (math.ceil((ih / sh)), math.ceil((iw / sw)))
        pad_h = max(((((oh - 1) * sh) + ((kh - 1) * self.dilation[0])) + 1) - ih, 0)
        pad_w = max(((((ow - 1) * sw) + ((kw - 1) * self.dilation[1])) + 1) - iw, 0)
if ((pad_h > 0) or (pad_w > 0)):
self.static_padding = nn.ZeroPad2d(((pad_w // 2), (pad_w - (pad_w // 2)), (pad_h // 2), (pad_h - (pad_h // 2))))
else:
self.static_padding = nn.Identity()
def forward(self, x):
x = self.static_padding(x)
x = F.max_pool2d(x, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode, self.return_indices)
return x
|
class BlockDecoder(object):
    """Block Decoder for readability,
    straight from the official TensorFlow repository.
    """
@staticmethod
def _decode_block_string(block_string):
"Get a block through a string notation of arguments.\n Args:\n block_string (str): A string notation of arguments.\n Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'.\n Returns:\n BlockArgs: The namedtuple defined at the top of this file.\n "
assert isinstance(block_string, str)
ops = block_string.split('_')
options = {}
for op in ops:
splits = re.split('(\\d.*)', op)
if (len(splits) >= 2):
(key, value) = splits[:2]
options[key] = value
assert ((('s' in options) and (len(options['s']) == 1)) or ((len(options['s']) == 2) and (options['s'][0] == options['s'][1])))
return BlockArgs(num_repeat=int(options['r']), kernel_size=int(options['k']), stride=[int(options['s'][0])], expand_ratio=int(options['e']), input_filters=int(options['i']), output_filters=int(options['o']), se_ratio=(float(options['se']) if ('se' in options) else None), id_skip=('noskip' not in block_string))
@staticmethod
def _encode_block_string(block):
        """Encode a block to a string.

        Args:
            block (namedtuple): A BlockArgs type argument.

        Returns:
            block_string: A string form of BlockArgs.
        """
        # _decode_block_string stores a single stride and asserts equal h/w strides, so repeat it here.
        args = [('r%d' % block.num_repeat), ('k%d' % block.kernel_size), ('s%d%d' % (block.stride[0], block.stride[0])), ('e%s' % block.expand_ratio), ('i%d' % block.input_filters), ('o%d' % block.output_filters)]
        if (block.se_ratio is not None) and (0 < block.se_ratio <= 1):
args.append(('se%s' % block.se_ratio))
if (block.id_skip is False):
args.append('noskip')
return '_'.join(args)
@staticmethod
def decode(string_list):
        """Decode a list of string notations to specify blocks inside the network.

        Args:
            string_list (list[str]): A list of strings, each string is a notation of block.

        Returns:
            blocks_args: A list of BlockArgs namedtuples of block args.
        """
assert isinstance(string_list, list)
blocks_args = []
for block_string in string_list:
blocks_args.append(BlockDecoder._decode_block_string(block_string))
return blocks_args
@staticmethod
def encode(blocks_args):
        """Encode a list of BlockArgs to a list of strings.

        Args:
            blocks_args (list[namedtuple]): A list of BlockArgs namedtuples of block args.

        Returns:
            block_strings: A list of strings, each string is a notation of block.
        """
block_strings = []
for block in blocks_args:
block_strings.append(BlockDecoder._encode_block_string(block))
return block_strings
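# Usage sketch (illustrative, not part of the original source): round-tripping the block notation.
def _block_decoder_demo():
    args = BlockDecoder.decode(['r1_k3_s11_e1_i32_o16_se0.25'])
    # args[0] is a BlockArgs with num_repeat=1, kernel_size=3, stride=[1], expand_ratio=1,
    # input_filters=32, output_filters=16, se_ratio=0.25, id_skip=True.
    return BlockDecoder.encode(args)  # -> ['r1_k3_s11_e1_i32_o16_se0.25']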
|
def efficientnet_params(model_name):
    """Map EfficientNet model name to parameter coefficients.

    Args:
        model_name (str): Model name to be queried.

    Returns:
        params_dict[model_name]: A (width, depth, res, dropout) tuple.
    """
    params_dict = {
        # Coefficients: (width, depth, resolution, dropout)
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
        'efficientnet-b8': (2.2, 3.6, 672, 0.5),
        'efficientnet-l2': (4.3, 5.3, 800, 0.5),
    }
return params_dict[model_name]
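# Example: efficientnet_params('efficientnet-b0') -> (1.0, 1.0, 224, 0.2),
# i.e. width multiplier 1.0, depth multiplier 1.0, 224px resolution, dropout rate 0.2.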
|
def efficientnet(width_coefficient=None, depth_coefficient=None, image_size=None, dropout_rate=0.2, drop_connect_rate=0.2, num_classes=1000, include_top=True):
    """Create BlockArgs and GlobalParams for an EfficientNet model.

    Args:
        width_coefficient (float)
        depth_coefficient (float)
        image_size (int)
        dropout_rate (float)
        drop_connect_rate (float)
        num_classes (int)
        include_top (bool)
        Meaning as the name suggests.

    Returns:
        blocks_args, global_params.
    """
blocks_args = ['r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25', 'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25', 'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25', 'r1_k3_s11_e6_i192_o320_se0.25']
blocks_args = BlockDecoder.decode(blocks_args)
global_params = GlobalParams(width_coefficient=width_coefficient, depth_coefficient=depth_coefficient, image_size=image_size, dropout_rate=dropout_rate, num_classes=num_classes, batch_norm_momentum=0.99, batch_norm_epsilon=0.001, drop_connect_rate=drop_connect_rate, depth_divisor=8, min_depth=None, include_top=include_top)
return (blocks_args, global_params)
|
def get_model_params(model_name, override_params):
"Get the block args and global params for a given model name.\n Args:\n model_name (str): Model's name.\n override_params (dict): A dict to modify global_params.\n Returns:\n blocks_args, global_params\n "
if model_name.startswith('efficientnet'):
(w, d, s, p) = efficientnet_params(model_name)
(blocks_args, global_params) = efficientnet(width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)
else:
raise NotImplementedError('model name is not pre-defined: {}'.format(model_name))
if override_params:
global_params = global_params._replace(**override_params)
return (blocks_args, global_params)
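# Usage sketch (illustrative, not part of the original source; the _demo name is hypothetical):
def _model_params_demo():
    blocks_args, global_params = get_model_params('efficientnet-b0', {'num_classes': 10})
    return global_params.num_classes  # -> 10; all other fields keep their b0 defaults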
|
def load_pretrained_weights(model, model_name, weights_path=None, load_fc=True, advprop=False):
    """Loads pretrained weights from a weights path, or downloads them using a url.

    Args:
        model (Module): The whole model of efficientnet.
        model_name (str): Model name of efficientnet.
        weights_path (None or str):
            str: path to pretrained weights file on the local disk.
            None: use pretrained weights downloaded from the Internet.
        load_fc (bool): Whether to load pretrained weights for fc layer at the end of the model.
        advprop (bool): Whether to load pretrained weights
            trained with advprop (valid when weights_path is None).
    """
if isinstance(weights_path, str):
state_dict = torch.load(weights_path)
else:
url_map_ = (url_map_advprop if advprop else url_map)
state_dict = model_zoo.load_url(url_map_[model_name])
    if (model.my_in_channels > 3):
        # A 3-channel pretrained stem cannot be loaded into a wider stem (size mismatches raise
        # even with strict=False), so substitute a correctly shaped placeholder tensor.
        state_dict['_conv_stem.weight'] = torch.nn.Parameter(torch.ones([model.stem_out_channels, model.my_in_channels, 3, 3]))
if load_fc:
ret = model.load_state_dict(state_dict, strict=False)
else:
state_dict.pop('_fc.weight')
state_dict.pop('_fc.bias')
ret = model.load_state_dict(state_dict, strict=False)
print('Loaded pretrained weights for {}'.format(model_name))
|
def conv3x3(in_planes: int, out_planes: int, stride: int=1, groups: int=1, dilation: int=1) -> nn.Conv2d:
'3x3 convolution with padding'
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation)
|
def conv1x1(in_planes: int, out_planes: int, stride: int=1) -> nn.Conv2d:
'1x1 convolution'
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
|
class BasicBlock(nn.Module):
expansion: int = 1
    def __init__(self, inplanes: int, planes: int, stride: int=1, downsample: Optional[nn.Module]=None, groups: int=1, base_width: int=64, dilation: int=1, norm_layer: Optional[Callable[..., nn.Module]]=None) -> None:
super(BasicBlock, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
if ((groups != 1) or (base_width != 64)):
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if (dilation > 1):
raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
class Bottleneck(nn.Module):
expansion: int = 4
    def __init__(self, inplanes: int, planes: int, stride: int=1, downsample: Optional[nn.Module]=None, groups: int=1, base_width: int=64, dilation: int=1, norm_layer: Optional[Callable[..., nn.Module]]=None) -> None:
super(Bottleneck, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
width = (int((planes * (base_width / 64.0))) * groups)
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, (planes * self.expansion))
self.bn3 = norm_layer((planes * self.expansion))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
class ResNet(nn.Module):
    def __init__(self, block: Type[Union[BasicBlock, Bottleneck]], layers: List[int], num_classes: int=1000, in_channels: int=3, zero_init_residual: bool=False, groups: int=1, width_per_group: int=64, replace_stride_with_dilation: Optional[List[bool]]=None, norm_layer: Optional[Callable[..., nn.Module]]=None) -> None:
super(ResNet, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if (replace_stride_with_dilation is None):
replace_stride_with_dilation = [False, False, False]
if (len(replace_stride_with_dilation) != 3):
raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear((512 * block.expansion), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int, stride: int=1, dilate: bool=False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
self.inplanes = (planes * block.expansion)
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
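# Usage sketch (illustrative, not part of the original source; the _demo name is hypothetical):
def _resnet_demo():
    model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=10, in_channels=1)  # ResNet-18 layout
    y = model(torch.randn(2, 1, 224, 224))
    return y.shape  # -> torch.Size([2, 10])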
|
def _resnet(arch: str, block: Type[Union[BasicBlock, Bottleneck]], layers: List[int], pretrained: bool, progress: bool, **kwargs: Any) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
|
def resnet18(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
|
def resnet34(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
def resnet50(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
def resnet101(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
|
def resnet152(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
|
def resnext50_32x4d(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
def resnext101_32x8d(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
|
def wide_resnet50_2(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
    The model is the same as ResNet except for the bottleneck number of channels,
    which is twice as large in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. the last block in ResNet-50 has 2048-512-2048
    channels, while in Wide ResNet-50-2 it has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
kwargs['width_per_group'] = (64 * 2)
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
def wide_resnet101_2(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
    The model is the same as ResNet except for the bottleneck number of channels,
    which is twice as large in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. the last block in ResNet-50 has 2048-512-2048
    channels, while in Wide ResNet-50-2 it has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
kwargs['width_per_group'] = (64 * 2)
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
|
class MultiLeNet(nn.Module):
def __init__(self, dim, **kwargs):
super().__init__()
self.shared = nn.Sequential(nn.Conv2d(dim[0], 10, kernel_size=5), nn.MaxPool2d(kernel_size=2), nn.ReLU(), nn.Conv2d(10, 20, kernel_size=5), nn.MaxPool2d(kernel_size=2), nn.ReLU(), nn.Flatten(), nn.Linear(720, 50), nn.ReLU())
self.private_left = nn.Linear(50, 10)
self.private_right = nn.Linear(50, 10)
def forward(self, batch):
x = batch['data']
x = self.shared(x)
return dict(logits_l=self.private_left(x), logits_r=self.private_right(x))
def private_params(self):
return ['private_left.weight', 'private_left.bias', 'private_right.weight', 'private_right.bias']
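# Usage sketch (illustrative, not part of the original source): with 36x36 single-channel
# inputs (e.g. MultiMNIST), the shared trunk flattens to 20 * 6 * 6 = 720 features,
# matching nn.Linear(720, 50) above.
def _multilenet_demo():
    net = MultiLeNet(dim=[1, 36, 36])
    out = net(dict(data=torch.randn(8, 1, 36, 36)))
    return out['logits_l'].shape, out['logits_r'].shape  # -> (8, 10), (8, 10)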
|
class FullyConnected(nn.Module):
def __init__(self, dim, **kwargs):
super().__init__()
self.f = nn.Sequential(nn.Linear(dim[0], 60), nn.ReLU(), nn.Linear(60, 25), nn.ReLU(), nn.Linear(25, 1))
def forward(self, batch):
x = batch['data']
return dict(logits=self.f(x))
|
def from_name(names, task_names):
objectives = {'CrossEntropyLoss': CrossEntropyLoss, 'BinaryCrossEntropyLoss': BinaryCrossEntropyLoss, 'L1Regularization': L1Regularization, 'L2Regularization': L2Regularization, 'ddp': DDPHyperbolicTangentRelaxation, 'deo': DEOHyperbolicTangentRelaxation}
if (task_names is not None):
return [objectives[n]('labels_{}'.format(t), 'logits_{}'.format(t)) for (n, t) in zip(names, task_names)]
else:
return [objectives[n]() for n in names]
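# Usage sketch (illustrative, not part of the original source): two task-specific losses whose
# kwarg names line up with the 'logits_l'/'logits_r' outputs of MultiLeNet above.
def _from_name_demo():
    return from_name(['CrossEntropyLoss', 'CrossEntropyLoss'], task_names=['l', 'r'])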
|
class CrossEntropyLoss(torch.nn.CrossEntropyLoss):
def __init__(self, label_name='labels', logits_name='logits'):
super().__init__(reduction='mean')
self.label_name = label_name
self.logits_name = logits_name
def __call__(self, **kwargs):
logits = kwargs[self.logits_name]
labels = kwargs[self.label_name]
return super().__call__(logits, labels)
|
class BinaryCrossEntropyLoss(torch.nn.BCEWithLogitsLoss):
def __init__(self, label_name='labels', logits_name='logits', pos_weight=None):
super().__init__(reduction='mean', pos_weight=(torch.Tensor([pos_weight]).cuda() if pos_weight else None))
self.label_name = label_name
self.logits_name = logits_name
def __call__(self, **kwargs):
logits = kwargs[self.logits_name]
labels = kwargs[self.label_name]
if (logits.ndim == 2):
logits = torch.squeeze(logits)
if (labels.dtype != torch.float):
labels = labels.float()
return super().__call__(logits, labels)
|
class MSELoss(torch.nn.MSELoss):
def __init__(self, label_name='labels'):
super().__init__()
self.label_name = label_name
def __call__(self, **kwargs):
logits = kwargs['logits']
labels = kwargs[self.label_name]
if (logits.ndim == 2):
logits = torch.squeeze(logits)
return super().__call__(logits, labels)
|
class L1Regularization():
def __call__(self, **kwargs):
model = kwargs['model']
return torch.linalg.norm(torch.cat([p.view((- 1)) for p in model.parameters()]), ord=1)
|
class L2Regularization():
def __call__(self, **kwargs):
model = kwargs['model']
return torch.linalg.norm(torch.cat([p.view((- 1)) for p in model.parameters()]), ord=2)
|
class DDPHyperbolicTangentRelaxation():
def __init__(self, label_name='labels', logits_name='logits', s_name='sensible_attribute', c=1):
self.label_name = label_name
self.logits_name = logits_name
self.s_name = s_name
self.c = c
def __call__(self, **kwargs):
logits = kwargs[self.logits_name]
labels = kwargs[self.label_name]
sensible_attribute = kwargs[self.s_name]
n = logits.shape[0]
logits = torch.sigmoid(logits)
s_negative = logits[sensible_attribute.bool()]
s_positive = logits[(~ sensible_attribute.bool())]
return ((1 / n) * torch.abs((torch.sum(torch.tanh((self.c * torch.relu(s_positive)))) - torch.sum(torch.tanh((self.c * torch.relu(s_negative)))))))
|
class DEOHyperbolicTangentRelaxation():
def __init__(self, label_name='labels', logits_name='logits', s_name='sensible_attribute', c=1):
self.label_name = label_name
self.logits_name = logits_name
self.s_name = s_name
self.c = c
def __call__(self, **kwargs):
logits = kwargs[self.logits_name]
labels = kwargs[self.label_name]
sensible_attribute = kwargs[self.s_name]
n = logits.shape[0]
logits = torch.sigmoid(logits)
s_negative = logits[(sensible_attribute.bool() & (labels == 1))]
s_positive = logits[((~ sensible_attribute.bool()) & (labels == 1))]
return ((1 / n) * torch.abs((torch.sum(torch.tanh((self.c * torch.relu(s_positive)))) - torch.sum(torch.tanh((self.c * torch.relu(s_negative)))))))
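# Both relaxations above replace the hard positive-decision count used by the DDP/DEO scores
# below with the smooth surrogate tanh(c * sigmoid(logit)) (the relu is a no-op on sigmoid
# outputs), so the fairness gap becomes differentiable and can be minimized as an objective.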
|
class Fonseca1():
def f1(theta):
d = len(theta)
sum1 = autograd.numpy.sum([((theta[i] - (1.0 / autograd.numpy.sqrt(d))) ** 2) for i in range(d)])
        return (1 - autograd.numpy.exp((- sum1)))
f1_dx = autograd.grad(f1)
    def __call__(self, **kwargs):
        # f1 and f1_dx are plain functions stored as class attributes; access them via the class.
        return Fonseca1.f1(kwargs['parameters'])
    def gradient(self, **kwargs):
        return Fonseca1.f1_dx(kwargs['parameters'])
|
class Fonseca2():
def f2(theta):
d = len(theta)
sum1 = autograd.numpy.sum([((theta[i] + (1.0 / autograd.numpy.sqrt(d))) ** 2) for i in range(d)])
        return (1 - autograd.numpy.exp((- sum1)))
f2_dx = autograd.grad(f2)
    def __call__(self, **kwargs):
        # f2 and f2_dx are plain functions stored as class attributes; access them via the class.
        return Fonseca2.f2(kwargs['parameters'])
    def gradient(self, **kwargs):
        return Fonseca2.f2_dx(kwargs['parameters'])
|
def from_objectives(objectives):
scores = {obj.CrossEntropyLoss: CrossEntropy, obj.BinaryCrossEntropyLoss: BinaryCrossEntropy, obj.DDPHyperbolicTangentRelaxation: DDP, obj.DEOHyperbolicTangentRelaxation: DEO, obj.MSELoss: L2Distance}
return [scores[o.__class__](o.label_name, o.logits_name) for o in objectives]
|
class BaseScore():
def __init__(self, label_name='labels', logits_name='logits'):
super().__init__()
self.label_name = label_name
self.logits_name = logits_name
@abstractmethod
def __call__(self, **kwargs):
raise NotImplementedError()
|
class CrossEntropy(BaseScore):
def __call__(self, **kwargs):
logits = kwargs[self.logits_name]
labels = kwargs[self.label_name]
with torch.no_grad():
return torch.nn.functional.cross_entropy(logits, labels.long(), reduction='mean').item()
|
class BinaryCrossEntropy(BaseScore):
def __call__(self, **kwargs):
logits = kwargs[self.logits_name]
labels = kwargs[self.label_name]
if ((len(logits.shape) > 1) and (logits.shape[1] == 1)):
logits = torch.squeeze(logits)
with torch.no_grad():
return torch.nn.functional.binary_cross_entropy_with_logits(logits, labels.float(), reduction='mean').item()
|
class L2Distance(BaseScore):
def __call__(self, **kwargs):
prediction = kwargs['logits']
labels = kwargs[self.label_name]
with torch.no_grad():
return torch.linalg.norm((prediction - labels), ord=2)
|
class mcr(BaseScore):
    'Misclassification rate, i.e. 1 - accuracy.'
def __call__(self, **kwargs):
logits = kwargs[self.logits_name]
labels = kwargs[self.label_name]
with torch.no_grad():
if (len(logits.shape) == 1):
y_hat = torch.round(torch.sigmoid(logits))
elif (logits.shape[1] == 1):
logits = torch.squeeze(logits)
y_hat = torch.round(torch.sigmoid(logits))
else:
y_hat = torch.argmax(logits, dim=1)
accuracy = (sum((y_hat == labels)) / len(y_hat))
return (1 - accuracy.item())
|
class DDP(BaseScore):
    'Difference in Demographic Parity'
def __call__(self, **kwargs):
logits = kwargs[self.logits_name]
labels = kwargs[self.label_name]
sensible_attribute = kwargs['sensible_attribute']
with torch.no_grad():
n = logits.shape[0]
logits_s_negative = logits[sensible_attribute.bool()]
logits_s_positive = logits[(~ sensible_attribute.bool())]
return ((1 / n) * torch.abs((torch.sum((logits_s_negative > 0)) - torch.sum((logits_s_positive > 0))))).cpu().item()
|
class DEO(BaseScore):
'Difference in Equality of Opportunity'
def __call__(self, **kwargs):
logits = kwargs[self.logits_name]
labels = kwargs[self.label_name]
sensible_attribute = kwargs['sensible_attribute']
with torch.no_grad():
n = logits.shape[0]
logits_s_negative = logits[(sensible_attribute.bool() & (labels == 1))]
logits_s_positive = logits[((~ sensible_attribute.bool()) & (labels == 1))]
return ((1 / n) * torch.abs((torch.sum((logits_s_negative > 0)) - torch.sum((logits_s_positive > 0))))).cpu().item()
|
class MiniImageNet(Dataset):
def __init__(self, setname, args):
csv_path = osp.join(SPLIT_PATH, (setname + '.csv'))
        with open(csv_path, 'r') as f:
            lines = [x.strip() for x in f.readlines()][1:]
data = []
label = []
lb = (- 1)
self.wnids = []
for l in lines:
(name, wnid) = l.split(',')
path = osp.join(IMAGE_PATH, name)
if (wnid not in self.wnids):
self.wnids.append(wnid)
lb += 1
data.append(path)
label.append(lb)
self.data = data
self.label = label
self.num_class = len(set(label))
self.args = args
if (args.model_type == 'ConvNet'):
image_size = 84
self.to_tensor = transforms.Compose([transforms.ToTensor(), transforms.Normalize(np.array([0.485, 0.456, 0.406]), np.array([0.229, 0.224, 0.225]))])
self.transform = transforms.Compose([transforms.Resize(92), transforms.CenterCrop(image_size)])
else:
image_size = 84
self.to_tensor = transforms.Compose([transforms.ToTensor(), transforms.Normalize(np.array([(x / 255.0) for x in [120.39586422, 115.59361427, 104.54012653]]), np.array([(x / 255.0) for x in [70.68188272, 68.27635443, 72.54505529]]))])
self.transform = transforms.Compose([transforms.Resize(92), transforms.CenterCrop(image_size)])
def __len__(self):
return len(self.data)
def __getitem__(self, i):
(path, label) = (self.data[i], self.label[i])
image = self.transform(Image.open(path).convert('RGB'))
image_0 = self.to_tensor(image)
image_90 = self.to_tensor(TF.rotate(image, 90))
image_180 = self.to_tensor(TF.rotate(image, 180))
image_270 = self.to_tensor(TF.rotate(image, 270))
all_images = torch.stack([image_0, image_90, image_180, image_270], 0)
return (all_images, label)
|
class CategoriesSamplerBak():
def __init__(self, label, n_batch, n_cls, n_per):
self.n_batch = n_batch
self.n_cls = n_cls
self.n_per = n_per
self.n_step = 0
self.mark = {}
self.r_clses = None
label = np.array(label)
self.m_ind = []
for i in range((max(label) + 1)):
ind = np.argwhere((label == i)).reshape((- 1))
ind = torch.from_numpy(ind)
self.m_ind.append(ind)
def __len__(self):
return self.n_batch
def __iter__(self):
for i_batch in range(self.n_batch):
batch = []
if (self.r_clses is None):
classes = torch.randperm(len(self.m_ind))[:self.n_cls]
self.r_clses = classes
else:
classes = self.r_clses
self.r_clses = None
for c in classes:
l = self.m_ind[c]
self.mark[l] = True
pos = torch.randperm(len(l))[:self.n_per]
batch.append(l[pos])
batch = torch.stack(batch).t().reshape((- 1))
(yield batch)
def getmark(self):
count = 0
for c in self.m_ind:
if (c not in self.mark):
count += 1
print(count)
|
class CategoriesSampler():
def __init__(self, label, n_batch, n_cls, n_per):
self.n_batch = n_batch
self.n_cls = n_cls
self.n_per = n_per
self.n_step = 0
label = np.array(label)
self.m_ind = []
for i in range((max(label) + 1)):
ind = np.argwhere((label == i)).reshape((- 1))
ind = torch.from_numpy(ind)
self.m_ind.append(ind)
def __len__(self):
return self.n_batch
def __iter__(self):
for i_batch in range(self.n_batch):
batch = []
classes = torch.randperm(len(self.m_ind))[:self.n_cls]
for c in classes:
l = self.m_ind[c]
pos = torch.randperm(len(l))[:self.n_per]
batch.append(l[pos])
batch = torch.stack(batch).t().reshape((- 1))
(yield batch)
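# Usage sketch (illustrative, not part of the original source; assumes
# torch.utils.data.DataLoader is imported):
def _episodic_loader_demo(dataset):
    # 5-way episodes with 1 support + 15 query samples per class, 100 episodes per epoch.
    sampler = CategoriesSampler(dataset.label, n_batch=100, n_cls=5, n_per=1 + 15)
    return DataLoader(dataset, batch_sampler=sampler, num_workers=4, pin_memory=True)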
|
class ScaledDotProductAttention(nn.Module):
' Scaled Dot-Product Attention '
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=(- 1))
def forward(self, q, k, v):
attn = torch.bmm(q, k.transpose(1, 2))
attn = (attn / self.temperature)
log_attn = F.log_softmax(attn, 2)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return (output, attn, log_attn)
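# Usage sketch (illustrative, not part of the original source; the _demo name is hypothetical):
def _attention_demo():
    attn_layer = ScaledDotProductAttention(temperature=8.0)  # typically sqrt(d_k), here d_k = 64
    q = k = v = torch.randn(2, 5, 64)  # (batch, seq_len, dim)
    output, attn, log_attn = attn_layer(q, k, v)
    return output.shape, attn.shape  # -> (2, 5, 64), (2, 5, 5)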
|
class MultiHeadAttention(nn.Module):
' Multi-Head Attention module '
def __init__(self, args, n_head, d_model, d_k, d_v, dropout=0.1, do_activation=True):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.do_activation = do_activation
self.w_qs = nn.Linear(d_model, (n_head * d_k))
self.w_ks = nn.Linear(d_model, (n_head * d_k))
self.w_vs = nn.Linear(d_model, (n_head * d_v))
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt((2.0 / (d_model + d_k))))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt((2.0 / (d_model + d_k))))
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt((2.0 / (d_model + d_v))))
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear((n_head * d_v), d_model)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(dropout)
self.activation = F.relu
def forward(self, q, k, v):
(d_k, d_v, n_head) = (self.d_k, self.d_v, self.n_head)
(sz_b, len_q, _) = q.size()
(sz_b, len_k, _) = k.size()
(sz_b, len_v, _) = v.size()
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
if self.do_activation:
q = self.activation(q)
k = self.activation(k)
v = self.activation(v)
q = q.permute(2, 0, 1, 3).contiguous().view((- 1), len_q, d_k)
k = k.permute(2, 0, 1, 3).contiguous().view((- 1), len_k, d_k)
v = v.permute(2, 0, 1, 3).contiguous().view((- 1), len_v, d_v)
(output, attn, log_attn) = self.attention(q, k, v)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, (- 1))
output = self.dropout(self.fc(output))
if self.do_activation:
output = self.activation(output)
output = self.layer_norm((output + residual))
return output
|
class SSL_boost(nn.Module):
def __init__(self, args, dropout=0.2):
super().__init__()
self.args = args
if (args.model_type == 'ConvNet'):
from SSL.networks.convnet import ConvNet
cnn_dim = args.embed_size
self.encoder = ConvNet(args, z_dim=cnn_dim)
elif (args.model_type == 'ResNet12'):
from SSL.networks.ResNet12_embedding import resnet12
self.encoder = resnet12()
cnn_dim = args.embed_size
        else:
            raise ValueError('Unknown model_type: {}'.format(args.model_type))
z_dim = cnn_dim
self.slf_attn = MultiHeadAttention(args, args.head, z_dim, z_dim, z_dim, dropout=dropout, do_activation=True)
        self.KDloss = nn.KLDivLoss(reduction='mean')
self.MSELoss = nn.MSELoss()
self.trans_num = 4
self.Rotation_classifier = nn.Sequential(nn.Linear(z_dim, self.trans_num), nn.ReLU())
    def expand_tasks(self, support_tasks, query_tasks):
support_s = list(torch.split(support_tasks, 1, dim=1))
support_s = [support.squeeze(1) for support in support_s]
query_s = list(torch.split(query_tasks, 1, dim=1))
query_s = [query.squeeze(1) for query in query_s]
return (support_s, query_s)
def fsl_module_per_task(self, support, query):
(N, K, Q) = (self.args.way, self.args.shot, self.args.query)
input_tensor = torch.cat([support, query], 0)
output = self.encoder(input_tensor)
support = output[:support.size(0)]
query = output[support.size(0):]
proto = support.reshape(K, (- 1), support.shape[(- 1)]).mean(dim=0)
        logits = euclidean_metric(query, proto)
        logits = (logits / self.args.temperature)
        return (support, query, proto, logits)
def forward(self, support, query, mode='test'):
(N, K, Q) = (self.args.way, self.args.shot, self.args.query)
        (support_s, query_s) = self.expand_tasks(support, query)
rot_label = torch.arange(self.trans_num, dtype=torch.int8).view((- 1), 1).repeat(1, ((Q * N) + (K * N))).type(torch.LongTensor)
rot_label = rot_label.view((- 1)).cuda()
fsl_label = torch.arange(N, dtype=torch.int8).repeat(Q).type(torch.LongTensor).cuda()
(sup_feats, que_feats, protos, class_dist) = ([], [], [], [])
rotation_samples = []
for (support_ang, query_ang) in zip(support_s, query_s):
            (support, query, proto, logits) = self.fsl_module_per_task(support_ang, query_ang)
sup_feats.append(support)
que_feats.append(query)
protos.append(proto)
            class_dist.append(logits)
rotation_samples.append(torch.cat([support, query], 0))
rotation_samples = torch.cat(rotation_samples, 0)
rot_pred = self.Rotation_classifier(rotation_samples)
rot_loss = F.cross_entropy(rot_pred, rot_label)
raw_logits = (sum(class_dist) / len(class_dist))
raw_logits = F.log_softmax(raw_logits, (- 1))
        MI_losses = [F.kl_div(raw_logits, F.softmax(logits, (- 1)), reduction='mean') for logits in class_dist]
MI_loss = (sum(MI_losses) / len(MI_losses))
fsl_losses = [F.cross_entropy(logits, fsl_label) for logits in class_dist]
fsl_loss = (sum(fsl_losses) / len(fsl_losses))
trans_support = torch.stack(sup_feats, 1)
trans_query = torch.stack(que_feats, 1)
trans_support = self.slf_attn(trans_support, trans_support, trans_support)
trans_query = self.slf_attn(trans_query, trans_query, trans_query)
trans_support = trans_support.view((K * N), (- 1))
trans_query = trans_query.view((Q * N), (- 1))
proto = trans_support.reshape(K, (- 1), trans_support.shape[(- 1)]).mean(dim=0)
        logits = euclidean_metric(trans_query, proto)
        logits = (logits / self.args.temperature)
        final_loss = F.cross_entropy(logits, fsl_label)
acc_list = [count_acc(logits, fsl_label) for logits in class_dist]
        acc_list.append(count_acc(logits, fsl_label))
if (mode == 'extract'):
(proto, feats) = ([], [])
for i in range(self.trans_num):
proto.append(sup_feats[i].view(K, N, (- 1)).mean(dim=0))
feats.append(torch.cat([sup_feats[i].view(K, N, (- 1)), que_feats[i].view(Q, N, (- 1))], 0).view(((K + Q) * N), (- 1)))
trans_proto = trans_support.view(K, N, (- 1)).mean(dim=0)
trans_feats = torch.cat([trans_support.view(K, N, (- 1)), trans_query.view(Q, N, (- 1))], 0).view(((K + Q) * N), (- 1))
proto = torch.stack(proto, 1)
feats = torch.stack(feats, 1)
return (acc_list, proto, feats, trans_proto, trans_feats)
return (rot_loss, MI_loss, fsl_loss, final_loss, acc_list)
|
def conv_block(in_channels, out_channels):
return nn.Sequential(nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(), nn.MaxPool2d(2))
|
class ConvNet(nn.Module):
def __init__(self, args, x_dim=3, hid_dim=64, z_dim=64):
super().__init__()
self.args = args
self.encoder = nn.Sequential(conv_block(x_dim, hid_dim), conv_block(hid_dim, hid_dim), conv_block(hid_dim, hid_dim), conv_block(hid_dim, z_dim))
def forward(self, x):
x = self.encoder(x)
x = nn.MaxPool2d(5)(x)
x = x.view(x.size(0), (- 1))
return x
|
class WiderConvnet(nn.Module):
def __init__(self, args, emb_size=128):
super(WiderConvnet, self).__init__()
self.hidden = 64
self.last_hidden = (self.hidden * 25)
self.emb_size = emb_size
self.conv_1 = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=self.hidden, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(num_features=self.hidden), nn.MaxPool2d(kernel_size=2), nn.LeakyReLU(negative_slope=0.2, inplace=True))
self.conv_2 = nn.Sequential(nn.Conv2d(in_channels=self.hidden, out_channels=int((self.hidden * 1.5)), kernel_size=3, bias=False), nn.BatchNorm2d(num_features=int((self.hidden * 1.5))), nn.MaxPool2d(kernel_size=2), nn.LeakyReLU(negative_slope=0.2, inplace=True))
self.conv_3 = nn.Sequential(nn.Conv2d(in_channels=int((self.hidden * 1.5)), out_channels=(self.hidden * 2), kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(num_features=(self.hidden * 2)), nn.MaxPool2d(kernel_size=2), nn.LeakyReLU(negative_slope=0.2, inplace=True), nn.Dropout2d(0.4))
self.conv_4 = nn.Sequential(nn.Conv2d(in_channels=(self.hidden * 2), out_channels=(self.hidden * 4), kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(num_features=(self.hidden * 4)), nn.MaxPool2d(kernel_size=2), nn.LeakyReLU(negative_slope=0.2, inplace=True), nn.Dropout2d(0.5))
self.layer_last = nn.Sequential(nn.Linear(in_features=(self.last_hidden * 4), out_features=self.emb_size, bias=True), nn.BatchNorm1d(self.emb_size))
def forward(self, input_data):
output_data = self.conv_4(self.conv_3(self.conv_2(self.conv_1(input_data))))
return self.layer_last(output_data.view(output_data.size(0), (- 1)))
|
def set_gpu(x):
os.environ['CUDA_VISIBLE_DEVICES'] = x
print('using gpu:', x)
|
def ensure_path(path, remove=True):
if os.path.exists(path):
if remove:
if (input('{} exists, remove? ([y]/n)'.format(path)) != 'n'):
shutil.rmtree(path)
os.makedirs(path)
else:
os.makedirs(path)
|
class Averager():
def __init__(self):
self.n = 0
self.v = 0
def add(self, x):
self.v = (((self.v * self.n) + x) / (self.n + 1))
self.n += 1
def item(self):
return self.v
|
def count_acc(logits, label):
pred = torch.argmax(logits, dim=1)
if torch.cuda.is_available():
return (pred == label).type(torch.cuda.FloatTensor).mean().item()
else:
return (pred == label).type(torch.FloatTensor).mean().item()
|
def euclidean_metric(a, b):
n = a.shape[0]
m = b.shape[0]
a = a.unsqueeze(1).expand(n, m, (- 1))
b = b.unsqueeze(0).expand(n, m, (- 1))
logits = (- ((a - b) ** 2).sum(dim=(- 1)))
return logits
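# Shape sketch (illustrative, not part of the original source): a is (n, d) queries, b is (m, d)
# prototypes; the result is an (n, m) matrix of negative squared distances, used as logits.
def _euclidean_metric_demo():
    return euclidean_metric(torch.zeros(3, 2), torch.ones(4, 2))  # -> 3x4 tensor of -2.0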
|
def cosine_metric(a, b):
    n = a.shape[0]
    m = b.shape[0]
    a = a.unsqueeze(1).expand(n, m, (- 1))
    b = b.unsqueeze(0).expand(n, m, (- 1))
    # Pairwise cosine similarity along the feature dimension (eps guards against zero vectors).
    logits = ((a * b).sum(dim=(- 1)) / ((a.norm(dim=(- 1)) * b.norm(dim=(- 1))) + 1e-08))
    return logits
|
class Timer():
def __init__(self):
self.o = time.time()
def measure(self, p=1):
x = ((time.time() - self.o) / p)
x = int(x)
if (x >= 3600):
return '{:.1f}h'.format((x / 3600))
if (x >= 60):
return '{}m'.format(round((x / 60)))
return '{}s'.format(x)
|
def pprint(x):
    _utils_pp.pprint(x)  # _utils_pp: a module-level pprint.PrettyPrinter instance (defined elsewhere)
|
def compute_confidence_interval(data):
    """
    Compute the 95% confidence interval.
    :param data: an array of mean accuracy (or mAP) across a number of sampled episodes.
    :return: (mean, half-width) of the 95% confidence interval for this data.
    """
a = (1.0 * np.array(data))
m = np.mean(a)
std = np.std(a)
pm = (1.96 * (std / np.sqrt(len(a))))
return (m, pm)
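# Example: for data = [0.5, 0.6, 0.7], m = 0.6 and
# pm = 1.96 * std / sqrt(3) = 1.96 * 0.0816 / 1.732 ≈ 0.092.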
|
def merge_new_config(config, new_config):
if ('_BASE_CONFIG_' in new_config):
with open(new_config['_BASE_CONFIG_'], 'r') as f:
            try:
                yaml_config = yaml.load(f, Loader=yaml.FullLoader)
            except AttributeError:
                # PyYAML < 5.1 has no FullLoader
                yaml_config = yaml.load(f)
config.update(EasyDict(yaml_config))
for (key, val) in new_config.items():
if (not isinstance(val, dict)):
config[key] = val
continue
if (key not in config):
config[key] = EasyDict()
merge_new_config(config[key], val)
return config
|
def cfg_from_yaml_file(cfg_file, config):
with open(cfg_file, 'r') as f:
        try:
            new_config = yaml.load(f, Loader=yaml.FullLoader)
        except AttributeError:
            # PyYAML < 5.1 has no FullLoader
            new_config = yaml.load(f)
merge_new_config(config=config, new_config=new_config)
return config
|
class BasicConvLSTMCell(object):
    """Basic Conv LSTM recurrent network cell."""
def __init__(self, shape, filter_size, num_features, forget_bias=1.0, input_size=None, state_is_tuple=False, activation=tf.nn.tanh):
        """Initialize the basic Conv LSTM cell.

        Args:
            shape: int tuple, the height and width of the cell.
            filter_size: int tuple, the height and width of the filter.
            num_features: int, the depth of the cell.
            forget_bias: float, the bias added to forget gates (see above).
            input_size: deprecated and unused.
            state_is_tuple: if True, accepted and returned states are 2-tuples of
                the `c_state` and `m_state`. If False, they are concatenated
                along the column axis. The latter behavior will soon be deprecated.
            activation: activation function of the inner states.
        """
        if (input_size is not None):
            logging.warning('%s: The input_size parameter is deprecated.', self)
self.shape = shape
self.filter_size = filter_size
self.num_features = num_features
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
        self._activation = activation
        # state_size/output_size below reference _num_units; assume it denotes the
        # per-position feature depth of the cell state.
        self._num_units = num_features
@property
def state_size(self):
return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else (2 * self._num_units))
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None, reuse=False):
'Long short-term memory cell (LSTM).'
with tf.variable_scope((scope or type(self).__name__), reuse=reuse):
if self._state_is_tuple:
(c, h) = state
else:
(c, h) = tf.split(axis=3, num_or_size_splits=2, value=state)
concat = _conv_linear([inputs, h], self.filter_size, (self.num_features * 4), True)
(i, j, f, o) = tf.split(axis=3, num_or_size_splits=4, value=concat)
new_c = ((c * tf.nn.sigmoid((f + self._forget_bias))) + (tf.nn.sigmoid(i) * self._activation(j)))
new_h = (self._activation(new_c) * tf.nn.sigmoid(o))
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = tf.concat(axis=3, values=[new_c, new_h])
return (new_h, new_state)
|
def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None, reuse=False):
    """convolution:
    Args:
        args: a 4D Tensor or a list of 4D, batch x n, Tensors.
        filter_size: int tuple of filter height and width.
        num_features: int, number of features.
        bias: whether to add a bias term.
        bias_start: starting value to initialize the bias; 0 by default.
        scope: VariableScope for the created subgraph; defaults to "Linear".
        reuse: for reusing already existing weights.
    Returns:
        A 4D Tensor with shape [batch h w num_features]
    Raises:
        ValueError: if some of the arguments have unspecified or wrong shapes.
    """
total_arg_size_depth = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if (len(shape) != 4):
raise ValueError(('Linear is expecting 4D arguments: %s' % str(shapes)))
        if (not shape[3]):
            raise ValueError(('Linear expects shape[3] of arguments: %s' % str(shapes)))
else:
total_arg_size_depth += shape[3]
dtype = [a.dtype for a in args][0]
with tf.variable_scope((scope or 'Conv'), reuse=reuse):
matrix = tf.get_variable('Matrix', [filter_size[0], filter_size[1], total_arg_size_depth, num_features], dtype=dtype)
if (len(args) == 1):
res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME')
else:
res = tf.nn.conv2d(tf.concat(axis=3, values=args), matrix, strides=[1, 1, 1, 1], padding='SAME')
if (not bias):
return res
bias_term = tf.get_variable('Bias', [num_features], dtype=dtype, initializer=tf.constant_initializer(bias_start, dtype=dtype))
return (res + bias_term)
|
def batch_norm(inputs, name, train=True, reuse=False):
return tf.contrib.layers.batch_norm(inputs=inputs, is_training=train, reuse=reuse, scope=name, scale=True)
|
def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='conv2d', reuse=False, padding='SAME'):
with tf.variable_scope(name, reuse=reuse):
w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[(- 1)], output_dim], initializer=tf.contrib.layers.xavier_initializer())
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
return conv
|
def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='deconv2d', reuse=False, with_w=False, padding='SAME'):
with tf.variable_scope(name, reuse=reuse):
        w = tf.get_variable('w', [k_h, k_w, output_shape[(- 1)], input_.get_shape()[(- 1)]], initializer=tf.contrib.layers.xavier_initializer())
try:
deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1], padding=padding)
except AttributeError:
deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[(- 1)]], initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
if with_w:
return (deconv, w, biases)
else:
return deconv
|
def lrelu(x, leak=0.2, name='lrelu'):
with tf.variable_scope(name):
f1 = (0.5 * (1 + leak))
f2 = (0.5 * (1 - leak))
return ((f1 * x) + (f2 * abs(x)))
|
def relu(x):
return tf.nn.relu(x)
|
def tanh(x):
return tf.nn.tanh(x)
|
def shape2d(a):
    """
    a: an int or a tuple/list of length 2
    """
    if isinstance(a, int):
return [a, a]
if isinstance(a, (list, tuple)):
assert (len(a) == 2)
return list(a)
raise RuntimeError('Illegal shape: {}'.format(a))
|
def shape4d(a):
return (([1] + shape2d(a)) + [1])
|
def UnPooling2x2ZeroFilled(x):
out = tf.concat(axis=3, values=[x, tf.zeros_like(x)])
out = tf.concat(axis=2, values=[out, tf.zeros_like(out)])
sh = x.get_shape().as_list()
if (None not in sh[1:]):
out_size = [(- 1), (sh[1] * 2), (sh[2] * 2), sh[3]]
return tf.reshape(out, out_size)
else:
sh = tf.shape(x)
return tf.reshape(out, [(- 1), (sh[1] * 2), (sh[2] * 2), sh[3]])
|
def MaxPooling(x, shape, stride=None, padding='VALID'):
"\n MaxPooling on images.\n :param input: NHWC tensor.\n :param shape: int or [h, w]\n :param stride: int or [h, w]. default to be shape.\n :param padding: 'valid' or 'same'. default to 'valid'\n :returns: NHWC tensor.\n "
padding = padding.upper()
shape = shape4d(shape)
if (stride is None):
stride = shape
else:
stride = shape4d(stride)
return tf.nn.max_pool(x, ksize=shape, strides=stride, padding=padding)
|
def FixedUnPooling(x, shape):
    """
    Unpool the input with a fixed mat to perform kronecker product with.
    :param input: NHWC tensor
    :param shape: int or [h, w]
    :returns: NHWC tensor
    """
    shape = shape2d(shape)
    # Only 2x2 zero-filled unpooling is implemented; validate `shape` accordingly.
    assert shape == [2, 2], 'FixedUnPooling only supports 2x2 unpooling'
    return UnPooling2x2ZeroFilled(x)
|
def gdl(gen_frames, gt_frames, alpha):
    """
    Calculates the sum of GDL losses between the predicted and gt frames.
    @param gen_frames: The predicted frames at each scale.
    @param gt_frames: The ground truth frames at each scale.
    @param alpha: The power to which each gradient term is raised.
    @return: The GDL loss.
    """
pos = tf.constant(np.identity(3), dtype=tf.float32)
neg = ((- 1) * pos)
filter_x = tf.expand_dims(tf.stack([neg, pos]), 0)
filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)])
strides = [1, 1, 1, 1]
padding = 'SAME'
gen_dx = tf.abs(tf.nn.conv2d(gen_frames, filter_x, strides, padding=padding))
gen_dy = tf.abs(tf.nn.conv2d(gen_frames, filter_y, strides, padding=padding))
gt_dx = tf.abs(tf.nn.conv2d(gt_frames, filter_x, strides, padding=padding))
gt_dy = tf.abs(tf.nn.conv2d(gt_frames, filter_y, strides, padding=padding))
grad_diff_x = tf.abs((gt_dx - gen_dx))
grad_diff_y = tf.abs((gt_dy - gen_dy))
gdl_loss = tf.reduce_mean(((grad_diff_x ** alpha) + (grad_diff_y ** alpha)))
return gdl_loss
|
def linear(input_, output_size, name, stddev=0.02, bias_start=0.0, reuse=False, with_w=False):
shape = input_.get_shape().as_list()
with tf.variable_scope(name, reuse=reuse):
matrix = tf.get_variable('Matrix', [shape[1], output_size], tf.float32, tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable('bias', [output_size], initializer=tf.constant_initializer(bias_start))
if with_w:
return ((tf.matmul(input_, matrix) + bias), matrix, bias)
else:
return (tf.matmul(input_, matrix) + bias)
|
class Acq_Optimizer(object):
def __init__(self, model, acqu_func, bounds, batch_method='CL', batch_size=1, model_name='GP', nsubspace=1):
        """
        Optimise the acquisition functions to recommend the next (batch) locations for evaluation.

        :param model: BO surrogate model function
        :param acqu_func: BO acquisition function
        :param bounds: input space bounds
        :param batch_method: the method for selecting a batch of new locations to be evaluated next
        :param batch_size: the number of new query locations in the batch (=1 for sequential BO and >1 for parallel BO)
        :param model_name: the name of the BO surrogate model
        :param nsubspace: number of subspaces; needs to be specified for ADDGP-BO but equals 1 for other BO attacks
        """
self.model = model
self.acqu_func = acqu_func
self.batch_size = batch_size
self.batch_method = batch_method
self.bounds = bounds
self.model_name = model_name
self.nsubspace = nsubspace
def get_next(self, X, Y):
        """
        :param X: observed input data
        :param Y: observed output data
        :return X_batch: batch of inputs recommended by BO to be evaluated next
        :return batch_acq_value: acquisition function values of the batch of inputs recommended
        """
if (self.batch_method.upper() == 'CL'):
if (self.model_name == 'GP'):
(X_batch, batch_acq_value) = optimise_acqu_func(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X)
elif (self.model_name == 'GPLDR'):
(X_batch, batch_acq_value) = optimise_acqu_func_mledr(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X)
elif self.model_name.startswith('ADDGP'):
(X_batch, batch_acq_value) = optimise_acqu_func_additive(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X, nsubspace=self.nsubspace)
else:
(X_batch, batch_acq_value) = optimise_acqu_func_for_NN(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X)
new_batch_point = X_batch
temporal_X = np.copy(X)
temporal_Y = np.copy(Y)
L = np.min(temporal_Y)
k = 1
while (k < self.batch_size):
temporal_X = np.vstack((temporal_X, new_batch_point))
temporal_Y = np.vstack((temporal_Y, L))
self.model._update_model(temporal_X, temporal_Y)
if (self.model_name == 'GP'):
(new_batch_point, next_batch_acq_value) = optimise_acqu_func(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X)
elif (self.model_name == 'GPLDR'):
(new_batch_point, next_batch_acq_value) = optimise_acqu_func_mledr(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X)
elif self.model_name.startswith('ADDGP'):
(new_batch_point, next_batch_acq_value) = optimise_acqu_func_additive(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X, nsubspace=self.nsubspace)
else:
(new_batch_point, next_batch_acq_value) = optimise_acqu_func_for_NN(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X)
X_batch = np.vstack((X_batch, new_batch_point))
batch_acq_value = np.vstack((batch_acq_value, next_batch_acq_value))
k += 1
elif (self.batch_method.upper() == 'KB'):
if (self.model_name == 'GP'):
(X_batch, batch_acq_value) = optimise_acqu_func(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X)
elif self.model_name.startswith('ADDGP'):
(X_batch, batch_acq_value) = optimise_acqu_func_additive(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X, nsubspace=self.nsubspace)
elif (self.model_name == 'GPLDR'):
(X_batch, batch_acq_value) = optimise_acqu_func_mledr(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X)
else:
(X_batch, batch_acq_value) = optimise_acqu_func_for_NN(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X, func_gradient=False)
new_batch_point = X_batch
temporal_X = np.copy(X)
temporal_Y = np.copy(Y)
k = 1
            while (k < self.batch_size):
(mu_new_batch_point, _) = self.model.predict(new_batch_point)
temporal_X = np.vstack((temporal_X, new_batch_point))
temporal_Y = np.vstack((temporal_Y, mu_new_batch_point))
self.model._update_model(temporal_X, temporal_Y)
if (self.model_name == 'GP'):
(new_batch_point, next_batch_acq_value) = optimise_acqu_func(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X)
elif self.model_name.startswith('ADDGP'):
(new_batch_point, next_batch_acq_value) = optimise_acqu_func_additive(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X, nsubspace=self.nsubspace)
elif (self.model_name == 'GPLDR'):
(new_batch_point, next_batch_acq_value) = optimise_acqu_func_mledr(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X)
else:
(new_batch_point, next_batch_acq_value) = optimise_acqu_func_for_NN(acqu_func=self.acqu_func, bounds=self.bounds, X_ob=X)
X_batch = np.vstack((X_batch, new_batch_point))
batch_acq_value = np.append(batch_acq_value, next_batch_acq_value)
k += 1
return (X_batch, batch_acq_value)
|
class BaseModel():
@abstractmethod
def _create_model(self, X, Y):
raise NotImplementedError('')
@abstractmethod
def _update_model(self, X_all, Y_all, itr=0):
        """
        Updates the model with new observations.
        """
return
@abstractmethod
def predict(self, X):
        """
        Predictions with the model. Returns posterior means and standard deviations at X.
        """
return
@abstractmethod
def predict_withGradients(self, X):
        """
        Returns the mean, standard deviation, mean gradient and standard deviation gradient at X.
        """
return
|
def generate_attack_data_set(data, num_sample, img_offset, model, attack_type='targeted', random_target_class=None, shift_index=False):
    """
    Generate the data for conducting the attack. Only selects samples that the model classifies correctly.
    """
orig_img = []
orig_labels = []
target_labels = []
orig_img_id = []
pred_labels = np.argmax(model.model.predict(data.test_data), axis=1)
true_labels = np.argmax(data.test_labels, axis=1)
    correct_data_indices = np.where(pred_labels == true_labels)
print('Total testing data:{}, correct classified data:{}'.format(len(data.test_labels), len(correct_data_indices[0])))
data.test_data = data.test_data[correct_data_indices]
data.test_labels = data.test_labels[correct_data_indices]
true_labels = true_labels[correct_data_indices]
np.random.seed(img_offset)
class_num = data.test_labels.shape[1]
for sample_index in range(num_sample):
if (attack_type == 'targeted'):
if (random_target_class is not None):
np.random.seed(0)
seq_imagenet = [1, 4, 6, 8, 9, 13, 15, 16, 19, 20, 22, 24, 25, 27, 28, 30, 34, 35, 36, 37, 38, 44, 49, 51, 56, 59, 60, 61, 62, 63, 67, 68, 70, 71, 74, 75, 76, 77, 78, 79, 82, 84, 85, 87, 88, 91, 94, 96, 97, 99]
seq = [seq_imagenet[(img_offset + sample_index)]]
                while (seq[0] == true_labels[(img_offset + sample_index)]):
seq = np.random.choice(random_target_class, 1)
else:
seq = list(range(class_num))
seq.remove(true_labels[(img_offset + sample_index)])
for s in seq:
if (shift_index and (s == 0)):
s += 1
orig_img.append(data.test_data[(img_offset + sample_index)])
target_labels.append(np.eye(class_num)[s])
orig_labels.append(data.test_labels[(img_offset + sample_index)])
orig_img_id.append((img_offset + sample_index))
elif (attack_type == 'untargeted'):
orig_img.append(data.test_data[(img_offset + sample_index)])
target_labels.append(data.test_labels[(img_offset + sample_index)])
orig_labels.append(data.test_labels[(img_offset + sample_index)])
orig_img_id.append((img_offset + sample_index))
orig_img = np.array(orig_img)
target_labels = np.array(target_labels)
orig_labels = np.array(orig_labels)
orig_img_id = np.array(orig_img_id)
return (orig_img, target_labels, orig_labels, orig_img_id)
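# Hypothetical smoke test for generate_attack_data_set with stand-in objects;
# the _Dummy* classes are illustrative and only provide the attributes the
# function actually touches (data.test_data/test_labels, model.model.predict).
import numpy as np

class _DummyKeras:
    @staticmethod
    def predict(x):
        p = np.zeros((x.shape[0], 10))
        p[:, 0] = 1.0                      # always predict class 0
        return p

class _DummyModel:
    model = _DummyKeras()

class _DummyData:
    test_data = np.random.rand(20, 28, 28, 1)
    test_labels = np.eye(10)[np.zeros(20, dtype=int)]   # true class is 0

imgs, tgts, labs, ids = generate_attack_data_set(
    _DummyData(), num_sample=2, img_offset=0, model=_DummyModel(),
    attack_type='untargeted')
print(imgs.shape, tgts.shape, ids)         # -> (2, 28, 28, 1) (2, 10) [0 1]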
|
def model_prediction(model, inputs):
prob = model.model.predict(inputs)
predicted_class = np.argmax(prob)
prob_str = np.array2string(prob).replace('\n', '')
return (prob, predicted_class, prob_str)
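# Minimal check of model_prediction; the stub below mimics the nested
# `.model.predict` interface assumed above and returns one fixed probability
# row, so the predicted class is its argmax.
import numpy as np

class _StubWrapper:
    class model:
        @staticmethod
        def predict(inputs):
            return np.array([[0.1, 0.7, 0.2]])

prob, cls, prob_str = model_prediction(_StubWrapper(), np.zeros((1, 4)))
print(cls, prob_str)                       # -> 1 [[0.1 0.7 0.2]]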
|
class NodeLookup(object):
"Converts integer node ID's to human readable labels."
def __init__(self, model_path='./', label_lookup_path=None):
model_path_dir = os.path.join(model_path, FLAGS.model_dir)
if (not label_lookup_path):
label_lookup_path = os.path.join(model_path_dir, 'labels.txt')
self.node_lookup = self.load(label_lookup_path)
def load(self, label_lookup_path):
'Loads a human readable English name for each softmax node.\n\n Args:\n label_lookup_path: string UID to integer node ID.\n uid_lookup_path: string UID to human-readable string.\n\n Returns:\n dict from integer node ID to human-readable string.\n '
if (not tf.gfile.Exists(label_lookup_path)):
tf.logging.fatal('File does not exist %s', label_lookup_path)
node_id_to_name = {}
proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line:
words = line.split(':')
target_class = int(words[0])
name = words[1]
node_id_to_name[target_class] = name
return node_id_to_name
def id_to_string(self, node_id):
if (node_id not in self.node_lookup):
return ''
return self.node_lookup[node_id]
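# A small sketch of the "id:name" file format NodeLookup.load parses, using
# plain open() instead of tf.gfile so it runs without TensorFlow; the path and
# labels are made up.
import os, tempfile

_path = os.path.join(tempfile.mkdtemp(), 'labels.txt')
with open(_path, 'w') as fh:
    fh.write('0:background\n1:tench\n2:goldfish\n')
_lookup = {}
for _line in open(_path):
    if _line.strip():
        _id, _name = _line.split(':')
        _lookup[int(_id)] = _name.strip()
print(_lookup[2])                          # -> goldfish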
|
def create_graph(model_path='./'):
'Creates a graph from saved GraphDef file and returns a saver.'
sys.argv = [sys.argv[0]]
model_path_dir = os.path.join(model_path, FLAGS.model_dir)
with tf.gfile.FastGFile(os.path.join(model_path_dir, 'frozen_inception_v3.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
|
def run_inference_on_image(image):
'Runs inference on an image. (Not updated, not working for inception v3 20160828)\n\n Args:\n image: Image file name.\n\n Returns:\n Nothing\n '
if (not tf.gfile.Exists(image)):
tf.logging.fatal('File does not exist %s', image)
image_data = tf.gfile.FastGFile(image, 'rb').read()
create_graph()
with tf.Session() as sess:
img = tf.placeholder(tf.uint8, (299, 299, 3))
softmax_tensor = tf.import_graph_def(sess.graph.as_graph_def(), input_map={'DecodeJpeg:0': tf.reshape(img, (299, 299, 3))}, return_elements=['softmax/logits:0'])
dat = scipy.misc.imresize(scipy.misc.imread(image), (299, 299))
predictions = sess.run(softmax_tensor, {img: dat})
predictions = np.squeeze(predictions)
node_lookup = NodeLookup()
top_k = predictions.argsort()
for node_id in top_k:
print('id', node_id)
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
print(('%s (score = %.5f)' % (human_string, score)))
|
class InceptionModelPrediction():
def __init__(self, sess, model_path, use_softmax=False):
self.model_path = model_path
self.sess = sess
self.use_softmax = use_softmax
if self.use_softmax:
output_name = 'InceptionV3/Predictions/Softmax:0'
else:
output_name = 'InceptionV3/Predictions/Reshape:0'
self.img = tf.placeholder(tf.float32, (None, 299, 299, 3))
self.softmax_tensor = tf.import_graph_def(sess.graph.as_graph_def(), input_map={'input:0': self.img}, return_elements=[output_name])
def predict(self, dat):
dat = np.squeeze(dat)
if (len(dat.shape) < 4):
scaled = dat.reshape(((1,) + dat.shape))
else:
scaled = dat
predictions = self.sess.run(self.softmax_tensor, {self.img: scaled})
predictions = np.squeeze(predictions)
        return predictions

    def print_top_predictions(self, predictions):
        # Human-readable dump: print every class with its score (ascending)
        # and return the id of the highest-scoring class.
        node_lookup = NodeLookup(model_path=self.model_path)
        top_k = predictions.argsort()
        for node_id in top_k:
            print('id', node_id)
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            print(('%s (score = %.5f)' % (human_string, score)))
        return top_k[(- 1)]
|
class InceptionModel():
image_size = 299
num_labels = 1001
num_channels = 3
def __init__(self, model_path, use_softmax=False):
        global CREATED_GRAPH
        # Keep the session open for later predict() calls; a with-block would
        # close it as soon as __init__ returned.
        self.sess = tf.Session()
        self.use_softmax = use_softmax
        if (not CREATED_GRAPH):
            create_graph(model_path=model_path)
            CREATED_GRAPH = True
        self.model = InceptionModelPrediction(self.sess, model_path, use_softmax)
def predict(self, img):
if self.use_softmax:
output_name = 'InceptionV3/Predictions/Softmax:0'
else:
output_name = 'InceptionV3/Predictions/Reshape:0'
if img.shape.as_list()[0]:
shape = (int(img.shape[0]), 1001)
softmax_tensor = tf.import_graph_def(self.sess.graph.as_graph_def(), input_map={'input:0': img, 'InceptionV3/Predictions/Shape:0': shape}, return_elements=[output_name])
else:
softmax_tensor = tf.import_graph_def(self.sess.graph.as_graph_def(), input_map={'input:0': img}, return_elements=[output_name])
return softmax_tensor[0]
|
def maybe_download_and_extract():
'Download and extract model tar file.'
dest_directory = FLAGS.model_dir
if (not os.path.exists(dest_directory)):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[(- 1)]
filepath = os.path.join(dest_directory, filename)
if (not os.path.exists(filepath)):
def _progress(count, block_size, total_size):
sys.stdout.write(('\r>> Downloading %s %.1f%%' % (filename, ((float((count * block_size)) / float(total_size)) * 100.0))))
sys.stdout.flush()
(filepath, _) = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
def main(_):
maybe_download_and_extract()
image = (FLAGS.image_file if FLAGS.image_file else os.path.join(FLAGS.model_dir, 'cropped_panda.jpg'))
create_graph()
with tf.Session() as sess:
dat = np.array(scipy.misc.imresize(scipy.misc.imread(image), (299, 299)), dtype=np.float32)
dat /= 255.0
dat -= 0.5
        # Pass the model directory explicitly; use_softmax is a keyword flag.
        model = InceptionModelPrediction(sess, './', use_softmax=True)
predictions = model.predict(dat)
node_lookup = NodeLookup()
top_k = predictions.argsort()
for node_id in top_k:
print('id', node_id)
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
print(('%s (score = %.5f)' % (human_string, score)))
|
def readimg(f, force=False):
    FILENAME_RE = re.compile(r'(\d+)\.(\d+)\.jpg')
img = scipy.misc.imread(f)
if ((img.shape[0] < 299) or (img.shape[1] < 299)):
return None
img = ((np.array(scipy.misc.imresize(img, (299, 299)), dtype=np.float32) / 255) - 0.5)
if (not force):
if (img.shape != (299, 299, 3)):
return None
else:
print('Force read {}'.format(f))
filename_search = FILENAME_RE.search(f)
return [img, int(filename_search.group(1))]
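# readimg assumes the "<class>.<index>.jpg" naming convention used by the
# ImageNet loader below; a quick regex-only check on a made-up file name
# (no image needed).
import re

_m = re.compile(r'(\d+)\.(\d+)\.jpg').search('val/207.0013.jpg')
print(int(_m.group(1)))                    # -> 207, the true class id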
|
class ImageNet():
def __init__(self, data_path, targetFile=None, targetClass=None):
if (targetFile is None):
random.seed(5566)
from fnmatch import fnmatch
file_list = []
for (path, subdirs, files) in os.walk(data_path):
for name in files:
if fnmatch(name, '*.jpg'):
file_list.append(os.path.join(path, name))
random.shuffle(file_list)
            FILENAME_RE = re.compile(r'(\d+)\.(\d+)\.jpg')
temp_data = []
temp_labels = []
for f in file_list:
img = scipy.misc.imread(f)
if ((img.shape[0] < 299) or (img.shape[1] < 299)):
continue
img = ((np.array(scipy.misc.imresize(img, (299, 299)), dtype=np.float32) / 255) - 0.5)
if (img.shape != (299, 299, 3)):
continue
img = np.expand_dims(img, axis=0)
temp_data.append(img)
filename_search = FILENAME_RE.search(f)
temp_labels.append(int(filename_search.group(1)))
data_num = len(temp_data)
print('Imagenet load # testing images:{}'.format(data_num))
temp_data = np.concatenate(temp_data)
temp_labels = np.array(temp_labels)
self.test_data = temp_data
self.test_labels = np.zeros((data_num, 1001))
self.test_labels[(np.arange(data_num), temp_labels)] = 1
else:
print('Target file:{}'.format(targetFile))
(temp_data, temp_label) = readimg(targetFile, force=True)
self.test_data = np.array(temp_data)
temp_label = np.array(temp_label)
self.test_labels = np.zeros((1, 1001))
self.test_labels[(0, temp_label)] = 1
print('Read target file {}'.format(targetFile))
|
class ImageNetDataGen():
def __init__(self, train_dir, validate_dir, batch_size=100, data_augmentation=True):
if data_augmentation:
print('Enable data augmentation')
train_datagen = ImageDataGenerator(preprocessing_function=(lambda x: ((x / 255) - 0.5)), shear_range=0.2, zoom_range=0.2, width_shift_range=0.3, height_shift_range=0.3, horizontal_flip=True, fill_mode='nearest')
else:
print('Disable data augmentation')
train_datagen = ImageDataGenerator(preprocessing_function=(lambda x: ((x / 255) - 0.5)))
validation_datagen = ImageDataGenerator(preprocessing_function=(lambda x: ((x / 255) - 0.5)))
train_generator_flow = train_datagen.flow_from_directory(train_dir, target_size=(299, 299), batch_size=batch_size, class_mode='input')
validation_generator_flow = validation_datagen.flow_from_directory(validate_dir, target_size=(299, 299), batch_size=batch_size, class_mode='input')
self.train_generator_flow = train_generator_flow
self.validation_generator_flow = validation_generator_flow
|
class ImageNetDataNP():
def __init__(self, folder_path):
test_data = np.load(os.path.join(folder_path, 'imagenet_test_data.npy'))
test_labels = np.load(os.path.join(folder_path, 'imagenet_test_labels.npy'))
self.test_data = test_data
self.test_labels = test_labels
|
def read_file_list(filename, remove_bounds):
    '\n Reads a trajectory from a text file. \n \n File format:\n The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)\n and "d1 d2 d3.." is arbitrary data (e.g., a 3D position and 3D orientation) associated with this timestamp. \n \n Input:\n filename -- File name\n \n Output:\n dict -- dictionary of (stamp,data) tuples\n \n '
file = open(filename)
data = file.read()
lines = data.replace(',', ' ').replace('\t', ' ').split('\n')
if remove_bounds:
lines = lines[100:(- 100)]
    parsed = [[v.strip() for v in line.split(' ') if (v.strip() != '')] for line in lines if ((len(line) > 0) and (line[0] != '#'))]
    parsed = [(float(l[0]), l[1:]) for l in parsed if (len(l) > 1)]
    return dict(parsed)
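# A tiny sketch of the "stamp d1 d2 ..." trajectory format read_file_list
# expects, written to a throwaway file and parsed back; the stamps and values
# are made up.
import os, tempfile

_traj_path = os.path.join(tempfile.mkdtemp(), 'traj.txt')
with open(_traj_path, 'w') as fh:
    fh.write('# stamp tx ty tz\n1.00 0.1 0.2 0.3\n1.05 0.1 0.2 0.4\n')
_traj = read_file_list(_traj_path, remove_bounds=False)
print(sorted(_traj.keys()))                # -> [1.0, 1.05]
print(_traj[1.05])                         # -> ['0.1', '0.2', '0.4']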
|
def associate(first_list, second_list, offset, max_difference):
'\n Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim \n to find the closest match for every input tuple.\n \n Input:\n first_list -- first dictionary of (stamp,data) tuples\n second_list -- second dictionary of (stamp,data) tuples\n offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)\n max_difference -- search radius for candidate generation\n\n Output:\n matches -- list of matched tuples ((stamp1,data1),(stamp2,data2))\n \n '
    first_keys = list(first_list.keys())
    second_keys = list(second_list.keys())
potential_matches = [(abs((a - (b + offset))), a, b) for a in first_keys for b in second_keys if (abs((a - (b + offset))) < max_difference)]
potential_matches.sort()
matches = []
for (diff, a, b) in potential_matches:
if ((a in first_keys) and (b in second_keys)):
first_keys.remove(a)
second_keys.remove(b)
matches.append((a, b))
matches.sort()
return matches
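# Worked example of associate on two made-up stamp dictionaries: with a 0.02 s
# search radius only the first RGB stamp has a depth stamp close enough to pair.
_rgb = {1.00: ['rgb_a'], 1.05: ['rgb_b'], 1.10: ['rgb_c']}
_depth = {1.01: ['d_a'], 1.30: ['d_b']}
print(associate(_rgb, _depth, offset=0.0, max_difference=0.02))   # -> [(1.0, 1.01)]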
|
def sqdist(X, Y):
assert (X.size()[1] == Y.size()[1]), 'dims do not match'
return ((X.reshape(X.size()[0], 1, X.size()[1]) - Y.reshape(1, Y.size()[0], Y.size()[1])) ** 2).sum(2)
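# Sanity check: sqdist should agree with the squared pairwise Euclidean
# distance, e.g. torch.cdist(X, Y) ** 2, up to floating-point error.
import torch

_X, _Y = torch.randn(5, 3), torch.randn(7, 3)
assert torch.allclose(sqdist(_X, _Y), torch.cdist(_X, _Y) ** 2, atol=1e-5)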
|
class Constant(nn.Module):
def __init__(self, variance=1.0):
super(Constant, self).__init__()
self.variance = torch.nn.Parameter(transform_backward(torch.tensor([variance])))
def forward(self, X, X2=None):
if (X2 is None):
shape = [X.size()[0], X.size()[0]]
else:
shape = [X.size()[0], X2.size()[0]]
return (transform_forward(self.variance) * torch.ones(shape[0], shape[1]))
|
class RBF(nn.Module):
def __init__(self, dim, variance=1.0, lengthscale=None):
super(RBF, self).__init__()
self.dim = torch.tensor([dim], requires_grad=False)
if (lengthscale is None):
self.lengthscale = torch.nn.Parameter(transform_backward(torch.ones(1, dim)))
else:
self.lengthscale = torch.nn.Parameter(transform_backward(torch.tensor(lengthscale)))
self.variance = torch.nn.Parameter(transform_backward(torch.tensor([variance])))
def forward(self, X, X2=None):
if (X2 is None):
X2 = X
l = transform_forward(self.lengthscale)
return (transform_forward(self.variance) * ((- 0.5) * sqdist((X / l), (X2 / l))).exp())
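# Quick check of the RBF kernel above. transform_forward/transform_backward
# are assumed to be a positivity transform defined elsewhere in this file;
# softplus and its inverse are plausible stand-ins if they are not.
import torch

def transform_forward(x):                  # softplus keeps parameters positive
    return torch.nn.functional.softplus(x)

def transform_backward(y):                 # inverse softplus
    return torch.log(torch.expm1(y))

_k = RBF(dim=2, variance=1.0)
_X = torch.randn(4, 2)
_K = _k(_X)
print(_K.shape)                            # -> torch.Size([4, 4])
# The diagonal of an RBF Gram matrix equals the (transformed) variance.
print(torch.allclose(torch.diag(_K), transform_forward(_k.variance).expand(4)))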
|
class Linear(nn.Module):
def __init__(self, dim, variance=1.0, lengthscale=None):
super(Linear, self).__init__()
self.dim = torch.tensor([dim], requires_grad=False)
if (lengthscale is None):
self.lengthscale = torch.nn.Parameter(transform_backward(torch.ones(1, dim)))
else:
self.lengthscale = torch.nn.Parameter(transform_backward(torch.tensor(lengthscale)))
self.variance = torch.nn.Parameter(transform_backward(torch.tensor([variance])))
def forward(self, X, X2=None):
if (X2 is None):
X2 = X
l = transform_forward(self.lengthscale)
return (transform_forward(self.variance) * torch.mm((X / l), (X2 / l).t()))
|