python_code | repo_name | file_path
---|---|---|
"""
# Code adapted from:
# https://github.com/Cadene/pretrained-models.pytorch
#
# BSD 3-Clause License
#
# Copyright (c) 2017, Remi Cadene
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import logging
from collections import OrderedDict
import math
import torch.nn as nn
from torch.utils import model_zoo
import network.mynn as mynn
__all__ = ['SENet', 'se_resnext50_32x4d', 'se_resnext101_32x4d']
pretrained_settings = {
'se_resnext50_32x4d': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
'se_resnext101_32x4d': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
},
}
class SEModule(nn.Module):
"""
    Squeeze-and-Excitation module: re-weights channels using global average
    pooling followed by a small fc1 -> ReLU -> fc2 -> sigmoid bottleneck.
"""
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,
padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,
padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
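# Illustrative usage sketch (not part of the original file): SEModule collapses
# the spatial dimensions with global average pooling, predicts a per-channel
# gate in (0, 1) through the fc1 -> ReLU -> fc2 -> sigmoid bottleneck, and
# rescales the input channels. Channel count and reduction are example values.
def _semodule_example():
    import torch
    se = SEModule(channels=64, reduction=16)
    x = torch.randn(2, 64, 32, 32)
    y = se(x)
    # Spatial shape is preserved; only channel responses are rescaled.
    assert y.shape == x.shape
    return y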
class Bottleneck(nn.Module):
"""
    Base class for bottlenecks that implements the `forward()` method.
"""
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = self.se_module(out) + residual
out = self.relu(out)
return out
class SEBottleneck(Bottleneck):
"""
Bottleneck for SENet154.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
self.bn1 = mynn.Norm2d(planes * 2)
self.conv2 = nn.Conv2d(planes * 2, planes * 4, kernel_size=3,
stride=stride, padding=1, groups=groups,
bias=False)
self.bn2 = mynn.Norm2d(planes * 4)
self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1,
bias=False)
self.bn3 = mynn.Norm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNetBottleneck(Bottleneck):
"""
    ResNet bottleneck with a Squeeze-and-Excitation module. It follows the
    Caffe implementation and uses `stride=stride` in `conv1` rather than in
    `conv2` (the latter is what the torchvision ResNet implementation does).
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None):
super(SEResNetBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False,
stride=stride)
self.bn1 = mynn.Norm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1,
groups=groups, bias=False)
self.bn2 = mynn.Norm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = mynn.Norm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNeXtBottleneck(Bottleneck):
"""
ResNeXt bottleneck type C with a Squeeze-and-Excitation module.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None, base_width=4):
super(SEResNeXtBottleneck, self).__init__()
width = math.floor(planes * (base_width / 64)) * groups
self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False,
stride=1)
self.bn1 = mynn.Norm2d(width)
self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=False)
self.bn2 = mynn.Norm2d(width)
self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
self.bn3 = mynn.Norm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SENet(nn.Module):
"""
Main Squeeze Excitation Network Module
"""
def __init__(self, block, layers, groups, reduction, dropout_p=0.2,
inplanes=128, input_3x3=True, downsample_kernel_size=3,
downsample_padding=1, num_classes=1000):
"""
Parameters
----------
block (nn.Module): Bottleneck class.
- For SENet154: SEBottleneck
- For SE-ResNet models: SEResNetBottleneck
- For SE-ResNeXt models: SEResNeXtBottleneck
layers (list of ints): Number of residual blocks for 4 layers of the
network (layer1...layer4).
groups (int): Number of groups for the 3x3 convolution in each
bottleneck block.
- For SENet154: 64
- For SE-ResNet models: 1
- For SE-ResNeXt models: 32
reduction (int): Reduction ratio for Squeeze-and-Excitation modules.
- For all models: 16
dropout_p (float or None): Drop probability for the Dropout layer.
If `None` the Dropout layer is not used.
- For SENet154: 0.2
- For SE-ResNet models: None
- For SE-ResNeXt models: None
inplanes (int): Number of input channels for layer1.
- For SENet154: 128
- For SE-ResNet models: 64
- For SE-ResNeXt models: 64
input_3x3 (bool): If `True`, use three 3x3 convolutions instead of
a single 7x7 convolution in layer0.
- For SENet154: True
- For SE-ResNet models: False
- For SE-ResNeXt models: False
downsample_kernel_size (int): Kernel size for downsampling convolutions
in layer2, layer3 and layer4.
- For SENet154: 3
- For SE-ResNet models: 1
- For SE-ResNeXt models: 1
downsample_padding (int): Padding for downsampling convolutions in
layer2, layer3 and layer4.
- For SENet154: 1
- For SE-ResNet models: 0
- For SE-ResNeXt models: 0
num_classes (int): Number of outputs in `last_linear` layer.
- For all models: 1000
"""
super(SENet, self).__init__()
self.inplanes = inplanes
if input_3x3:
layer0_modules = [
('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1,
bias=False)),
('bn1', mynn.Norm2d(64)),
('relu1', nn.ReLU(inplace=True)),
('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,
bias=False)),
('bn2', mynn.Norm2d(64)),
('relu2', nn.ReLU(inplace=True)),
('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,
bias=False)),
('bn3', mynn.Norm2d(inplanes)),
('relu3', nn.ReLU(inplace=True)),
]
else:
layer0_modules = [
('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2,
padding=3, bias=False)),
('bn1', mynn.Norm2d(inplanes)),
('relu1', nn.ReLU(inplace=True)),
]
# To preserve compatibility with Caffe weights `ceil_mode=True`
# is used instead of `padding=1`.
layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2,
ceil_mode=True)))
self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
self.layer1 = self._make_layer(
block,
planes=64,
blocks=layers[0],
groups=groups,
reduction=reduction,
downsample_kernel_size=1,
downsample_padding=0
)
self.layer2 = self._make_layer(
block,
planes=128,
blocks=layers[1],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.layer3 = self._make_layer(
block,
planes=256,
blocks=layers[2],
stride=1,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.layer4 = self._make_layer(
block,
planes=512,
blocks=layers[3],
stride=1,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.avg_pool = nn.AvgPool2d(7, stride=1)
self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None
self.last_linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,
downsample_kernel_size=1, downsample_padding=0):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=downsample_kernel_size, stride=stride,
padding=downsample_padding, bias=False),
mynn.Norm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, groups, reduction, stride,
downsample))
self.inplanes = planes * block.expansion
for index in range(1, blocks):
layers.append(block(self.inplanes, planes, groups, reduction))
return nn.Sequential(*layers)
def features(self, x):
"""
        Forward pass through each stage (layer0 to layer4) of the SE network.
"""
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def logits(self, x):
"""
AvgPool and Linear Layer
"""
x = self.avg_pool(x)
if self.dropout is not None:
x = self.dropout(x)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, x):
x = self.features(x)
x = self.logits(x)
return x
def initialize_pretrained_model(model, num_classes, settings):
"""
    Initialize pretrained model information: download the weights, load them
    into the model, and set the input preprocessing attributes.
"""
assert num_classes == settings['num_classes'], \
'num_classes should be {}, but is {}'.format(
settings['num_classes'], num_classes)
weights = model_zoo.load_url(settings['url'])
model.load_state_dict(weights)
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
def se_resnext50_32x4d(num_classes=1000):
"""
    Constructor for SE-ResNeXt-50 (32x4d) with ImageNet-pretrained weights.
"""
model = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16,
dropout_p=None, inplanes=64, input_3x3=False,
downsample_kernel_size=1, downsample_padding=0,
num_classes=num_classes)
settings = pretrained_settings['se_resnext50_32x4d']['imagenet']
initialize_pretrained_model(model, num_classes, settings)
return model
def se_resnext101_32x4d(num_classes=1000):
"""
    Constructor for SE-ResNeXt-101 (32x4d) with ImageNet-pretrained weights.
"""
model = SENet(SEResNeXtBottleneck, [3, 4, 23, 3], groups=32, reduction=16,
dropout_p=None, inplanes=64, input_3x3=False,
downsample_kernel_size=1, downsample_padding=0,
num_classes=num_classes)
settings = pretrained_settings['se_resnext101_32x4d']['imagenet']
initialize_pretrained_model(model, num_classes, settings)
return model
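# Illustrative sketch (not part of the original file): building the SE-ResNeXt-50
# backbone without downloading pretrained weights by instantiating SENet directly
# with the same arguments se_resnext50_32x4d() uses. This assumes cfg.MODEL.BNFUNC
# has already been configured (normally done by the training entry point), since
# mynn.Norm2d resolves its normalization layer from the global cfg.
def _senet_backbone_example():
    import torch
    model = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16,
                  dropout_p=None, inplanes=64, input_3x3=False,
                  downsample_kernel_size=1, downsample_padding=0,
                  num_classes=1000)
    feats = model.features(torch.randn(1, 3, 224, 224))
    # layer3 and layer4 keep stride 1 here, so features come out at stride 8.
    assert feats.shape == (1, 2048, 28, 28)
    return feats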
| semantic-segmentation-main | network/SEresnext.py |
"""
# Code adapted from:
# https://github.com/mapillary/inplace_abn/
#
# BSD 3-Clause License
#
# Copyright (c) 2017, mapillary
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import logging
import sys
from collections import OrderedDict
from functools import partial
import torch.nn as nn
import torch
import network.mynn as mynn
from config import cfg
def bnrelu(channels):
"""
    Single layer of batch normalization followed by ReLU.
"""
return nn.Sequential(mynn.Norm2d(channels),
nn.ReLU(inplace=True))
class GlobalAvgPool2d(nn.Module):
"""
Global average pooling over the input's spatial dimensions
"""
def __init__(self):
super(GlobalAvgPool2d, self).__init__()
logging.info("Global Average Pooling Initialized")
def forward(self, inputs):
in_size = inputs.size()
return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)
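# Illustrative sketch (not part of the original file): the view + mean above is
# equivalent to adaptive average pooling to 1x1 followed by flattening, which is
# what the classifier heads below rely on.
def _global_avg_pool_example():
    x = torch.randn(2, 8, 5, 7)
    pooled = GlobalAvgPool2d()(x)
    reference = nn.AdaptiveAvgPool2d(1)(x).view(2, 8)
    assert pooled.shape == (2, 8)
    assert torch.allclose(pooled, reference, atol=1e-6)
    return pooled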
class IdentityResidualBlock(nn.Module):
"""
Identity Residual Block for WideResnet
"""
def __init__(self,
in_channels,
channels,
stride=1,
dilation=1,
groups=1,
norm_act=bnrelu,
dropout=None,
dist_bn=False
):
"""Configurable identity-mapping residual block
Parameters
----------
in_channels : int
Number of input channels.
channels : list of int
Number of channels in the internal feature maps.
Can either have two or three elements: if three construct
a residual block with two `3 x 3` convolutions,
otherwise construct a bottleneck block with `1 x 1`, then
`3 x 3` then `1 x 1` convolutions.
stride : int
Stride of the first `3 x 3` convolution
dilation : int
Dilation to apply to the `3 x 3` convolutions.
groups : int
Number of convolution groups.
This is used to create ResNeXt-style blocks and is only compatible with
bottleneck blocks.
norm_act : callable
Function to create normalization / activation Module.
dropout: callable
Function to create Dropout Module.
dist_bn: Boolean
A variable to enable or disable use of distributed BN
"""
super(IdentityResidualBlock, self).__init__()
self.dist_bn = dist_bn
# Check if we are using distributed BN and use the nn from encoding.nn
# library rather than using standard pytorch.nn
# Check parameters for inconsistencies
if len(channels) != 2 and len(channels) != 3:
raise ValueError("channels must contain either two or three values")
if len(channels) == 2 and groups != 1:
raise ValueError("groups > 1 are only valid if len(channels) == 3")
is_bottleneck = len(channels) == 3
need_proj_conv = stride != 1 or in_channels != channels[-1]
self.bn1 = norm_act(in_channels)
if not is_bottleneck:
layers = [
("conv1", nn.Conv2d(in_channels,
channels[0],
3,
stride=stride,
padding=dilation,
bias=False,
dilation=dilation)),
("bn2", norm_act(channels[0])),
("conv2", nn.Conv2d(channels[0], channels[1],
3,
stride=1,
padding=dilation,
bias=False,
dilation=dilation))
]
if dropout is not None:
layers = layers[0:2] + [("dropout", dropout())] + layers[2:]
else:
layers = [
("conv1",
nn.Conv2d(in_channels,
channels[0],
1,
stride=stride,
padding=0,
bias=False)),
("bn2", norm_act(channels[0])),
("conv2", nn.Conv2d(channels[0],
channels[1],
3, stride=1,
padding=dilation, bias=False,
groups=groups,
dilation=dilation)),
("bn3", norm_act(channels[1])),
("conv3", nn.Conv2d(channels[1], channels[2],
1, stride=1, padding=0, bias=False))
]
if dropout is not None:
layers = layers[0:4] + [("dropout", dropout())] + layers[4:]
self.convs = nn.Sequential(OrderedDict(layers))
if need_proj_conv:
self.proj_conv = nn.Conv2d(
in_channels, channels[-1], 1, stride=stride, padding=0, bias=False)
def forward(self, x):
"""
This is the standard forward function for non-distributed batch norm
"""
if hasattr(self, "proj_conv"):
bn1 = self.bn1(x)
shortcut = self.proj_conv(bn1)
else:
shortcut = x.clone()
bn1 = self.bn1(x)
out = self.convs(bn1)
out.add_(shortcut)
return out
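# Illustrative sketch (not part of the original file): per the docstring above,
# a two-element `channels` list builds the two-conv 3x3 residual form, while a
# three-element list builds the 1x1/3x3/1x1 bottleneck (required when
# groups > 1). The default bnrelu norm_act goes through mynn.Norm2d, so this
# assumes cfg.MODEL.BNFUNC has already been configured.
def _identity_residual_block_example():
    x = torch.randn(1, 64, 32, 32)
    basic = IdentityResidualBlock(64, (128, 128), stride=2)
    bottleneck = IdentityResidualBlock(64, (128, 128, 256), stride=1, groups=32)
    assert basic(x).shape == (1, 128, 16, 16)
    assert bottleneck(x).shape == (1, 256, 32, 32)
    return basic, bottleneck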
class WiderResNet(nn.Module):
"""
WideResnet Global Module for Initialization
"""
def __init__(self,
structure,
norm_act=bnrelu,
classes=0
):
"""Wider ResNet with pre-activation (identity mapping) blocks
Parameters
----------
structure : list of int
Number of residual blocks in each of the six modules of the network.
norm_act : callable
Function to create normalization / activation Module.
classes : int
            If not `0`, also include global average pooling and
a fully-connected layer with `classes` outputs at the end
of the network.
"""
super(WiderResNet, self).__init__()
self.structure = structure
if len(structure) != 6:
raise ValueError("Expected a structure with six values")
# Initial layers
self.mod1 = nn.Sequential(OrderedDict([
("conv1", nn.Conv2d(3, 64, 3, stride=1, padding=1, bias=False))
]))
# Groups of residual blocks
in_channels = 64
channels = [(128, 128), (256, 256), (512, 512), (512, 1024),
(512, 1024, 2048), (1024, 2048, 4096)]
for mod_id, num in enumerate(structure):
# Create blocks for module
blocks = []
for block_id in range(num):
blocks.append((
"block%d" % (block_id + 1),
IdentityResidualBlock(in_channels, channels[mod_id],
norm_act=norm_act)
))
# Update channels and p_keep
in_channels = channels[mod_id][-1]
# Create module
if mod_id <= 4:
self.add_module("pool%d" %
(mod_id + 2), nn.MaxPool2d(3, stride=2, padding=1))
self.add_module("mod%d" % (mod_id + 2), nn.Sequential(OrderedDict(blocks)))
# Pooling and predictor
self.bn_out = norm_act(in_channels)
if classes != 0:
self.classifier = nn.Sequential(OrderedDict([
("avg_pool", GlobalAvgPool2d()),
("fc", nn.Linear(in_channels, classes))
]))
def forward(self, img):
out = self.mod1(img)
out = self.mod2(self.pool2(out))
out = self.mod3(self.pool3(out))
out = self.mod4(self.pool4(out))
out = self.mod5(self.pool5(out))
out = self.mod6(self.pool6(out))
out = self.mod7(out)
out = self.bn_out(out)
if hasattr(self, "classifier"):
out = self.classifier(out)
return out
class WiderResNetA2(nn.Module):
"""
Wider ResNet with pre-activation (identity mapping) blocks
This variant uses down-sampling by max-pooling in the first two blocks and
by strided convolution in the others.
Parameters
----------
structure : list of int
Number of residual blocks in each of the six modules of the network.
norm_act : callable
Function to create normalization / activation Module.
classes : int
If not `0` also include global average pooling and a fully-connected layer
with `classes` outputs at the end
of the network.
dilation : bool
If `True` apply dilation to the last three modules and change the
down-sampling factor from 32 to 8.
"""
def __init__(self,
structure,
norm_act=bnrelu,
classes=0,
dilation=False,
dist_bn=False
):
super(WiderResNetA2, self).__init__()
self.dist_bn = dist_bn
        # If using distributed batch norm, use encoding.nn as opposed to torch.nn
nn.Dropout = nn.Dropout2d
norm_act = bnrelu
self.structure = structure
self.dilation = dilation
if len(structure) != 6:
raise ValueError("Expected a structure with six values")
# Initial layers
self.mod1 = torch.nn.Sequential(OrderedDict([
("conv1", nn.Conv2d(3, 64, 3, stride=1, padding=1, bias=False))
]))
# Groups of residual blocks
in_channels = 64
channels = [(128, 128), (256, 256), (512, 512), (512, 1024), (512, 1024, 2048),
(1024, 2048, 4096)]
for mod_id, num in enumerate(structure):
# Create blocks for module
blocks = []
for block_id in range(num):
if not dilation:
dil = 1
stride = 2 if block_id == 0 and 2 <= mod_id <= 4 else 1
else:
if mod_id == 3:
dil = 2
elif mod_id > 3:
dil = 4
else:
dil = 1
stride = 2 if block_id == 0 and mod_id == 2 else 1
if mod_id == 4:
drop = partial(nn.Dropout, p=0.3)
elif mod_id == 5:
drop = partial(nn.Dropout, p=0.5)
else:
drop = None
blocks.append((
"block%d" % (block_id + 1),
IdentityResidualBlock(in_channels,
channels[mod_id], norm_act=norm_act,
stride=stride, dilation=dil,
dropout=drop, dist_bn=self.dist_bn)
))
# Update channels and p_keep
in_channels = channels[mod_id][-1]
# Create module
if mod_id < 2:
self.add_module("pool%d" %
(mod_id + 2), nn.MaxPool2d(3, stride=2, padding=1))
self.add_module("mod%d" % (mod_id + 2), nn.Sequential(OrderedDict(blocks)))
# Pooling and predictor
self.bn_out = norm_act(in_channels)
if classes != 0:
self.classifier = nn.Sequential(OrderedDict([
("avg_pool", GlobalAvgPool2d()),
("fc", nn.Linear(in_channels, classes))
]))
def forward(self, img):
out = self.mod1(img)
out = self.mod2(self.pool2(out)) # s2
out = self.mod3(self.pool3(out)) # s4
out = self.mod4(out) # s8
out = self.mod5(out)
out = self.mod6(out)
out = self.mod7(out)
out = self.bn_out(out)
if hasattr(self, "classifier"):
return self.classifier(out)
return out
_NETS = {
"16": {"structure": [1, 1, 1, 1, 1, 1]},
"20": {"structure": [1, 1, 1, 3, 1, 1]},
"38": {"structure": [3, 3, 6, 3, 1, 1]},
}
__all__ = []
for name, params in _NETS.items():
net_name = "wider_resnet" + name
setattr(sys.modules[__name__], net_name, partial(WiderResNet, **params))
__all__.append(net_name)
for name, params in _NETS.items():
net_name = "wider_resnet" + name + "_a2"
setattr(sys.modules[__name__], net_name, partial(WiderResNetA2, **params))
__all__.append(net_name)
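# Illustrative sketch (not part of the original file): the two loops above expose
# module-level constructors such as wider_resnet38 and wider_resnet38_a2 as
# functools.partial wrappers around the classes, so they are called exactly like
# the classes themselves. Assumes cfg.MODEL.BNFUNC is configured.
def _wider_resnet_factory_example():
    # Same call the wrn38 trunk below makes, minus the checkpoint loading.
    net = wider_resnet38_a2(classes=1000, dilation=True)
    assert isinstance(net, WiderResNetA2)
    return net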
class wrn38(nn.Module):
"""
    Wider ResNet-38 trunk with output_stride=8.
"""
def __init__(self, pretrained=True):
super(wrn38, self).__init__()
wide_resnet = wider_resnet38_a2(classes=1000, dilation=True)
wide_resnet = torch.nn.DataParallel(wide_resnet)
if pretrained:
pretrained_model = cfg.MODEL.WRN38_CHECKPOINT
checkpoint = torch.load(pretrained_model, map_location='cpu')
wide_resnet.load_state_dict(checkpoint['state_dict'])
del checkpoint
wide_resnet = wide_resnet.module
# print(wide_resnet)
self.mod1 = wide_resnet.mod1
self.mod2 = wide_resnet.mod2
self.mod3 = wide_resnet.mod3
self.mod4 = wide_resnet.mod4
self.mod5 = wide_resnet.mod5
self.mod6 = wide_resnet.mod6
self.mod7 = wide_resnet.mod7
self.pool2 = wide_resnet.pool2
self.pool3 = wide_resnet.pool3
del wide_resnet
def forward(self, x):
x = self.mod1(x)
x = self.mod2(self.pool2(x)) # s2
s2_features = x
x = self.mod3(self.pool3(x)) # s4
s4_features = x
x = self.mod4(x)
x = self.mod5(x)
x = self.mod6(x)
x = self.mod7(x)
return s2_features, s4_features, x
class wrn38_gscnn(wrn38):
def __init__(self, pretrained=True):
super(wrn38_gscnn, self).__init__(pretrained=pretrained)
def forward(self, x):
m1 = self.mod1(x)
m2 = self.mod2(self.pool2(m1))
m3 = self.mod3(self.pool3(m2))
m4 = self.mod4(m3)
m5 = self.mod5(m4)
m6 = self.mod6(m5)
m7 = self.mod7(m6)
return m1, m2, m3, m4, m5, m6, m7
| semantic-segmentation-main | network/wider_resnet.py |
"""
Network Initializations
"""
import importlib
import torch
from runx.logx import logx
from config import cfg
def get_net(args, criterion):
"""
Get Network Architecture based on arguments provided
"""
net = get_model(network='network.' + args.arch,
num_classes=cfg.DATASET.NUM_CLASSES,
criterion=criterion)
num_params = sum([param.nelement() for param in net.parameters()])
logx.msg('Model params = {:2.1f}M'.format(num_params / 1000000))
net = net.cuda()
return net
def is_gscnn_arch(args):
"""
    Return True if the requested architecture is a GSCNN network.
"""
return 'gscnn' in args.arch
def wrap_network_in_dataparallel(net, use_apex_data_parallel=False):
"""
    Wrap the network in DataParallel or Apex DistributedDataParallel.
"""
if use_apex_data_parallel:
import apex
net = apex.parallel.DistributedDataParallel(net)
else:
net = torch.nn.DataParallel(net)
return net
def get_model(network, num_classes, criterion):
"""
    Resolve the network constructor from its dotted module path and build the model.
"""
module = network[:network.rfind('.')]
model = network[network.rfind('.') + 1:]
mod = importlib.import_module(module)
net_func = getattr(mod, model)
net = net_func(num_classes=num_classes, criterion=criterion)
return net
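# Illustrative sketch (not part of the original file): get_model splits the dotted
# name into a module path and an attribute, imports the module, and calls the named
# constructor with num_classes and criterion. For example, args.arch == 'basic.HRNet'
# resolves to network.basic.HRNet. Running this assumes the global cfg (dataset,
# backbone checkpoints, BN function) has already been initialized by the entry point.
def _get_model_example(criterion=None):
    return get_model(network='network.basic.HRNet',
                     num_classes=cfg.DATASET.NUM_CLASSES,
                     criterion=criterion)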
| semantic-segmentation-main | network/__init__.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import torch
from torch import nn
from network.mynn import initialize_weights, Norm2d, Upsample
from network.mynn import ResizeX, scale_as
from network.utils import get_aspp, get_trunk
from config import cfg
class ASDV3P(nn.Module):
"""
DeepLabV3+ with Attention-to-scale style attention
Attn head:
conv 3x3 512 ch
relu
conv 1x1 3 ch -> 1.0, 0.75, 0.5
train with 3 output scales: 0.5, 1.0, 2.0
min/max scale aug set to [0.5, 1.0]
"""
def __init__(self, num_classes, trunk='wrn38', criterion=None,
use_dpc=False, fuse_aspp=False, attn_2b=False, bn_head=False):
super(ASDV3P, self).__init__()
self.criterion = criterion
self.fuse_aspp = fuse_aspp
self.attn_2b = attn_2b
self.backbone, s2_ch, _s4_ch, high_level_ch = get_trunk(trunk)
self.aspp, aspp_out_ch = get_aspp(high_level_ch,
bottleneck_ch=256,
output_stride=8,
dpc=use_dpc)
self.bot_fine = nn.Conv2d(s2_ch, 48, kernel_size=1, bias=False)
self.bot_aspp = nn.Conv2d(aspp_out_ch, 256, kernel_size=1, bias=False)
# Semantic segmentation prediction head
self.final = nn.Sequential(
nn.Conv2d(256 + 48, 256, kernel_size=3, padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, num_classes, kernel_size=1, bias=False))
# Scale-attention prediction head
assert cfg.MODEL.N_SCALES is not None
self.scales = sorted(cfg.MODEL.N_SCALES)
num_scales = len(self.scales)
if cfg.MODEL.ATTNSCALE_BN_HEAD or bn_head:
self.scale_attn = nn.Sequential(
nn.Conv2d(num_scales * (256 + 48), 256, kernel_size=3,
padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, num_scales, kernel_size=1, bias=False))
else:
self.scale_attn = nn.Sequential(
nn.Conv2d(num_scales * (256 + 48), 512, kernel_size=3,
padding=1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(512, num_scales, kernel_size=1, padding=1,
bias=False))
if cfg.OPTIONS.INIT_DECODER:
initialize_weights(self.bot_fine)
initialize_weights(self.bot_aspp)
initialize_weights(self.scale_attn)
initialize_weights(self.final)
else:
initialize_weights(self.final)
def _fwd(self, x, aspp_lo=None, aspp_attn=None):
"""
Run the network, and return final feature and logit predictions
"""
x_size = x.size()
s2_features, _, final_features = self.backbone(x)
aspp = self.aspp(final_features)
if self.fuse_aspp and \
aspp_lo is not None and aspp_attn is not None:
aspp_attn = scale_as(aspp_attn, aspp)
aspp_lo = scale_as(aspp_lo, aspp)
aspp = aspp_attn * aspp_lo + (1 - aspp_attn) * aspp
conv_aspp = self.bot_aspp(aspp)
conv_s2 = self.bot_fine(s2_features)
conv_aspp = Upsample(conv_aspp, s2_features.size()[2:])
cat_s4 = [conv_s2, conv_aspp]
cat_s4 = torch.cat(cat_s4, 1)
final = self.final(cat_s4)
out = Upsample(final, x_size[2:])
return out, cat_s4
def _forward_fused(self, inputs):
"""
Combine multiple scales of predictions together with attention
predicted jointly off of multi-scale features.
"""
x_1x = inputs['images']
# run 1x scale
assert 1.0 in self.scales, 'expected one of scales to be 1.0'
ps = {}
ps[1.0], feats_1x = self._fwd(x_1x)
concat_feats = [feats_1x]
# run all other scales
for scale in self.scales:
if scale == 1.0:
continue
resized_x = ResizeX(x_1x, scale)
p, feats = self._fwd(resized_x)
ps[scale] = scale_as(p, x_1x)
feats = scale_as(feats, feats_1x)
concat_feats.append(feats)
concat_feats = torch.cat(concat_feats, 1)
attn_tensor = self.scale_attn(concat_feats)
output = None
for idx, scale in enumerate(self.scales):
attn = attn_tensor[:, idx:idx+1, :, :]
attn_1x_scale = scale_as(attn, x_1x)
if output is None:
# logx.msg(f'ps[scale] shape {ps[scale].shape} '
# f'attn shape {attn_1x_scale.shape}')
output = ps[scale] * attn_1x_scale
else:
output += ps[scale] * attn_1x_scale
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(output, gts)
if cfg.LOSS.SUPERVISED_MSCALE_WT:
for scale in self.scales:
loss_scale = self.criterion(ps[scale], gts, do_rmi=False)
loss += cfg.LOSS.SUPERVISED_MSCALE_WT * loss_scale
return loss
else:
return output, attn
def forward(self, inputs):
# FIXME: could add other assets for visualization
return {'pred': self._forward_fused(inputs)}
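# Illustrative sketch (not part of the original file): the fusion in _forward_fused
# is a per-pixel weighted combination -- one attention channel per scale, multiplied
# into that scale's (already upsampled) logits and summed. The tensors below are
# random stand-ins for ps[scale] and attn_tensor; 19 classes is just an example.
def _fused_attention_example():
    scales = (0.5, 1.0, 2.0)
    ps = {s: torch.randn(1, 19, 64, 64) for s in scales}  # per-scale logits
    attn_tensor = torch.rand(1, len(scales), 64, 64)       # one channel per scale
    output = None
    for idx, scale in enumerate(scales):
        attn = attn_tensor[:, idx:idx + 1, :, :]
        output = ps[scale] * attn if output is None else output + ps[scale] * attn
    assert output.shape == (1, 19, 64, 64)
    return output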
def DeepV3R50(num_classes, criterion):
return ASDV3P(num_classes, trunk='resnet-50', criterion=criterion)
# Batch-norm head
def DeepV3R50B(num_classes, criterion):
return ASDV3P(num_classes, trunk='resnet-50', criterion=criterion,
bn_head=True)
def DeepV3W38(num_classes, criterion):
return ASDV3P(num_classes, trunk='wrn38', criterion=criterion)
class ASDV3P_Paired(nn.Module):
"""
DeepLabV3+ with Attention-to-scale style attention
Attn head:
conv 3x3 512 ch
relu
conv 1x1 3 ch -> 1.0, 0.75, 0.5
train with 3 output scales: 0.5, 1.0, 2.0
min/max scale aug set to [0.5, 1.0]
"""
def __init__(self, num_classes, trunk='wrn38', criterion=None,
use_dpc=False, fuse_aspp=False, attn_2b=False, bn_head=False):
super(ASDV3P_Paired, self).__init__()
self.criterion = criterion
self.fuse_aspp = fuse_aspp
self.attn_2b = attn_2b
self.backbone, s2_ch, _s4_ch, high_level_ch = get_trunk(trunk)
self.aspp, aspp_out_ch = get_aspp(high_level_ch,
bottleneck_ch=256,
output_stride=8,
dpc=use_dpc)
self.bot_fine = nn.Conv2d(s2_ch, 48, kernel_size=1, bias=False)
self.bot_aspp = nn.Conv2d(aspp_out_ch, 256, kernel_size=1, bias=False)
# Semantic segmentation prediction head
self.final = nn.Sequential(
nn.Conv2d(256 + 48, 256, kernel_size=3, padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, num_classes, kernel_size=1, bias=False))
# Scale-attention prediction head
assert cfg.MODEL.N_SCALES is not None
self.trn_scales = (0.5, 1.0)
self.inf_scales = sorted(cfg.MODEL.N_SCALES)
num_scales = 2
if cfg.MODEL.ATTNSCALE_BN_HEAD or bn_head:
self.scale_attn = nn.Sequential(
nn.Conv2d(num_scales * (256 + 48), 256, kernel_size=3,
padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, num_scales, kernel_size=1, bias=False),
nn.Sigmoid())
else:
self.scale_attn = nn.Sequential(
nn.Conv2d(num_scales * (256 + 48), 512, kernel_size=3,
padding=1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(512, num_scales, kernel_size=1, padding=1,
bias=False))
if cfg.OPTIONS.INIT_DECODER:
initialize_weights(self.bot_fine)
initialize_weights(self.bot_aspp)
initialize_weights(self.scale_attn)
initialize_weights(self.final)
else:
initialize_weights(self.final)
def _fwd(self, x, aspp_lo=None, aspp_attn=None):
"""
Run the network, and return final feature and logit predictions
"""
x_size = x.size()
s2_features, _, final_features = self.backbone(x)
aspp = self.aspp(final_features)
if self.fuse_aspp and \
aspp_lo is not None and aspp_attn is not None:
aspp_attn = scale_as(aspp_attn, aspp)
aspp_lo = scale_as(aspp_lo, aspp)
aspp = aspp_attn * aspp_lo + (1 - aspp_attn) * aspp
conv_aspp = self.bot_aspp(aspp)
conv_s2 = self.bot_fine(s2_features)
conv_aspp = Upsample(conv_aspp, s2_features.size()[2:])
cat_s4 = [conv_s2, conv_aspp]
cat_s4 = torch.cat(cat_s4, 1)
final = self.final(cat_s4)
out = Upsample(final, x_size[2:])
return out, cat_s4
def _forward_paired(self, inputs, scales):
"""
Hierarchical form of attention where we only predict attention for
pairs of scales at a time.
At inference time we can combine many scales together.
"""
x_1x = inputs['images']
# run 1x scale
assert 1.0 in scales, 'expected one of scales to be 1.0'
ps = {}
all_feats = {}
ps[1.0], all_feats[1.0] = self._fwd(x_1x)
# run all other scales
for scale in scales:
if scale == 1.0:
continue
resized_x = ResizeX(x_1x, scale)
p, feats = self._fwd(resized_x)
ps[scale] = scale_as(p, x_1x)
all_feats[scale] = scale_as(feats, all_feats[1.0])
# Generate all attention outputs
output = None
num_scales = len(scales)
attn = {}
for idx in range(num_scales - 1):
lo_scale = scales[idx]
hi_scale = scales[idx + 1]
concat_feats = torch.cat([all_feats[lo_scale],
all_feats[hi_scale]], 1)
p_attn = self.scale_attn(concat_feats)
attn[lo_scale] = scale_as(p_attn, x_1x)
# Normalize attentions
norm_attn = {}
last_attn = None
for idx in range(num_scales - 1):
lo_scale = scales[idx]
hi_scale = scales[idx + 1]
attn_lo = attn[lo_scale][:, 0:1, :, :]
attn_hi = attn[lo_scale][:, 1:2, :, :]
if last_attn is None:
norm_attn[lo_scale] = attn_lo
norm_attn[hi_scale] = attn_hi
else:
normalize_this_attn = last_attn / (attn_lo + attn_hi)
norm_attn[lo_scale] = attn_lo * normalize_this_attn
norm_attn[hi_scale] = attn_hi * normalize_this_attn
last_attn = attn_hi
# Apply attentions
for idx, scale in enumerate(scales):
attn = norm_attn[scale]
attn_1x_scale = scale_as(attn, x_1x)
if output is None:
output = ps[scale] * attn_1x_scale
else:
output += ps[scale] * attn_1x_scale
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(output, gts)
return loss
else:
return output, attn
def forward(self, inputs):
if self.training:
return self._forward_paired(inputs, self.trn_scales)
else:
return {'pred': self._forward_paired(inputs, self.inf_scales)}
# Batch-norm head with paired attention
def DeepV3R50BP(num_classes, criterion):
return ASDV3P_Paired(num_classes, trunk='resnet-50', criterion=criterion,
bn_head=True)
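# Illustrative sketch (not part of the original file): the chained normalization in
# ASDV3P_Paired._forward_paired redistributes each pair's "higher scale" weight over
# the next pair. For the two-pair (three-scale) case shown here, the total attention
# mass equals the first pair's lo + hi weights. Scalar tensors stand in for the
# per-pixel attention maps.
def _paired_attention_normalization_example():
    scales = (0.5, 1.0, 2.0)
    # attn[lo_scale] holds a (lo, hi) pair, as produced by scale_attn.
    attn = {0.5: torch.tensor([0.7, 0.6]), 1.0: torch.tensor([0.3, 0.9])}
    norm_attn, last_attn = {}, None
    for idx in range(len(scales) - 1):
        lo, hi = scales[idx], scales[idx + 1]
        attn_lo, attn_hi = attn[lo][0], attn[lo][1]
        if last_attn is None:
            norm_attn[lo], norm_attn[hi] = attn_lo, attn_hi
        else:
            w = last_attn / (attn_lo + attn_hi)
            norm_attn[lo], norm_attn[hi] = attn_lo * w, attn_hi * w
        last_attn = attn_hi
    total = sum(norm_attn.values())
    assert torch.allclose(total, attn[0.5][0] + attn[0.5][1])
    return norm_attn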
| semantic-segmentation-main | network/attnscale.py |
"""
# Code Adapted from:
# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
#
# BSD 3-Clause License
#
# Copyright (c) 2017,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import network.mynn as mynn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
"""
Basic Block for Resnet
"""
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = mynn.Norm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = mynn.Norm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""
Bottleneck Layer for Resnet
"""
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = mynn.Norm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = mynn.Norm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = mynn.Norm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
"""
Resnet Global Module for Initialization
"""
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = mynn.Norm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
mynn.Norm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for index in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=True, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=True, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=True, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=True, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=True, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
| semantic-segmentation-main | network/Resnet.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from torch import nn
from network.mynn import initialize_weights, Upsample
from network.mynn import scale_as
from network.utils import get_aspp, get_trunk, make_seg_head
from config import cfg
class Basic(nn.Module):
"""
Basic segmentation network, no ASPP, no Mscale
"""
def __init__(self, num_classes, trunk='hrnetv2', criterion=None):
super(Basic, self).__init__()
self.criterion = criterion
self.backbone, _, _, high_level_ch = get_trunk(
trunk_name=trunk, output_stride=8)
self.seg_head = make_seg_head(in_ch=high_level_ch,
out_ch=num_classes)
initialize_weights(self.seg_head)
def forward(self, inputs):
x = inputs['images']
_, _, final_features = self.backbone(x)
pred = self.seg_head(final_features)
pred = scale_as(pred, x)
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(pred, gts)
return loss
else:
output_dict = {'pred': pred}
return output_dict
class ASPP(nn.Module):
"""
ASPP-based Segmentation network
"""
def __init__(self, num_classes, trunk='hrnetv2', criterion=None):
super(ASPP, self).__init__()
self.criterion = criterion
self.backbone, _, _, high_level_ch = get_trunk(trunk)
self.aspp, aspp_out_ch = get_aspp(high_level_ch,
bottleneck_ch=cfg.MODEL.ASPP_BOT_CH,
output_stride=8)
self.bot_aspp = nn.Conv2d(aspp_out_ch, 256, kernel_size=1, bias=False)
self.final = make_seg_head(in_ch=256,
out_ch=num_classes)
initialize_weights(self.final, self.bot_aspp, self.aspp)
def forward(self, inputs):
x = inputs['images']
x_size = x.size()
_, _, final_features = self.backbone(x)
aspp = self.aspp(final_features)
aspp = self.bot_aspp(aspp)
pred = self.final(aspp)
pred = Upsample(pred, x_size[2:])
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(pred, gts)
return loss
else:
output_dict = {'pred': pred}
return output_dict
def HRNet(num_classes, criterion, s2s4=None):
return Basic(num_classes=num_classes, criterion=criterion,
trunk='hrnetv2')
def HRNet_ASP(num_classes, criterion, s2s4=None):
return ASPP(num_classes=num_classes, criterion=criterion,
trunk='hrnetv2')
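# Illustrative sketch (not part of the original file): these networks take a dict
# rather than a bare tensor -- {'images': ...} at inference and
# {'images': ..., 'gts': ...} during training; training returns the loss and eval
# returns {'pred': logits}. Building HRNet assumes cfg (including the HRNet
# checkpoint path and BN function) is already configured; 19 classes is only an
# example value.
def _basic_inference_example(criterion=None):
    import torch
    net = HRNet(num_classes=19, criterion=criterion).eval()
    with torch.no_grad():
        out = net({'images': torch.randn(1, 3, 512, 512)})
    return out['pred']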
| semantic-segmentation-main | network/basic.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
from torch import nn
from network.mynn import Norm2d, Upsample
from network.xception import xception71
from network.wider_resnet import wrn38
from network.SEresnext import se_resnext50_32x4d, se_resnext101_32x4d
from network.Resnet import resnet50, resnet101
import network.hrnetv2 as hrnetv2
from runx.logx import logx
from config import cfg
class get_resnet(nn.Module):
def __init__(self, trunk_name, output_stride=8):
super(get_resnet, self).__init__()
if trunk_name == 'seresnext-50':
resnet = se_resnext50_32x4d()
elif trunk_name == 'seresnext-101':
resnet = se_resnext101_32x4d()
elif trunk_name == 'resnet-50':
resnet = resnet50()
resnet.layer0 = nn.Sequential(resnet.conv1, resnet.bn1,
resnet.relu, resnet.maxpool)
elif trunk_name == 'resnet-101':
resnet = resnet101()
resnet.layer0 = nn.Sequential(resnet.conv1, resnet.bn1,
resnet.relu, resnet.maxpool)
else:
raise ValueError("Not a valid network arch")
self.layer0 = resnet.layer0
self.layer1, self.layer2, self.layer3, self.layer4 = \
resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4
if output_stride == 8:
for n, m in self.layer3.named_modules():
if 'conv2' in n:
m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
elif 'downsample.0' in n:
m.stride = (1, 1)
for n, m in self.layer4.named_modules():
if 'conv2' in n:
m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
elif 'downsample.0' in n:
m.stride = (1, 1)
elif output_stride == 16:
for n, m in self.layer4.named_modules():
if 'conv2' in n:
m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
elif 'downsample.0' in n:
m.stride = (1, 1)
else:
            raise ValueError('unsupported output_stride {}'.format(output_stride))
def forward(self, x):
x = self.layer0(x)
x = self.layer1(x)
s2_features = x
x = self.layer2(x)
s4_features = x
x = self.layer3(x)
x = self.layer4(x)
return s2_features, s4_features, x
def get_trunk(trunk_name, output_stride=8):
"""
Retrieve the network trunk and channel counts.
"""
assert output_stride == 8, 'Only stride8 supported right now'
if trunk_name == 'wrn38':
#
# FIXME: pass in output_stride once we support stride 16
#
backbone = wrn38(pretrained=True)
s2_ch = 128
s4_ch = 256
high_level_ch = 4096
elif trunk_name == 'xception71':
backbone = xception71(output_stride=output_stride, BatchNorm=Norm2d,
pretrained=True)
s2_ch = 64
s4_ch = 128
high_level_ch = 2048
elif trunk_name == 'seresnext-50' or trunk_name == 'seresnext-101':
backbone = get_resnet(trunk_name, output_stride=output_stride)
s2_ch = 48
s4_ch = -1
high_level_ch = 2048
elif trunk_name == 'resnet-50' or trunk_name == 'resnet-101':
backbone = get_resnet(trunk_name, output_stride=output_stride)
s2_ch = 256
s4_ch = -1
high_level_ch = 2048
elif trunk_name == 'hrnetv2':
backbone = hrnetv2.get_seg_model()
high_level_ch = backbone.high_level_ch
s2_ch = -1
s4_ch = -1
else:
        raise ValueError('unknown backbone {}'.format(trunk_name))
logx.msg("Trunk: {}".format(trunk_name))
return backbone, s2_ch, s4_ch, high_level_ch
class ConvBnRelu(nn.Module):
# https://github.com/lingtengqiu/Deeperlab-pytorch/blob/master/seg_opr/seg_oprs.py
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
norm_layer=Norm2d):
super(ConvBnRelu, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
stride=stride, padding=padding, bias=False)
self.bn = norm_layer(out_planes, eps=1e-5)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class AtrousSpatialPyramidPoolingModule(nn.Module):
"""
operations performed:
1x1 x depth
3x3 x depth dilation 6
3x3 x depth dilation 12
3x3 x depth dilation 18
image pooling
concatenate all together
Final 1x1 conv
"""
def __init__(self, in_dim, reduction_dim=256, output_stride=16,
rates=(6, 12, 18)):
super(AtrousSpatialPyramidPoolingModule, self).__init__()
if output_stride == 8:
rates = [2 * r for r in rates]
elif output_stride == 16:
pass
else:
            raise ValueError('output stride of {} not supported'.format(output_stride))
self.features = []
# 1x1
self.features.append(
nn.Sequential(nn.Conv2d(in_dim, reduction_dim, kernel_size=1,
bias=False),
Norm2d(reduction_dim), nn.ReLU(inplace=True)))
# other rates
for r in rates:
self.features.append(nn.Sequential(
nn.Conv2d(in_dim, reduction_dim, kernel_size=3,
dilation=r, padding=r, bias=False),
Norm2d(reduction_dim),
nn.ReLU(inplace=True)
))
self.features = nn.ModuleList(self.features)
# img level features
self.img_pooling = nn.AdaptiveAvgPool2d(1)
self.img_conv = nn.Sequential(
nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False),
Norm2d(reduction_dim), nn.ReLU(inplace=True))
def forward(self, x):
x_size = x.size()
img_features = self.img_pooling(x)
img_features = self.img_conv(img_features)
img_features = Upsample(img_features, x_size[2:])
out = img_features
for f in self.features:
y = f(x)
out = torch.cat((out, y), 1)
return out
class ASPP_edge(AtrousSpatialPyramidPoolingModule):
def __init__(self, in_dim, reduction_dim=256, output_stride=16,
rates=(6, 12, 18)):
super(ASPP_edge, self).__init__(in_dim=in_dim,
reduction_dim=reduction_dim,
output_stride=output_stride,
rates=rates)
self.edge_conv = nn.Sequential(
nn.Conv2d(1, reduction_dim, kernel_size=1, bias=False),
Norm2d(reduction_dim), nn.ReLU(inplace=True))
def forward(self, x, edge):
x_size = x.size()
img_features = self.img_pooling(x)
img_features = self.img_conv(img_features)
img_features = Upsample(img_features, x_size[2:])
out = img_features
edge_features = Upsample(edge, x_size[2:])
edge_features = self.edge_conv(edge_features)
out = torch.cat((out, edge_features), 1)
for f in self.features:
y = f(x)
out = torch.cat((out, y), 1)
return out
def dpc_conv(in_dim, reduction_dim, dil, separable):
if separable:
groups = reduction_dim
else:
groups = 1
return nn.Sequential(
nn.Conv2d(in_dim, reduction_dim, kernel_size=3, dilation=dil,
padding=dil, bias=False, groups=groups),
nn.BatchNorm2d(reduction_dim),
nn.ReLU(inplace=True)
)
class DPC(nn.Module):
'''
From: Searching for Efficient Multi-scale architectures for dense
prediction
'''
def __init__(self, in_dim, reduction_dim=256, output_stride=16,
rates=[(1, 6), (18, 15), (6, 21), (1, 1), (6, 3)],
dropout=False, separable=False):
super(DPC, self).__init__()
self.dropout = dropout
if output_stride == 8:
rates = [(2 * r[0], 2 * r[1]) for r in rates]
elif output_stride == 16:
pass
else:
            raise ValueError('output stride of {} not supported'.format(output_stride))
self.a = dpc_conv(in_dim, reduction_dim, rates[0], separable)
self.b = dpc_conv(reduction_dim, reduction_dim, rates[1], separable)
self.c = dpc_conv(reduction_dim, reduction_dim, rates[2], separable)
self.d = dpc_conv(reduction_dim, reduction_dim, rates[3], separable)
self.e = dpc_conv(reduction_dim, reduction_dim, rates[4], separable)
self.drop = nn.Dropout(p=0.1)
def forward(self, x):
a = self.a(x)
b = self.b(a)
c = self.c(a)
d = self.d(a)
e = self.e(b)
out = torch.cat((a, b, c, d, e), 1)
if self.dropout:
out = self.drop(out)
return out
def get_aspp(high_level_ch, bottleneck_ch, output_stride, dpc=False):
"""
Create aspp block
"""
if dpc:
aspp = DPC(high_level_ch, bottleneck_ch, output_stride=output_stride)
else:
aspp = AtrousSpatialPyramidPoolingModule(high_level_ch, bottleneck_ch,
output_stride=output_stride)
aspp_out_ch = 5 * bottleneck_ch
return aspp, aspp_out_ch
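# Illustrative sketch (not part of the original file): both ASPP variants concatenate
# five reduction_dim-wide branches (the 1x1, three dilated 3x3s and image pooling for
# the atrous module; branches a..e for DPC), which is why aspp_out_ch above is
# 5 * bottleneck_ch. Assumes cfg.MODEL.BNFUNC is configured, since the atrous
# branches use Norm2d.
def _aspp_channels_example():
    aspp, aspp_out_ch = get_aspp(high_level_ch=2048, bottleneck_ch=256,
                                 output_stride=8)
    out = aspp(torch.randn(1, 2048, 32, 32))
    assert out.shape[1] == aspp_out_ch == 5 * 256
    return out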
def BNReLU(ch):
return nn.Sequential(
Norm2d(ch),
nn.ReLU())
def make_seg_head(in_ch, out_ch):
bot_ch = cfg.MODEL.SEGATTN_BOT_CH
return nn.Sequential(
nn.Conv2d(in_ch, bot_ch, kernel_size=3, padding=1, bias=False),
Norm2d(bot_ch),
nn.ReLU(inplace=True),
nn.Conv2d(bot_ch, bot_ch, kernel_size=3, padding=1, bias=False),
Norm2d(bot_ch),
nn.ReLU(inplace=True),
nn.Conv2d(bot_ch, out_ch, kernel_size=1, bias=False))
def init_attn(m):
for module in m.modules():
if isinstance(module, (nn.Conv2d, nn.Linear)):
nn.init.zeros_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0.5)
elif isinstance(module, cfg.MODEL.BNFUNC):
module.weight.data.fill_(1)
module.bias.data.zero_()
def make_attn_head(in_ch, out_ch):
bot_ch = cfg.MODEL.SEGATTN_BOT_CH
if cfg.MODEL.MSCALE_OLDARCH:
return old_make_attn_head(in_ch, bot_ch, out_ch)
od = OrderedDict([('conv0', nn.Conv2d(in_ch, bot_ch, kernel_size=3,
padding=1, bias=False)),
('bn0', Norm2d(bot_ch)),
('re0', nn.ReLU(inplace=True))])
if cfg.MODEL.MSCALE_INNER_3x3:
od['conv1'] = nn.Conv2d(bot_ch, bot_ch, kernel_size=3, padding=1,
bias=False)
od['bn1'] = Norm2d(bot_ch)
od['re1'] = nn.ReLU(inplace=True)
if cfg.MODEL.MSCALE_DROPOUT:
od['drop'] = nn.Dropout(0.5)
od['conv2'] = nn.Conv2d(bot_ch, out_ch, kernel_size=1, bias=False)
od['sig'] = nn.Sigmoid()
attn_head = nn.Sequential(od)
# init_attn(attn_head)
return attn_head
def old_make_attn_head(in_ch, bot_ch, out_ch):
attn = nn.Sequential(
nn.Conv2d(in_ch, bot_ch, kernel_size=3, padding=1, bias=False),
Norm2d(bot_ch),
nn.ReLU(inplace=True),
nn.Conv2d(bot_ch, bot_ch, kernel_size=3, padding=1, bias=False),
Norm2d(bot_ch),
nn.ReLU(inplace=True),
        nn.Conv2d(bot_ch, out_ch, kernel_size=1, bias=False),
nn.Sigmoid())
init_attn(attn)
return attn
| semantic-segmentation-main | network/utils.py |
# Most of the code below is from the following repo:
# https://github.com/HRNet/HRNet-Semantic-Segmentation/tree/HRNet-OCR
#
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Ke Sun (sunk@mail.ustc.edu.cn), Jingyi Xie (hsfzxjy@gmail.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
from network.mynn import Norm2d
from runx.logx import logx
from config import cfg
BN_MOMENTUM = 0.1
align_corners = cfg.MODEL.ALIGN_CORNERS
relu_inplace = True
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = Norm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=relu_inplace)
self.conv2 = conv3x3(planes, planes)
self.bn2 = Norm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = Norm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = Norm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = Norm2d(planes * self.expansion, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=relu_inplace)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu(out)
return out
class HighResolutionModule(nn.Module):
def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
num_channels, fuse_method, multi_scale_output=True):
super(HighResolutionModule, self).__init__()
self._check_branches(
num_branches, blocks, num_blocks, num_inchannels, num_channels)
self.num_inchannels = num_inchannels
self.fuse_method = fuse_method
self.num_branches = num_branches
self.multi_scale_output = multi_scale_output
self.branches = self._make_branches(
num_branches, blocks, num_blocks, num_channels)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(inplace=relu_inplace)
def _check_branches(self, num_branches, blocks, num_blocks,
num_inchannels, num_channels):
if num_branches != len(num_blocks):
error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
num_branches, len(num_blocks))
logx.msg(error_msg)
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
num_branches, len(num_channels))
logx.msg(error_msg)
raise ValueError(error_msg)
if num_branches != len(num_inchannels):
error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
num_branches, len(num_inchannels))
logx.msg(error_msg)
raise ValueError(error_msg)
def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
stride=1):
downsample = None
if stride != 1 or \
self.num_inchannels[branch_index] != (num_channels[branch_index] *
block.expansion):
downsample = nn.Sequential(
nn.Conv2d(self.num_inchannels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1, stride=stride, bias=False),
Norm2d(num_channels[branch_index] * block.expansion,
momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.num_inchannels[branch_index],
num_channels[branch_index], stride, downsample))
self.num_inchannels[branch_index] = \
num_channels[branch_index] * block.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(block(self.num_inchannels[branch_index],
num_channels[branch_index]))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(
self._make_one_branch(i, block, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return None
num_branches = self.num_branches
num_inchannels = self.num_inchannels
fuse_layers = []
for i in range(num_branches if self.multi_scale_output else 1):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_inchannels[i],
1,
1,
0,
bias=False),
Norm2d(num_inchannels[i], momentum=BN_MOMENTUM)))
elif j == i:
fuse_layer.append(None)
else:
conv3x3s = []
for k in range(i-j):
if k == i - j - 1:
num_outchannels_conv3x3 = num_inchannels[i]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_outchannels_conv3x3,
3, 2, 1, bias=False),
Norm2d(num_outchannels_conv3x3,
momentum=BN_MOMENTUM)))
else:
num_outchannels_conv3x3 = num_inchannels[j]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_outchannels_conv3x3,
3, 2, 1, bias=False),
Norm2d(num_outchannels_conv3x3,
momentum=BN_MOMENTUM),
nn.ReLU(inplace=relu_inplace)))
fuse_layer.append(nn.Sequential(*conv3x3s))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def get_num_inchannels(self):
return self.num_inchannels
def forward(self, x):
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
for j in range(1, self.num_branches):
if i == j:
y = y + x[j]
elif j > i:
width_output = x[i].shape[-1]
height_output = x[i].shape[-2]
y = y + F.interpolate(
self.fuse_layers[i][j](x[j]),
size=[height_output, width_output],
mode='bilinear', align_corners=align_corners)
else:
y = y + self.fuse_layers[i][j](x[j])
x_fuse.append(self.relu(y))
return x_fuse
blocks_dict = {
'BASIC': BasicBlock,
'BOTTLENECK': Bottleneck
}
class HighResolutionNet(nn.Module):
def __init__(self, **kwargs):
extra = cfg.MODEL.OCR_EXTRA
super(HighResolutionNet, self).__init__()
# stem net
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn1 = Norm2d(64, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn2 = Norm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=relu_inplace)
self.stage1_cfg = extra['STAGE1']
num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
block = blocks_dict[self.stage1_cfg['BLOCK']]
num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
stage1_out_channel = block.expansion*num_channels
self.stage2_cfg = extra['STAGE2']
num_channels = self.stage2_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage2_cfg['BLOCK']]
num_channels = [num_channels[i] * block.expansion
for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer(
[stage1_out_channel], num_channels)
self.stage2, pre_stage_channels = self._make_stage(
self.stage2_cfg, num_channels)
self.stage3_cfg = extra['STAGE3']
num_channels = self.stage3_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage3_cfg['BLOCK']]
num_channels = [num_channels[i] * block.expansion
for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage3, pre_stage_channels = self._make_stage(
self.stage3_cfg, num_channels)
self.stage4_cfg = extra['STAGE4']
num_channels = self.stage4_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage4_cfg['BLOCK']]
num_channels = [num_channels[i] * block.expansion
for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels, multi_scale_output=True)
        self.high_level_ch = int(np.sum(pre_stage_channels))
def _make_transition_layer(
self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(nn.Sequential(
nn.Conv2d(num_channels_pre_layer[i],
num_channels_cur_layer[i],
3,
1,
1,
bias=False),
Norm2d(
num_channels_cur_layer[i], momentum=BN_MOMENTUM),
nn.ReLU(inplace=relu_inplace)))
else:
transition_layers.append(None)
else:
conv3x3s = []
for j in range(i+1-num_branches_pre):
inchannels = num_channels_pre_layer[-1]
outchannels = num_channels_cur_layer[i] \
if j == i-num_branches_pre else inchannels
conv3x3s.append(nn.Sequential(
nn.Conv2d(
inchannels, outchannels, 3, 2, 1, bias=False),
Norm2d(outchannels, momentum=BN_MOMENTUM),
nn.ReLU(inplace=relu_inplace)))
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
Norm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_inchannels,
multi_scale_output=True):
num_modules = layer_config['NUM_MODULES']
num_branches = layer_config['NUM_BRANCHES']
num_blocks = layer_config['NUM_BLOCKS']
num_channels = layer_config['NUM_CHANNELS']
block = blocks_dict[layer_config['BLOCK']]
fuse_method = layer_config['FUSE_METHOD']
modules = []
for i in range(num_modules):
            # multi_scale_output is only used for the last module
if not multi_scale_output and i == num_modules - 1:
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
modules.append(
HighResolutionModule(num_branches,
block,
num_blocks,
num_inchannels,
num_channels,
fuse_method,
reset_multi_scale_output)
)
num_inchannels = modules[-1].get_num_inchannels()
return nn.Sequential(*modules), num_inchannels
def forward(self, x_in):
x = self.conv1(x_in)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['NUM_BRANCHES']):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['NUM_BRANCHES']):
if self.transition2[i] is not None:
if i < self.stage2_cfg['NUM_BRANCHES']:
x_list.append(self.transition2[i](y_list[i]))
else:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['NUM_BRANCHES']):
if self.transition3[i] is not None:
if i < self.stage3_cfg['NUM_BRANCHES']:
x_list.append(self.transition3[i](y_list[i]))
else:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
x = self.stage4(x_list)
# Upsampling
x0_h, x0_w = x[0].size(2), x[0].size(3)
x1 = F.interpolate(x[1], size=(x0_h, x0_w),
mode='bilinear', align_corners=align_corners)
x2 = F.interpolate(x[2], size=(x0_h, x0_w),
mode='bilinear', align_corners=align_corners)
x3 = F.interpolate(x[3], size=(x0_h, x0_w),
mode='bilinear', align_corners=align_corners)
feats = torch.cat([x[0], x1, x2, x3], 1)
return None, None, feats
def init_weights(self, pretrained=cfg.MODEL.HRNET_CHECKPOINT):
logx.msg('=> init weights from normal distribution')
for name, m in self.named_modules():
if any(part in name for part in {'cls', 'aux', 'ocr'}):
# print('skipped', name)
continue
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
elif isinstance(m, cfg.MODEL.BNFUNC):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained,
map_location={'cuda:0': 'cpu'})
logx.msg('=> loading pretrained model {}'.format(pretrained))
model_dict = self.state_dict()
pretrained_dict = {k.replace('last_layer',
'aux_head').replace('model.', ''): v
for k, v in pretrained_dict.items()}
#print(set(model_dict) - set(pretrained_dict))
#print(set(pretrained_dict) - set(model_dict))
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()}
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
elif pretrained:
raise RuntimeError('No such file {}'.format(pretrained))
def get_seg_model():
model = HighResolutionNet()
model.init_weights()
return model
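# Minimal usage sketch (illustrative; assumes cfg.MODEL.OCR_EXTRA and
# cfg.MODEL.HRNET_CHECKPOINT are configured):
#     net = get_seg_model()
#     _, _, feats = net(torch.randn(1, 3, 512, 1024))
#     # feats: concatenated multi-resolution features with net.high_level_ch
#     # channels at 1/4 of the input resolution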
| semantic-segmentation-main | network/hrnetv2.py |
import torch
import functools
if torch.__version__.startswith('0'):
from .sync_bn.inplace_abn.bn import InPlaceABNSync
BatchNorm2d = functools.partial(InPlaceABNSync, activation='none')
BatchNorm2d_class = InPlaceABNSync
relu_inplace = False
else:
BatchNorm2d_class = BatchNorm2d = torch.nn.SyncBatchNorm
relu_inplace = True
| semantic-segmentation-main | network/bn_helper.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from torch import nn
from network.mynn import initialize_weights, Upsample, scale_as
from network.mynn import ResizeX
from network.utils import get_trunk
from network.utils import BNReLU, get_aspp
from network.utils import make_attn_head
from network.ocr_utils import SpatialGather_Module, SpatialOCR_Module
from config import cfg
from utils.misc import fmt_scale
class OCR_block(nn.Module):
"""
Some of the code in this class is borrowed from:
https://github.com/HRNet/HRNet-Semantic-Segmentation/tree/HRNet-OCR
"""
def __init__(self, high_level_ch):
super(OCR_block, self).__init__()
ocr_mid_channels = cfg.MODEL.OCR.MID_CHANNELS
ocr_key_channels = cfg.MODEL.OCR.KEY_CHANNELS
num_classes = cfg.DATASET.NUM_CLASSES
self.conv3x3_ocr = nn.Sequential(
nn.Conv2d(high_level_ch, ocr_mid_channels,
kernel_size=3, stride=1, padding=1),
BNReLU(ocr_mid_channels),
)
self.ocr_gather_head = SpatialGather_Module(num_classes)
self.ocr_distri_head = SpatialOCR_Module(in_channels=ocr_mid_channels,
key_channels=ocr_key_channels,
out_channels=ocr_mid_channels,
scale=1,
dropout=0.05,
)
self.cls_head = nn.Conv2d(
ocr_mid_channels, num_classes, kernel_size=1, stride=1, padding=0,
bias=True)
self.aux_head = nn.Sequential(
nn.Conv2d(high_level_ch, high_level_ch,
kernel_size=1, stride=1, padding=0),
BNReLU(high_level_ch),
nn.Conv2d(high_level_ch, num_classes,
kernel_size=1, stride=1, padding=0, bias=True)
)
if cfg.OPTIONS.INIT_DECODER:
initialize_weights(self.conv3x3_ocr,
self.ocr_gather_head,
self.ocr_distri_head,
self.cls_head,
self.aux_head)
def forward(self, high_level_features):
feats = self.conv3x3_ocr(high_level_features)
aux_out = self.aux_head(high_level_features)
context = self.ocr_gather_head(feats, aux_out)
ocr_feats = self.ocr_distri_head(feats, context)
cls_out = self.cls_head(ocr_feats)
return cls_out, aux_out, ocr_feats
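# Shape sketch for OCR_block.forward (illustrative): given high-level features
# of shape N x high_level_ch x H x W,
#     cls_out   : N x num_classes x H x W       (OCR logits at feature res)
#     aux_out   : N x num_classes x H x W       (auxiliary logits, used as the
#                                                soft region assignment)
#     ocr_feats : N x OCR.MID_CHANNELS x H x W  (object-contextual features)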
class OCRNet(nn.Module):
"""
OCR net
"""
def __init__(self, num_classes, trunk='hrnetv2', criterion=None):
super(OCRNet, self).__init__()
self.criterion = criterion
self.backbone, _, _, high_level_ch = get_trunk(trunk)
self.ocr = OCR_block(high_level_ch)
def forward(self, inputs):
assert 'images' in inputs
x = inputs['images']
_, _, high_level_features = self.backbone(x)
cls_out, aux_out, _ = self.ocr(high_level_features)
aux_out = scale_as(aux_out, x)
cls_out = scale_as(cls_out, x)
if self.training:
gts = inputs['gts']
aux_loss = self.criterion(aux_out, gts,
do_rmi=cfg.LOSS.OCR_AUX_RMI)
main_loss = self.criterion(cls_out, gts)
loss = cfg.LOSS.OCR_ALPHA * aux_loss + main_loss
return loss
else:
output_dict = {'pred': cls_out}
return output_dict
class OCRNetASPP(nn.Module):
"""
OCR net
"""
def __init__(self, num_classes, trunk='hrnetv2', criterion=None):
super(OCRNetASPP, self).__init__()
self.criterion = criterion
self.backbone, _, _, high_level_ch = get_trunk(trunk)
self.aspp, aspp_out_ch = get_aspp(high_level_ch,
bottleneck_ch=256,
output_stride=8)
self.ocr = OCR_block(aspp_out_ch)
def forward(self, inputs):
assert 'images' in inputs
x = inputs['images']
_, _, high_level_features = self.backbone(x)
aspp = self.aspp(high_level_features)
cls_out, aux_out, _ = self.ocr(aspp)
aux_out = scale_as(aux_out, x)
cls_out = scale_as(cls_out, x)
if self.training:
gts = inputs['gts']
loss = cfg.LOSS.OCR_ALPHA * self.criterion(aux_out, gts) + \
self.criterion(cls_out, gts)
return loss
else:
output_dict = {'pred': cls_out}
return output_dict
class MscaleOCR(nn.Module):
"""
OCR net
"""
def __init__(self, num_classes, trunk='hrnetv2', criterion=None):
super(MscaleOCR, self).__init__()
self.criterion = criterion
self.backbone, _, _, high_level_ch = get_trunk(trunk)
self.ocr = OCR_block(high_level_ch)
self.scale_attn = make_attn_head(
in_ch=cfg.MODEL.OCR.MID_CHANNELS, out_ch=1)
def _fwd(self, x):
x_size = x.size()[2:]
_, _, high_level_features = self.backbone(x)
cls_out, aux_out, ocr_mid_feats = self.ocr(high_level_features)
attn = self.scale_attn(ocr_mid_feats)
aux_out = Upsample(aux_out, x_size)
cls_out = Upsample(cls_out, x_size)
attn = Upsample(attn, x_size)
return {'cls_out': cls_out,
'aux_out': aux_out,
'logit_attn': attn}
def nscale_forward(self, inputs, scales):
"""
Hierarchical attention, primarily used for getting best inference
results.
We use attention at multiple scales, giving priority to the lower
resolutions. For example, if we have 4 scales {0.5, 1.0, 1.5, 2.0},
then evaluation is done as follows:
p_joint = attn_1.5 * p_1.5 + (1 - attn_1.5) * down(p_2.0)
p_joint = attn_1.0 * p_1.0 + (1 - attn_1.0) * down(p_joint)
        p_joint = up(attn_0.5 * p_0.5) + (1 - up(attn_0.5)) * p_joint
The target scale is always 1.0, and 1.0 is expected to be part of the
list of scales. When predictions are done at greater than 1.0 scale,
the predictions are downsampled before combining with the next lower
scale.
Inputs:
scales - a list of scales to evaluate
inputs - dict containing 'images', the input, and 'gts', the ground
truth mask
Output:
If training, return loss, else return prediction + attention
"""
x_1x = inputs['images']
assert 1.0 in scales, 'expected 1.0 to be the target scale'
# Lower resolution provides attention for higher rez predictions,
# so we evaluate in order: high to low
scales = sorted(scales, reverse=True)
pred = None
aux = None
output_dict = {}
for s in scales:
x = ResizeX(x_1x, s)
outs = self._fwd(x)
cls_out = outs['cls_out']
attn_out = outs['logit_attn']
aux_out = outs['aux_out']
output_dict[fmt_scale('pred', s)] = cls_out
if s != 2.0:
output_dict[fmt_scale('attn', s)] = attn_out
if pred is None:
pred = cls_out
aux = aux_out
elif s >= 1.0:
# downscale previous
pred = scale_as(pred, cls_out)
pred = attn_out * cls_out + (1 - attn_out) * pred
aux = scale_as(aux, cls_out)
aux = attn_out * aux_out + (1 - attn_out) * aux
else:
# s < 1.0: upscale current
cls_out = attn_out * cls_out
aux_out = attn_out * aux_out
cls_out = scale_as(cls_out, pred)
aux_out = scale_as(aux_out, pred)
attn_out = scale_as(attn_out, pred)
pred = cls_out + (1 - attn_out) * pred
aux = aux_out + (1 - attn_out) * aux
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = cfg.LOSS.OCR_ALPHA * self.criterion(aux, gts) + \
self.criterion(pred, gts)
return loss
else:
output_dict['pred'] = pred
return output_dict
def two_scale_forward(self, inputs):
"""
        Do we supervise both aux outputs, lo and hi scale?
        Should attention be used to combine the aux outputs?
        Normally we only supervise the combined 1x output.
        If we use attention to combine the aux outputs, then
        we can use normal weighting for aux vs. cls outputs.
"""
assert 'images' in inputs
x_1x = inputs['images']
x_lo = ResizeX(x_1x, cfg.MODEL.MSCALE_LO_SCALE)
lo_outs = self._fwd(x_lo)
pred_05x = lo_outs['cls_out']
p_lo = pred_05x
aux_lo = lo_outs['aux_out']
logit_attn = lo_outs['logit_attn']
attn_05x = logit_attn
hi_outs = self._fwd(x_1x)
pred_10x = hi_outs['cls_out']
p_1x = pred_10x
aux_1x = hi_outs['aux_out']
p_lo = logit_attn * p_lo
aux_lo = logit_attn * aux_lo
p_lo = scale_as(p_lo, p_1x)
aux_lo = scale_as(aux_lo, p_1x)
logit_attn = scale_as(logit_attn, p_1x)
# combine lo and hi predictions with attention
joint_pred = p_lo + (1 - logit_attn) * p_1x
joint_aux = aux_lo + (1 - logit_attn) * aux_1x
if self.training:
gts = inputs['gts']
do_rmi = cfg.LOSS.OCR_AUX_RMI
aux_loss = self.criterion(joint_aux, gts, do_rmi=do_rmi)
# Optionally turn off RMI loss for first epoch to try to work
# around cholesky errors of singular matrix
do_rmi_main = True # cfg.EPOCH > 0
main_loss = self.criterion(joint_pred, gts, do_rmi=do_rmi_main)
loss = cfg.LOSS.OCR_ALPHA * aux_loss + main_loss
# Optionally, apply supervision to the multi-scale predictions
# directly. Turn off RMI to keep things lightweight
if cfg.LOSS.SUPERVISED_MSCALE_WT:
scaled_pred_05x = scale_as(pred_05x, p_1x)
loss_lo = self.criterion(scaled_pred_05x, gts, do_rmi=False)
loss_hi = self.criterion(pred_10x, gts, do_rmi=False)
loss += cfg.LOSS.SUPERVISED_MSCALE_WT * loss_lo
loss += cfg.LOSS.SUPERVISED_MSCALE_WT * loss_hi
return loss
else:
output_dict = {
'pred': joint_pred,
'pred_05x': pred_05x,
'pred_10x': pred_10x,
'attn_05x': attn_05x,
}
return output_dict
def forward(self, inputs):
if cfg.MODEL.N_SCALES and not self.training:
return self.nscale_forward(inputs, cfg.MODEL.N_SCALES)
return self.two_scale_forward(inputs)
def HRNet(num_classes, criterion):
return OCRNet(num_classes, trunk='hrnetv2', criterion=criterion)
def HRNet_Mscale(num_classes, criterion):
return MscaleOCR(num_classes, trunk='hrnetv2', criterion=criterion)
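# Usage sketch (illustrative; `criterion` is assumed to be a segmentation loss
# callable as criterion(logits, gts, ...)):
#     net = HRNet_Mscale(num_classes=19, criterion=criterion)
#     loss = net({'images': images, 'gts': labels})   # training mode
#     pred = net({'images': images})['pred']          # eval mode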
| semantic-segmentation-main | network/ocrnet.py |
"""
Code Adapted from:
https://github.com/sthalles/deeplab_v3
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import torch
from torch import nn
from network.mynn import initialize_weights, Norm2d, Upsample
from network.utils import get_aspp, get_trunk, make_seg_head
class DeepV3Plus(nn.Module):
"""
DeepLabV3+ with various trunks supported
Always stride8
"""
def __init__(self, num_classes, trunk='wrn38', criterion=None,
use_dpc=False, init_all=False):
super(DeepV3Plus, self).__init__()
self.criterion = criterion
self.backbone, s2_ch, _s4_ch, high_level_ch = get_trunk(trunk)
self.aspp, aspp_out_ch = get_aspp(high_level_ch,
bottleneck_ch=256,
output_stride=8,
dpc=use_dpc)
self.bot_fine = nn.Conv2d(s2_ch, 48, kernel_size=1, bias=False)
self.bot_aspp = nn.Conv2d(aspp_out_ch, 256, kernel_size=1, bias=False)
self.final = nn.Sequential(
nn.Conv2d(256 + 48, 256, kernel_size=3, padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, num_classes, kernel_size=1, bias=False))
if init_all:
initialize_weights(self.aspp)
initialize_weights(self.bot_aspp)
initialize_weights(self.bot_fine)
initialize_weights(self.final)
else:
initialize_weights(self.final)
def forward(self, inputs):
assert 'images' in inputs
x = inputs['images']
x_size = x.size()
s2_features, _, final_features = self.backbone(x)
aspp = self.aspp(final_features)
conv_aspp = self.bot_aspp(aspp)
conv_s2 = self.bot_fine(s2_features)
conv_aspp = Upsample(conv_aspp, s2_features.size()[2:])
cat_s4 = [conv_s2, conv_aspp]
cat_s4 = torch.cat(cat_s4, 1)
final = self.final(cat_s4)
out = Upsample(final, x_size[2:])
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
return self.criterion(out, gts)
return {'pred': out}
def DeepV3PlusSRNX50(num_classes, criterion):
return DeepV3Plus(num_classes, trunk='seresnext-50', criterion=criterion)
def DeepV3PlusR50(num_classes, criterion):
return DeepV3Plus(num_classes, trunk='resnet-50', criterion=criterion)
def DeepV3PlusSRNX101(num_classes, criterion):
return DeepV3Plus(num_classes, trunk='seresnext-101', criterion=criterion)
def DeepV3PlusW38(num_classes, criterion):
return DeepV3Plus(num_classes, trunk='wrn38', criterion=criterion)
def DeepV3PlusW38I(num_classes, criterion):
return DeepV3Plus(num_classes, trunk='wrn38', criterion=criterion,
init_all=True)
def DeepV3PlusX71(num_classes, criterion):
return DeepV3Plus(num_classes, trunk='xception71', criterion=criterion)
def DeepV3PlusEffB4(num_classes, criterion):
return DeepV3Plus(num_classes, trunk='efficientnet_b4',
criterion=criterion)
class DeepV3(nn.Module):
"""
DeepLabV3 with various trunks supported
"""
def __init__(self, num_classes, trunk='resnet-50', criterion=None,
use_dpc=False, init_all=False, output_stride=8):
super(DeepV3, self).__init__()
self.criterion = criterion
self.backbone, _s2_ch, _s4_ch, high_level_ch = \
get_trunk(trunk, output_stride=output_stride)
self.aspp, aspp_out_ch = get_aspp(high_level_ch,
bottleneck_ch=256,
output_stride=output_stride,
dpc=use_dpc)
self.final = make_seg_head(in_ch=aspp_out_ch, out_ch=num_classes)
initialize_weights(self.aspp)
initialize_weights(self.final)
def forward(self, inputs):
assert 'images' in inputs
x = inputs['images']
x_size = x.size()
_, _, final_features = self.backbone(x)
aspp = self.aspp(final_features)
final = self.final(aspp)
out = Upsample(final, x_size[2:])
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
return self.criterion(out, gts)
return {'pred': out}
def DeepV3R50(num_classes, criterion):
return DeepV3(num_classes, trunk='resnet-50', criterion=criterion)
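# Usage sketch (illustrative): all the factories above share one calling
# convention, e.g.
#     net = DeepV3PlusR50(num_classes=19, criterion=criterion)
#     loss = net({'images': images, 'gts': labels})   # training mode
#     pred = net({'images': images})['pred']          # eval mode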
| semantic-segmentation-main | network/deepv3.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import torch
from torch import nn
from network.mynn import Upsample2
from network.utils import ConvBnRelu, get_trunk, get_aspp
class DeeperS8(nn.Module):
"""
Panoptic DeepLab-style semantic segmentation network
stride8 only
"""
def __init__(self, num_classes, trunk='wrn38', criterion=None):
super(DeeperS8, self).__init__()
self.criterion = criterion
self.trunk, s2_ch, s4_ch, high_level_ch = get_trunk(trunk_name=trunk,
output_stride=8)
self.aspp, aspp_out_ch = get_aspp(high_level_ch, bottleneck_ch=256,
output_stride=8)
self.convs2 = nn.Conv2d(s2_ch, 32, kernel_size=1, bias=False)
self.convs4 = nn.Conv2d(s4_ch, 64, kernel_size=1, bias=False)
self.conv_up1 = nn.Conv2d(aspp_out_ch, 256, kernel_size=1, bias=False)
self.conv_up2 = ConvBnRelu(256 + 64, 256, kernel_size=5, padding=2)
self.conv_up3 = ConvBnRelu(256 + 32, 256, kernel_size=5, padding=2)
self.conv_up5 = nn.Conv2d(256, num_classes, kernel_size=1, bias=False)
def forward(self, inputs, gts=None):
assert 'images' in inputs
x = inputs['images']
s2_features, s4_features, final_features = self.trunk(x)
s2_features = self.convs2(s2_features)
s4_features = self.convs4(s4_features)
aspp = self.aspp(final_features)
x = self.conv_up1(aspp)
x = Upsample2(x)
x = torch.cat([x, s4_features], 1)
x = self.conv_up2(x)
x = Upsample2(x)
x = torch.cat([x, s2_features], 1)
x = self.conv_up3(x)
x = self.conv_up5(x)
x = Upsample2(x)
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
return self.criterion(x, gts)
return {'pred': x}
def DeeperW38(num_classes, criterion, s2s4=True):
return DeeperS8(num_classes, criterion=criterion, trunk='wrn38')
def DeeperX71(num_classes, criterion, s2s4=True):
return DeeperS8(num_classes, criterion=criterion, trunk='xception71')
def DeeperEffB4(num_classes, criterion, s2s4=True):
return DeeperS8(num_classes, criterion=criterion, trunk='efficientnet_b4')
| semantic-segmentation-main | network/deeper.py |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Ke Sun (sunk@mail.ustc.edu.cn), Jingyi Xie (hsfzxjy@gmail.com)
#
# This code is from: https://github.com/HRNet/HRNet-Semantic-Segmentation
# ------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import cfg
from network.utils import BNReLU
class SpatialGather_Module(nn.Module):
"""
Aggregate the context features according to the initial
predicted probability distribution.
Employ the soft-weighted method to aggregate the context.
Output:
The correlation of every class map with every feature map
shape = [n, num_feats, num_classes, 1]
"""
def __init__(self, cls_num=0, scale=1):
super(SpatialGather_Module, self).__init__()
self.cls_num = cls_num
self.scale = scale
def forward(self, feats, probs):
        batch_size, c, _, _ = probs.size()
        # flatten each class probability map into a vector over positions
        probs = probs.view(batch_size, c, -1)
feats = feats.view(batch_size, feats.size(1), -1)
feats = feats.permute(0, 2, 1) # batch x hw x c
probs = F.softmax(self.scale * probs, dim=2) # batch x k x hw
ocr_context = torch.matmul(probs, feats)
ocr_context = ocr_context.permute(0, 2, 1).unsqueeze(3)
return ocr_context
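# Shape sketch (illustrative): with feats of shape N x C x H x W and probs of
# shape N x K x H x W (K = number of classes), the soft-weighted gather is
#     softmax(probs) (N x K x HW)  @  feats (N x HW x C)  ->  N x K x C
# which is returned permuted and unsqueezed as N x C x K x 1: one
# C-dimensional object-region descriptor per class.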
class ObjectAttentionBlock(nn.Module):
'''
The basic implementation for object context block
Input:
N X C X H X W
Parameters:
in_channels : the dimension of the input feature map
key_channels : the dimension after the key/query transform
scale : choose the scale to downsample the input feature
maps (save memory cost)
Return:
N X C X H X W
'''
def __init__(self, in_channels, key_channels, scale=1):
super(ObjectAttentionBlock, self).__init__()
self.scale = scale
self.in_channels = in_channels
self.key_channels = key_channels
self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
self.f_pixel = nn.Sequential(
nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,
kernel_size=1, stride=1, padding=0, bias=False),
BNReLU(self.key_channels),
nn.Conv2d(in_channels=self.key_channels, out_channels=self.key_channels,
kernel_size=1, stride=1, padding=0, bias=False),
BNReLU(self.key_channels),
)
self.f_object = nn.Sequential(
nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,
kernel_size=1, stride=1, padding=0, bias=False),
BNReLU(self.key_channels),
nn.Conv2d(in_channels=self.key_channels, out_channels=self.key_channels,
kernel_size=1, stride=1, padding=0, bias=False),
BNReLU(self.key_channels),
)
self.f_down = nn.Sequential(
nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,
kernel_size=1, stride=1, padding=0, bias=False),
BNReLU(self.key_channels),
)
self.f_up = nn.Sequential(
nn.Conv2d(in_channels=self.key_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0, bias=False),
BNReLU(self.in_channels),
)
def forward(self, x, proxy):
batch_size, h, w = x.size(0), x.size(2), x.size(3)
if self.scale > 1:
x = self.pool(x)
query = self.f_pixel(x).view(batch_size, self.key_channels, -1)
query = query.permute(0, 2, 1)
key = self.f_object(proxy).view(batch_size, self.key_channels, -1)
value = self.f_down(proxy).view(batch_size, self.key_channels, -1)
value = value.permute(0, 2, 1)
sim_map = torch.matmul(query, key)
sim_map = (self.key_channels**-.5) * sim_map
sim_map = F.softmax(sim_map, dim=-1)
# add bg context ...
context = torch.matmul(sim_map, value)
context = context.permute(0, 2, 1).contiguous()
context = context.view(batch_size, self.key_channels, *x.size()[2:])
context = self.f_up(context)
if self.scale > 1:
context = F.interpolate(input=context, size=(h, w), mode='bilinear',
align_corners=cfg.MODEL.ALIGN_CORNERS)
return context
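# Shape sketch (illustrative): query is N x HW x key_channels, key is
# N x key_channels x K and value is N x K x key_channels, so sim_map is
# N x HW x K; every pixel attends over the K object-region descriptors from
# SpatialGather_Module and the attended result is projected back to
# in_channels by f_up.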
class SpatialOCR_Module(nn.Module):
"""
Implementation of the OCR module:
We aggregate the global object representation to update the representation
for each pixel.
"""
def __init__(self, in_channels, key_channels, out_channels, scale=1,
dropout=0.1):
super(SpatialOCR_Module, self).__init__()
self.object_context_block = ObjectAttentionBlock(in_channels,
key_channels,
scale)
if cfg.MODEL.OCR_ASPP:
self.aspp, aspp_out_ch = get_aspp(
in_channels, bottleneck_ch=cfg.MODEL.ASPP_BOT_CH,
output_stride=8)
_in_channels = 2 * in_channels + aspp_out_ch
else:
_in_channels = 2 * in_channels
self.conv_bn_dropout = nn.Sequential(
nn.Conv2d(_in_channels, out_channels, kernel_size=1, padding=0,
bias=False),
BNReLU(out_channels),
nn.Dropout2d(dropout)
)
def forward(self, feats, proxy_feats):
context = self.object_context_block(feats, proxy_feats)
if cfg.MODEL.OCR_ASPP:
aspp = self.aspp(feats)
output = self.conv_bn_dropout(torch.cat([context, aspp, feats], 1))
else:
output = self.conv_bn_dropout(torch.cat([context, feats], 1))
return output
| semantic-segmentation-main | network/ocr_utils.py |
"""
Custom Norm wrappers to enable sync BN, regular BN and for weight
initialization
"""
import re
import torch
import torch.nn as nn
from config import cfg
from apex import amp
from runx.logx import logx
align_corners = cfg.MODEL.ALIGN_CORNERS
def Norm2d(in_channels, **kwargs):
"""
Custom Norm Function to allow flexible switching
"""
layer = getattr(cfg.MODEL, 'BNFUNC')
normalization_layer = layer(in_channels, **kwargs)
return normalization_layer
def initialize_weights(*models):
"""
Initialize Model Weights
"""
for model in models:
for module in model.modules():
if isinstance(module, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, cfg.MODEL.BNFUNC):
module.weight.data.fill_(1)
module.bias.data.zero_()
@amp.float_function
def Upsample(x, size):
"""
Wrapper Around the Upsample Call
"""
return nn.functional.interpolate(x, size=size, mode='bilinear',
align_corners=align_corners)
@amp.float_function
def Upsample2(x):
"""
Wrapper Around the Upsample Call
"""
return nn.functional.interpolate(x, scale_factor=2, mode='bilinear',
align_corners=align_corners)
def Down2x(x):
return torch.nn.functional.interpolate(
x, scale_factor=0.5, mode='bilinear', align_corners=align_corners)
def Up15x(x):
return torch.nn.functional.interpolate(
x, scale_factor=1.5, mode='bilinear', align_corners=align_corners)
def scale_as(x, y):
'''
scale x to the same size as y
'''
y_size = y.size(2), y.size(3)
    x_scaled = torch.nn.functional.interpolate(
        x, size=y_size, mode='bilinear',
        align_corners=align_corners)
    return x_scaled
def DownX(x, scale_factor):
'''
    downscale x by the given scale factor
'''
if cfg.OPTIONS.TORCH_VERSION >= 1.5:
x_scaled = torch.nn.functional.interpolate(
x, scale_factor=scale_factor, mode='bilinear',
align_corners=align_corners, recompute_scale_factor=True)
else:
x_scaled = torch.nn.functional.interpolate(
x, scale_factor=scale_factor, mode='bilinear',
align_corners=align_corners)
return x_scaled
def ResizeX(x, scale_factor):
'''
scale x by some factor
'''
if cfg.OPTIONS.TORCH_VERSION >= 1.5:
x_scaled = torch.nn.functional.interpolate(
x, scale_factor=scale_factor, mode='bilinear',
align_corners=align_corners, recompute_scale_factor=True)
else:
x_scaled = torch.nn.functional.interpolate(
x, scale_factor=scale_factor, mode='bilinear',
align_corners=align_corners)
return x_scaled
| semantic-segmentation-main | network/mynn.py |
# Xception71
# Code Adapted from:
# https://github.com/jfzhang95/pytorch-deeplab-xception/blob/master/modeling/backbone/xception.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import cfg
from network.mynn import Norm2d
from apex.parallel import SyncBatchNorm
from runx.logx import logx
def fixed_padding(inputs, kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
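# Worked example (illustrative): for kernel_size=3 and dilation=2 the
# effective kernel size is 3 + (3 - 1) * (2 - 1) = 5, so pad_total = 4 and the
# input is padded by 2 pixels on each side, keeping a stride-1 convolution's
# output the same spatial size as its input.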
class SeparableConv2d(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1,
bias=False, BatchNorm=None):
super(SeparableConv2d, self).__init__()
self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0,
dilation, groups=inplanes, bias=bias)
self.bn = BatchNorm(inplanes)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = fixed_padding(x, self.conv1.kernel_size[0],
dilation=self.conv1.dilation[0])
x = self.conv1(x)
x = self.bn(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(self, inplanes, planes, reps, stride=1, dilation=1,
BatchNorm=None, start_with_relu=True, grow_first=True,
is_last=False):
super(Block, self).__init__()
if planes != inplanes or stride != 1:
self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride,
bias=False)
self.skipbn = BatchNorm(planes)
else:
self.skip = None
self.relu = nn.ReLU(inplace=True)
rep = []
filters = inplanes
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation,
BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
filters = planes
for i in range(reps - 1):
rep.append(self.relu)
rep.append(SeparableConv2d(filters, filters, 3, 1, dilation,
BatchNorm=BatchNorm))
rep.append(BatchNorm(filters))
if not grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation,
BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
if stride != 1:
rep.append(self.relu)
rep.append(SeparableConv2d(planes, planes, 3, 2,
BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
if stride == 1 and is_last:
rep.append(self.relu)
rep.append(SeparableConv2d(planes, planes, 3, 1,
BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
if not start_with_relu:
rep = rep[1:]
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x = x + skip
return x
class xception71(nn.Module):
"""
    Modified Aligned Xception
"""
def __init__(self, output_stride, BatchNorm,
pretrained=True):
super(xception71, self).__init__()
self.output_stride = output_stride
if self.output_stride == 16:
middle_block_dilation = 1
exit_block_dilations = (1, 2)
exit_stride = 2
elif self.output_stride == 8:
middle_block_dilation = 2
exit_block_dilations = (2, 4)
exit_stride = 1
else:
raise NotImplementedError
# Entry flow
self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False)
self.bn1 = BatchNorm(32)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
self.bn2 = BatchNorm(64)
self.block1 = Block(64, 128, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False)
# stride4
self.block2 = Block(128, 256, reps=2, stride=1, BatchNorm=BatchNorm, start_with_relu=False,
grow_first=True)
self.block3 = Block(256, 728, reps=2, stride=2, BatchNorm=BatchNorm,
start_with_relu=True, grow_first=True, is_last=True)
# stride8
# Middle flow
self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
# Exit flow
self.block20 = Block(728, 1024, reps=2, stride=exit_stride, dilation=exit_block_dilations[0],
BatchNorm=BatchNorm, start_with_relu=True, grow_first=False, is_last=True)
self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
self.bn3 = BatchNorm(1536)
self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
self.bn4 = BatchNorm(1536)
self.conv5 = SeparableConv2d(1536, 2048, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
self.bn5 = BatchNorm(2048)
# Init weights
self._init_weight()
# Load pretrained model
if pretrained:
self._load_pretrained_model()
def forward(self, x):
# Entry flow
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
str2 = self.relu(x)
# s2
str4 = self.block1(str2)
str4 = self.relu(str4)
# s4
x = self.block2(str4)
str8 = self.block3(x)
# s8
if self.output_stride == 8:
low_level_feat, high_level_feat = str2, str4
else:
low_level_feat, high_level_feat = str4, str8
# Middle flow
x = self.block4(str8)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.block13(x)
x = self.block14(x)
x = self.block15(x)
x = self.block16(x)
x = self.block17(x)
x = self.block18(x)
x = self.block19(x)
# Exit flow
x = self.block20(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu(x)
return low_level_feat, high_level_feat, x
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, SyncBatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_pretrained_model(self):
pretrained_model = cfg.MODEL.X71_CHECKPOINT
ckpt = torch.load(pretrained_model, map_location='cpu')
model_dict = {k.replace('module.', ''): v for k, v in
ckpt['model_dict'].items()}
state_dict = self.state_dict()
state_dict.update(model_dict)
self.load_state_dict(state_dict, strict=False)
del ckpt
logx.msg('Loaded {} weights'.format(pretrained_model))
if __name__ == "__main__":
model = xception71(BatchNorm=Norm2d, pretrained=True,
output_stride=16)
input = torch.rand(1, 3, 512, 512)
    low_level_feat, high_level_feat, output = model(input)
print(output.size())
print(low_level_feat.size())
| semantic-segmentation-main | network/xception.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import torch
from torch import nn
from config import cfg
from network.mynn import initialize_weights, Norm2d, Upsample, Upsample2
from network.mynn import ResizeX, scale_as
from network.utils import get_aspp, get_trunk, ConvBnRelu
from network.utils import make_seg_head, make_attn_head
from utils.misc import fmt_scale
class MscaleBase(nn.Module):
"""
Multi-scale attention segmentation model base class
"""
def __init__(self):
super(MscaleBase, self).__init__()
self.criterion = None
self.fuse_aspp = False
def _fwd(self, x, aspp_in=None):
pass
def recurse_fuse_fwd(self, x, scales, aspp_lo=None, attn_lo=None):
"""
recursive eval for n-scales
target resolution is fixed at 1.0
[0.5, 1.0]:
p_0.5, aspp_0.5, attn_0.5 = fwd(attn,aspp=None)
p_1.0 = recurse([1.0], aspp_0.5, attn_0.5)
p_1.0 = fwd(attn_0.5, aspp_0.5)
output = attn_0.5 * p_0.5 + (1 - attn_0.5) * p_1.0
"""
this_scale = scales.pop()
if this_scale == 1.0:
x_resize = x
else:
x_resize = ResizeX(x, this_scale)
p, attn, aspp = self._fwd(x_resize, attn_lo=attn_lo, aspp_lo=aspp_lo)
if this_scale == 1.0:
p_1x = p
attn_1x = attn
else:
p_1x = scale_as(p, x)
attn_1x = scale_as(attn, x)
if len(scales) == 0:
output = p_1x
else:
output = attn_1x * p_1x
p_next, _ = self.recurse_fuse_fwd(x, scales,
attn_lo=attn, aspp_lo=aspp)
output += (1 - attn_1x) * p_next
return output, attn_1x
def nscale_fused_forward(self, inputs, scales):
"""
multi-scale evaluation for model with fused_aspp feature
Evaluation must happen in two directions: from low to high to feed
aspp features forward, then back down high to low to apply attention
such that the lower scale gets higher priority
"""
x_1x = inputs['images']
assert 1.0 in scales, 'expected 1.0 to be the target scale'
# Evaluation must happen low to high so that we can feed the ASPP
# features forward to higher scales
scales = sorted(scales, reverse=True)
# Recursively evaluate from low to high scales
pred, attn = self.recurse_fuse_fwd(x_1x, scales)
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(pred, gts)
return loss
else:
return {'pred': pred, 'attn_10x': attn}
def nscale_forward(self, inputs, scales):
"""
Hierarchical attention, primarily used for getting best inference
results.
We use attention at multiple scales, giving priority to the lower
resolutions. For example, if we have 4 scales {0.5, 1.0, 1.5, 2.0},
then evaluation is done as follows:
p_joint = attn_1.5 * p_1.5 + (1 - attn_1.5) * down(p_2.0)
p_joint = attn_1.0 * p_1.0 + (1 - attn_1.0) * down(p_joint)
        p_joint = up(attn_0.5 * p_0.5) + (1 - up(attn_0.5)) * p_joint
The target scale is always 1.0, and 1.0 is expected to be part of the
list of scales. When predictions are done at greater than 1.0 scale,
the predictions are downsampled before combining with the next lower
scale.
Inputs:
scales - a list of scales to evaluate
inputs - dict containing 'images', the input, and 'gts', the ground
truth mask
Output:
If training, return loss, else return prediction + attention
"""
x_1x = inputs['images']
assert 1.0 in scales, 'expected 1.0 to be the target scale'
# Lower resolution provides attention for higher rez predictions,
# so we evaluate in order: high to low
scales = sorted(scales, reverse=True)
pred = None
output_dict = {}
for s in scales:
x = ResizeX(x_1x, s)
bs = x.shape[0]
scale_float = torch.Tensor(bs).fill_(s)
p, attn, _aspp_attn, _aspp = self._fwd(x, scale_float=scale_float)
output_dict[fmt_scale('pred', s)] = p
if s != 2.0:
output_dict[fmt_scale('attn', s)] = attn
if pred is None:
pred = p
elif s >= 1.0:
# downscale previous
pred = scale_as(pred, p)
pred = attn * p + (1 - attn) * pred
else:
# upscale current
p = attn * p
p = scale_as(p, pred)
attn = scale_as(attn, pred)
pred = p + (1 - attn) * pred
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(pred, gts)
return loss
else:
output_dict['pred'] = pred
return output_dict
def two_scale_forward(self, inputs):
assert 'images' in inputs
x_1x = inputs['images']
x_lo = ResizeX(x_1x, cfg.MODEL.MSCALE_LO_SCALE)
pred_05x, attn_05x, aspp_attn, aspp_lo = \
self._fwd(x_lo)
p_1x, _, _, _ = self._fwd(x_1x, aspp_lo=aspp_lo,
aspp_attn=aspp_attn)
p_lo = attn_05x * pred_05x
p_lo = scale_as(p_lo, p_1x)
logit_attn = scale_as(attn_05x, p_1x)
joint_pred = p_lo + (1 - logit_attn) * p_1x
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(joint_pred, gts)
# Optionally, apply supervision to the multi-scale predictions
# directly. Turn off RMI to keep things lightweight
if cfg.LOSS.SUPERVISED_MSCALE_WT:
scaled_pred_05x = scale_as(pred_05x, p_1x)
loss_lo = self.criterion(scaled_pred_05x, gts, do_rmi=False)
loss_hi = self.criterion(p_1x, gts, do_rmi=False)
loss += cfg.LOSS.SUPERVISED_MSCALE_WT * loss_lo
loss += cfg.LOSS.SUPERVISED_MSCALE_WT * loss_hi
return loss
else:
output_dict = {
'pred': joint_pred,
'pred_05x': pred_05x,
'pred_10x': p_1x,
'attn_05x': attn_05x,
}
return output_dict
def forward(self, inputs):
if cfg.MODEL.N_SCALES and not self.training:
if self.fuse_aspp:
return self.nscale_fused_forward(inputs, cfg.MODEL.N_SCALES)
else:
return self.nscale_forward(inputs, cfg.MODEL.N_SCALES)
return self.two_scale_forward(inputs)
class MscaleV3Plus(MscaleBase):
"""
DeepLabV3Plus-based mscale segmentation model
"""
def __init__(self, num_classes, trunk='wrn38', criterion=None,
use_dpc=False, fuse_aspp=False, attn_2b=False):
super(MscaleV3Plus, self).__init__()
self.criterion = criterion
self.fuse_aspp = fuse_aspp
self.attn_2b = attn_2b
self.backbone, s2_ch, _s4_ch, high_level_ch = get_trunk(trunk)
self.aspp, aspp_out_ch = get_aspp(high_level_ch,
bottleneck_ch=256,
output_stride=8,
dpc=use_dpc)
self.bot_fine = nn.Conv2d(s2_ch, 48, kernel_size=1, bias=False)
self.bot_aspp = nn.Conv2d(aspp_out_ch, 256, kernel_size=1, bias=False)
# Semantic segmentation prediction head
bot_ch = cfg.MODEL.SEGATTN_BOT_CH
self.final = nn.Sequential(
nn.Conv2d(256 + 48, bot_ch, kernel_size=3, padding=1, bias=False),
Norm2d(bot_ch),
nn.ReLU(inplace=True),
nn.Conv2d(bot_ch, bot_ch, kernel_size=3, padding=1, bias=False),
Norm2d(bot_ch),
nn.ReLU(inplace=True),
nn.Conv2d(bot_ch, num_classes, kernel_size=1, bias=False))
# Scale-attention prediction head
if self.attn_2b:
attn_ch = 2
else:
attn_ch = 1
scale_in_ch = 256 + 48
self.scale_attn = make_attn_head(in_ch=scale_in_ch,
out_ch=attn_ch)
if cfg.OPTIONS.INIT_DECODER:
initialize_weights(self.bot_fine)
initialize_weights(self.bot_aspp)
initialize_weights(self.scale_attn)
initialize_weights(self.final)
else:
initialize_weights(self.final)
def _build_scale_tensor(self, scale_float, shape):
"""
Fill a 2D tensor with a constant scale value
"""
bs = scale_float.shape[0]
scale_tensor = None
for b in range(bs):
a_tensor = torch.Tensor(1, 1, *shape)
a_tensor.fill_(scale_float[b])
if scale_tensor is None:
scale_tensor = a_tensor
else:
scale_tensor = torch.cat([scale_tensor, a_tensor])
scale_tensor = scale_tensor.cuda()
return scale_tensor
def _fwd(self, x, aspp_lo=None, aspp_attn=None, scale_float=None):
x_size = x.size()
s2_features, _, final_features = self.backbone(x)
aspp = self.aspp(final_features)
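# fuse_aspp: blend the ASPP features computed at the low-resolution pass
# into this pass, gated by the (resized) ASPP attention, before decoding.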
if self.fuse_aspp and \
aspp_lo is not None and aspp_attn is not None:
aspp_attn = scale_as(aspp_attn, aspp)
aspp_lo = scale_as(aspp_lo, aspp)
aspp = aspp_attn * aspp_lo + (1 - aspp_attn) * aspp
conv_aspp = self.bot_aspp(aspp)
conv_s2 = self.bot_fine(s2_features)
conv_aspp = Upsample(conv_aspp, s2_features.size()[2:])
cat_s4 = [conv_s2, conv_aspp]
cat_s4_attn = [conv_s2, conv_aspp]
cat_s4 = torch.cat(cat_s4, 1)
cat_s4_attn = torch.cat(cat_s4_attn, 1)
final = self.final(cat_s4)
scale_attn = self.scale_attn(cat_s4_attn)
out = Upsample(final, x_size[2:])
scale_attn = Upsample(scale_attn, x_size[2:])
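# With attn_2b, the attention head emits two channels: channel 0 gates the
# segmentation logits and the remaining channel gates the ASPP features used
# by fuse_aspp. Otherwise a single channel serves both roles.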
if self.attn_2b:
logit_attn = scale_attn[:, 0:1, :, :]
aspp_attn = scale_attn[:, 1:, :, :]
else:
logit_attn = scale_attn
aspp_attn = scale_attn
return out, logit_attn, aspp_attn, aspp
def DeepV3R50(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='resnet-50', criterion=criterion)
def DeepV3W38(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='wrn38', criterion=criterion)
def DeepV3W38Fuse(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='wrn38', criterion=criterion,
fuse_aspp=True)
def DeepV3W38Fuse2(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='wrn38', criterion=criterion,
fuse_aspp=True, attn_2b=True)
def DeepV3EffB4(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='efficientnet_b4',
criterion=criterion)
def DeepV3EffB4Fuse(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='efficientnet_b4',
criterion=criterion, fuse_aspp=True)
def DeepV3X71(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='xception71', criterion=criterion)
class MscaleDeeper(MscaleBase):
"""
Panoptic DeepLab-style semantic segmentation network
(output stride 8 only)
"""
def __init__(self, num_classes, trunk='wrn38', criterion=None,
fuse_aspp=False, attn_2b=False):
super(MscaleDeeper, self).__init__()
self.criterion = criterion
self.fuse_aspp = fuse_aspp
self.attn_2b = attn_2b
self.backbone, s2_ch, s4_ch, high_level_ch = get_trunk(
trunk_name=trunk, output_stride=8)
self.aspp, aspp_out_ch = get_aspp(high_level_ch, bottleneck_ch=256,
output_stride=8)
self.convs2 = nn.Conv2d(s2_ch, 32, kernel_size=1, bias=False)
self.convs4 = nn.Conv2d(s4_ch, 64, kernel_size=1, bias=False)
self.conv_up1 = nn.Conv2d(aspp_out_ch, 256, kernel_size=1, bias=False)
self.conv_up2 = ConvBnRelu(256 + 64, 256, kernel_size=5, padding=2)
self.conv_up3 = ConvBnRelu(256 + 32, 256, kernel_size=5, padding=2)
self.conv_up5 = nn.Conv2d(256, num_classes, kernel_size=1, bias=False)
# Scale-attention prediction head
if self.attn_2b:
attn_ch = 2
else:
attn_ch = 1
self.scale_attn = make_attn_head(in_ch=256,
out_ch=attn_ch)
if cfg.OPTIONS.INIT_DECODER:
initialize_weights(self.convs2, self.convs4, self.conv_up1,
self.conv_up2, self.conv_up3, self.conv_up5,
self.scale_attn)
def _fwd(self, x, aspp_lo=None, aspp_attn=None):
s2_features, s4_features, final_features = self.backbone(x)
s2_features = self.convs2(s2_features)
s4_features = self.convs4(s4_features)
aspp = self.aspp(final_features)
if self.fuse_aspp and \
aspp_lo is not None and aspp_attn is not None:
aspp_attn = scale_as(aspp_attn, aspp)
aspp_lo = scale_as(aspp_lo, aspp)
aspp = aspp_attn * aspp_lo + (1 - aspp_attn) * aspp
x = self.conv_up1(aspp)
x = Upsample2(x)
x = torch.cat([x, s4_features], 1)
x = self.conv_up2(x)
x = Upsample2(x)
x = torch.cat([x, s2_features], 1)
up3 = self.conv_up3(x)
out = self.conv_up5(up3)
out = Upsample2(out)
scale_attn = self.scale_attn(up3)
scale_attn = Upsample2(scale_attn)
if self.attn_2b:
logit_attn = scale_attn[:, 0:1, :, :]
aspp_attn = scale_attn[:, 1:, :, :]
else:
logit_attn = scale_attn
aspp_attn = scale_attn
return out, logit_attn, aspp_attn, aspp
def DeeperW38(num_classes, criterion, s2s4=True):
return MscaleDeeper(num_classes=num_classes, criterion=criterion,
trunk='wrn38')
def DeeperX71(num_classes, criterion, s2s4=True):
return MscaleDeeper(num_classes=num_classes, criterion=criterion,
trunk='xception71')
def DeeperEffB4(num_classes, criterion, s2s4=True):
return MscaleDeeper(num_classes=num_classes, criterion=criterion,
trunk='efficientnet_b4')
class MscaleBasic(MscaleBase):
"""
"""
def __init__(self, num_classes, trunk='hrnetv2', criterion=None):
super(MscaleBasic, self).__init__()
self.criterion = criterion
self.backbone, _, _, high_level_ch = get_trunk(
trunk_name=trunk, output_stride=8)
self.cls_head = make_seg_head(in_ch=high_level_ch,
out_ch=num_classes)
self.scale_attn = make_attn_head(in_ch=high_level_ch,
out_ch=1)
def _fwd(self, x, aspp_lo=None, aspp_attn=None, scale_float=None):
_, _, final_features = self.backbone(x)
attn = self.scale_attn(final_features)
pred = self.cls_head(final_features)
attn = scale_as(attn, x)
pred = scale_as(pred, x)
return pred, attn, None, None
def HRNet(num_classes, criterion, s2s4=None):
return MscaleBasic(num_classes=num_classes, criterion=criterion,
trunk='hrnetv2')
class ASPP(MscaleBase):
"""
ASPP-based Mscale
"""
def __init__(self, num_classes, trunk='hrnetv2', criterion=None):
super(ASPP, self).__init__()
self.criterion = criterion
self.backbone, s2_ch, _s4_ch, high_level_ch = get_trunk(trunk)
self.aspp, aspp_out_ch = get_aspp(high_level_ch,
bottleneck_ch=cfg.MODEL.ASPP_BOT_CH,
output_stride=8)
self.bot_aspp = nn.Conv2d(aspp_out_ch, 256, kernel_size=1, bias=False)
self.final = make_seg_head(in_ch=256, out_ch=num_classes)
self.scale_attn = make_attn_head(in_ch=256, out_ch=1)
initialize_weights(self.final)
def _fwd(self, x, aspp_lo=None, aspp_attn=None, scale_float=None):
x_size = x.size()
_, _, final_features = self.backbone(x)
aspp = self.aspp(final_features)
aspp = self.bot_aspp(aspp)
final = self.final(aspp)
scale_attn = self.scale_attn(aspp)
out = Upsample(final, x_size[2:])
scale_attn = Upsample(scale_attn, x_size[2:])
logit_attn = scale_attn
aspp_attn = scale_attn
return out, logit_attn, aspp_attn, aspp
def HRNet_ASP(num_classes, criterion, s2s4=None):
return ASPP(num_classes=num_classes, criterion=criterion, trunk='hrnetv2')
| semantic-segmentation-main | network/mscale.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
This is an alternative implementation of mscale, where we feed pairs of
features from both lower and higher resolution images into the attention head.
"""
import torch
from torch import nn
from network.mynn import initialize_weights, Norm2d, Upsample
from network.mynn import ResizeX, scale_as
from network.utils import get_aspp, get_trunk
from network.utils import make_seg_head, make_attn_head
from config import cfg
class MscaleBase(nn.Module):
"""
Multi-scale attention segmentation model base class
"""
def __init__(self):
super(MscaleBase, self).__init__()
self.criterion = None
def _fwd(self, x):
pass
def nscale_forward(self, inputs, scales):
"""
Hierarchical attention, primarily used for getting best inference
results.
We use attention at multiple scales, giving priority to the lower
resolutions. For example, if we have 4 scales {0.5, 1.0, 1.5, 2.0},
then evaluation is done as follows:
p_joint = attn_1.5 * p_1.5 + (1 - attn_1.5) * down(p_2.0)
p_joint = attn_1.0 * p_1.0 + (1 - attn_1.0) * down(p_joint)
p_joint = up(attn_0.5 * p_0.5) + (1 - up(attn_0.5)) * p_joint
The target scale is always 1.0, and 1.0 is expected to be part of the
list of scales. When predictions are done at greater than 1.0 scale,
the predictions are downsampled before combining with the next lower
scale.
Inputs:
scales - a list of scales to evaluate
inputs - dict containing 'images', the input, and 'gts', the ground
truth mask
Output:
If training, return loss, else return prediction + attention
"""
x_1x = inputs['images']
assert 1.0 in scales, 'expected 1.0 to be the target scale'
# Lower resolution provides attention for higher-resolution predictions,
# so we evaluate in order: high to low
scales = sorted(scales, reverse=True)
pred = None
last_feats = None
for idx, s in enumerate(scales):
x = ResizeX(x_1x, s)
p, feats = self._fwd(x)
# Generate attention prediction
if idx > 0:
assert last_feats is not None
# downscale feats
last_feats = scale_as(last_feats, feats)
cat_feats = torch.cat([feats, last_feats], 1)
attn = self.scale_attn(cat_feats)
attn = scale_as(attn, p)
if pred is None:
# This is the top scale prediction
pred = p
elif s >= 1.0:
# downscale previous
pred = scale_as(pred, p)
pred = attn * p + (1 - attn) * pred
else:
# upscale current
p = attn * p
p = scale_as(p, pred)
attn = scale_as(attn, pred)
pred = p + (1 - attn) * pred
last_feats = feats
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(pred, gts)
return loss
else:
# FIXME: should add multi-scale values for pred and attn
return {'pred': pred,
'attn_10x': attn}
def two_scale_forward(self, inputs):
assert 'images' in inputs
x_1x = inputs['images']
x_lo = ResizeX(x_1x, cfg.MODEL.MSCALE_LO_SCALE)
p_lo, feats_lo = self._fwd(x_lo)
p_1x, feats_hi = self._fwd(x_1x)
feats_hi = scale_as(feats_hi, feats_lo)
cat_feats = torch.cat([feats_lo, feats_hi], 1)
logit_attn = self.scale_attn(cat_feats)
logit_attn = scale_as(logit_attn, p_lo)
p_lo = logit_attn * p_lo
p_lo = scale_as(p_lo, p_1x)
logit_attn = scale_as(logit_attn, p_1x)
joint_pred = p_lo + (1 - logit_attn) * p_1x
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(joint_pred, gts)
return loss
else:
# FIXME: should add multi-scale values for pred and attn
return {'pred': joint_pred,
'attn_10x': logit_attn}
def forward(self, inputs):
if cfg.MODEL.N_SCALES and not self.training:
return self.nscale_forward(inputs, cfg.MODEL.N_SCALES)
return self.two_scale_forward(inputs)
class MscaleV3Plus(MscaleBase):
"""
DeepLabV3Plus-based mscale segmentation model
"""
def __init__(self, num_classes, trunk='wrn38', criterion=None):
super(MscaleV3Plus, self).__init__()
self.criterion = criterion
self.backbone, s2_ch, _s4_ch, high_level_ch = get_trunk(trunk)
self.aspp, aspp_out_ch = get_aspp(high_level_ch,
bottleneck_ch=256,
output_stride=8)
self.bot_fine = nn.Conv2d(s2_ch, 48, kernel_size=1, bias=False)
self.bot_aspp = nn.Conv2d(aspp_out_ch, 256, kernel_size=1, bias=False)
# Semantic segmentation prediction head
self.final = nn.Sequential(
nn.Conv2d(256 + 48, 256, kernel_size=3, padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, num_classes, kernel_size=1, bias=False))
# Scale-attention prediction head
scale_in_ch = 2 * (256 + 48)
self.scale_attn = nn.Sequential(
nn.Conv2d(scale_in_ch, 256, kernel_size=3, padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
Norm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 1, kernel_size=1, bias=False),
nn.Sigmoid())
if cfg.OPTIONS.INIT_DECODER:
initialize_weights(self.bot_fine)
initialize_weights(self.bot_aspp)
initialize_weights(self.scale_attn)
initialize_weights(self.final)
else:
initialize_weights(self.final)
def _fwd(self, x):
x_size = x.size()
s2_features, _, final_features = self.backbone(x)
aspp = self.aspp(final_features)
conv_aspp = self.bot_aspp(aspp)
conv_s2 = self.bot_fine(s2_features)
conv_aspp = Upsample(conv_aspp, s2_features.size()[2:])
cat_s4 = [conv_s2, conv_aspp]
cat_s4 = torch.cat(cat_s4, 1)
final = self.final(cat_s4)
out = Upsample(final, x_size[2:])
return out, cat_s4
def DeepV3R50(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='resnet-50', criterion=criterion)
class Basic(MscaleBase):
"""
"""
def __init__(self, num_classes, trunk='hrnetv2', criterion=None):
super(Basic, self).__init__()
self.criterion = criterion
self.backbone, _, _, high_level_ch = get_trunk(
trunk_name=trunk, output_stride=8)
self.cls_head = make_seg_head(in_ch=high_level_ch, bot_ch=256,
out_ch=num_classes)
self.scale_attn = make_attn_head(in_ch=high_level_ch * 2, bot_ch=256,
out_ch=1)
def two_scale_forward(self, inputs):
assert 'images' in inputs
x_1x = inputs['images']
x_lo = ResizeX(x_1x, cfg.MODEL.MSCALE_LO_SCALE)
p_lo, feats_lo = self._fwd(x_lo)
p_1x, feats_hi = self._fwd(x_1x)
feats_lo = scale_as(feats_lo, feats_hi)
cat_feats = torch.cat([feats_lo, feats_hi], 1)
logit_attn = self.scale_attn(cat_feats)
logit_attn_lo = scale_as(logit_attn, p_lo)
logit_attn_1x = scale_as(logit_attn, p_1x)
p_lo = logit_attn_lo * p_lo
p_lo = scale_as(p_lo, p_1x)
joint_pred = p_lo + (1 - logit_attn_1x) * p_1x
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(joint_pred, gts)
return loss
else:
return joint_pred, logit_attn_1x
def _fwd(self, x, aspp_lo=None, aspp_attn=None, scale_float=None):
_, _, final_features = self.backbone(x)
pred = self.cls_head(final_features)
pred = scale_as(pred, x)
return pred, final_features
def HRNet(num_classes, criterion, s2s4=None):
return Basic(num_classes=num_classes, criterion=criterion,
trunk='hrnetv2')
| semantic-segmentation-main | network/mscale2.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os
import os.path as path
from config import cfg
from runx.logx import logx
from datasets.base_loader import BaseLoader
import datasets.cityscapes_labels as cityscapes_labels
import datasets.uniform as uniform
from datasets.utils import make_dataset_folder
def cities_cv_split(root, split, cv_split):
"""
Find cities that correspond to a given split of the data. We split the data
such that a given city belongs to either train or val, but never both. cv0
is defined to be the default split.
all_cities = [x x x x x x x x x x x x]
val:
split0 [x x x ]
split1 [ x x x ]
split2 [ x x x ]
trn:
split0 [ x x x x x x x x x]
split1 [x x x x x x x x x]
split2 [x x x x x x x x ]
split - train/val/test
cv_split - 0,1,2,3
cv_split == 3 means use train + val
"""
trn_path = path.join(root, 'leftImg8bit_trainvaltest/leftImg8bit', 'train')
val_path = path.join(root, 'leftImg8bit_trainvaltest/leftImg8bit', 'val')
trn_cities = ['train/' + c for c in os.listdir(trn_path)]
trn_cities = sorted(trn_cities)  # sort to ensure reproducibility
val_cities = ['val/' + c for c in os.listdir(val_path)]
all_cities = val_cities + trn_cities
if cv_split == 3:
logx.msg('cv split {} {} {}'.format(split, cv_split, all_cities))
return all_cities
num_val_cities = len(val_cities)
num_cities = len(all_cities)
offset = cv_split * num_cities // cfg.DATASET.CV_SPLITS
cities = []
for j in range(num_cities):
if j >= offset and j < (offset + num_val_cities):
if split == 'val':
cities.append(all_cities[j])
else:
if split == 'train':
cities.append(all_cities[j])
logx.msg('cv split {} {} {}'.format(split, cv_split, cities))
return cities
def coarse_cities(root):
"""
Find coarse cities
"""
split = 'train_extra'
coarse_path = path.join(root, 'leftImg8bit_trainextra/leftImg8bit',
split)
coarse_cities = [f'{split}/' + c for c in os.listdir(coarse_path)]
logx.msg(f'found {len(coarse_cities)} coarse cities')
return coarse_cities
class Loader(BaseLoader):
num_classes = 19
ignore_label = 255
trainid_to_name = {}
color_mapping = []
def __init__(self, mode, quality='fine', joint_transform_list=None,
img_transform=None, label_transform=None, eval_folder=None):
super(Loader, self).__init__(quality=quality, mode=mode,
joint_transform_list=joint_transform_list,
img_transform=img_transform,
label_transform=label_transform)
######################################################################
# Cityscapes-specific stuff:
######################################################################
self.root = cfg.DATASET.CITYSCAPES_DIR
self.id_to_trainid = cityscapes_labels.label2trainid
self.trainid_to_name = cityscapes_labels.trainId2name
self.fill_colormap()
img_ext = 'png'
mask_ext = 'png'
img_root = path.join(self.root, 'leftImg8bit_trainvaltest/leftImg8bit')
mask_root = path.join(self.root, 'gtFine_trainvaltest/gtFine')
if mode == 'folder':
self.all_imgs = make_dataset_folder(eval_folder)
else:
self.fine_cities = cities_cv_split(self.root, mode, cfg.DATASET.CV)
self.all_imgs = self.find_cityscapes_images(
self.fine_cities, img_root, mask_root, img_ext, mask_ext)
logx.msg(f'cn num_classes {self.num_classes}')
self.fine_centroids = uniform.build_centroids(self.all_imgs,
self.num_classes,
self.train,
cv=cfg.DATASET.CV,
id2trainid=self.id_to_trainid)
self.centroids = self.fine_centroids
if cfg.DATASET.COARSE_BOOST_CLASSES and mode == 'train':
self.coarse_cities = coarse_cities(self.root)
img_root = path.join(self.root,
'leftImg8bit_trainextra/leftImg8bit')
mask_root = path.join(self.root, 'gtCoarse', 'gtCoarse')
self.coarse_imgs = self.find_cityscapes_images(
self.coarse_cities, img_root, mask_root, img_ext, mask_ext,
fine_coarse='gtCoarse')
if cfg.DATASET.CLASS_UNIFORM_PCT:
custom_coarse = (cfg.DATASET.CUSTOM_COARSE_PROB is not None)
self.coarse_centroids = uniform.build_centroids(
self.coarse_imgs, self.num_classes, self.train,
coarse=(not custom_coarse), custom_coarse=custom_coarse,
id2trainid=self.id_to_trainid)
for cid in cfg.DATASET.COARSE_BOOST_CLASSES:
self.centroids[cid].extend(self.coarse_centroids[cid])
else:
self.all_imgs.extend(self.coarse_imgs)
self.build_epoch()
def disable_coarse(self):
"""
Turn off using coarse images in training
"""
self.centroids = self.fine_centroids
def only_coarse(self):
"""
Use only coarse-annotated images in training
"""
print('==============+Running Only Coarse+===============')
self.centroids = self.coarse_centroids
def find_cityscapes_images(self, cities, img_root, mask_root, img_ext,
mask_ext, fine_coarse='gtFine'):
"""
Find image and segmentation mask files and return a list of
tuples of them.
Inputs:
img_root: path to parent directory of train/val/test dirs
mask_root: path to parent directory of train/val/test dirs
img_ext: image file extension
mask_ext: mask file extension
cities: a list of cities, each element in the form of 'train/a_city'
or 'val/a_city', for example.
"""
items = []
for city in cities:
img_dir = '{root}/{city}'.format(root=img_root, city=city)
for file_name in os.listdir(img_dir):
basename, ext = os.path.splitext(file_name)
assert ext == '.' + img_ext, '{} {}'.format(ext, img_ext)
full_img_fn = os.path.join(img_dir, file_name)
basename, ext = file_name.split('_leftImg8bit')
if cfg.DATASET.CUSTOM_COARSE_PROB and fine_coarse != 'gtFine':
mask_fn = f'{basename}_leftImg8bit.png'
cc_path = cfg.DATASET.CITYSCAPES_CUSTOMCOARSE
full_mask_fn = os.path.join(cc_path, city, mask_fn)
assert os.path.isfile(full_mask_fn), full_mask_fn
else:
mask_fn = f'{basename}_{fine_coarse}_labelIds{ext}'
full_mask_fn = os.path.join(mask_root, city, mask_fn)
items.append((full_img_fn, full_mask_fn))
logx.msg('mode {} found {} images'.format(self.mode, len(items)))
return items
def fill_colormap(self):
palette = [128, 64, 128,
244, 35, 232,
70, 70, 70,
102, 102, 156,
190, 153, 153,
153, 153, 153,
250, 170, 30,
220, 220, 0,
107, 142, 35,
152, 251, 152,
70, 130, 180,
220, 20, 60,
255, 0, 0,
0, 0, 142,
0, 0, 70,
0, 60, 100,
0, 80, 100,
0, 0, 230,
119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
self.color_mapping = palette
| semantic-segmentation-main | datasets/cityscapes.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Dataset setup and loaders
"""
import importlib
import torchvision.transforms as standard_transforms
import transforms.joint_transforms as joint_transforms
import transforms.transforms as extended_transforms
from torch.utils.data import DataLoader
from config import cfg, update_dataset_cfg, update_dataset_inst
from runx.logx import logx
from datasets.randaugment import RandAugment
def setup_loaders(args):
"""
Set up data loaders. Currently supports Cityscapes, Mapillary and ADE20k.
input: arguments passed by the user
return: training data loader, validation data loader, train_set
"""
# TODO add error checking to make sure class exists
logx.msg(f'dataset = {args.dataset}')
mod = importlib.import_module('datasets.{}'.format(args.dataset))
dataset_cls = getattr(mod, 'Loader')
logx.msg(f'ignore_label = {dataset_cls.ignore_label}')
update_dataset_cfg(num_classes=dataset_cls.num_classes,
ignore_label=dataset_cls.ignore_label)
######################################################################
# Define transformations, augmentations
######################################################################
# Joint transformations that must happen on both image and mask
if ',' in args.crop_size:
args.crop_size = [int(x) for x in args.crop_size.split(',')]
else:
args.crop_size = int(args.crop_size)
train_joint_transform_list = [
# TODO FIXME: move these hparams into cfg
joint_transforms.RandomSizeAndCrop(args.crop_size,
False,
scale_min=args.scale_min,
scale_max=args.scale_max,
full_size=args.full_crop_training,
pre_size=args.pre_size)]
train_joint_transform_list.append(
joint_transforms.RandomHorizontallyFlip())
if args.rand_augment is not None:
N, M = [int(i) for i in args.rand_augment.split(',')]
assert isinstance(N, int) and isinstance(M, int), \
f'Either N {N} or M {M} not integer'
train_joint_transform_list.append(RandAugment(N, M))
######################################################################
# Image only augmentations
######################################################################
train_input_transform = []
if args.color_aug:
train_input_transform += [extended_transforms.ColorJitter(
brightness=args.color_aug,
contrast=args.color_aug,
saturation=args.color_aug,
hue=args.color_aug)]
if args.bblur:
train_input_transform += [extended_transforms.RandomBilateralBlur()]
elif args.gblur:
train_input_transform += [extended_transforms.RandomGaussianBlur()]
mean_std = (cfg.DATASET.MEAN, cfg.DATASET.STD)
train_input_transform += [standard_transforms.ToTensor(),
standard_transforms.Normalize(*mean_std)]
train_input_transform = standard_transforms.Compose(train_input_transform)
val_input_transform = standard_transforms.Compose([
standard_transforms.ToTensor(),
standard_transforms.Normalize(*mean_std)
])
target_transform = extended_transforms.MaskToTensor()
if args.jointwtborder:
target_train_transform = \
extended_transforms.RelaxedBoundaryLossToTensor()
else:
target_train_transform = extended_transforms.MaskToTensor()
if args.eval == 'folder':
val_joint_transform_list = None
elif 'mapillary' in args.dataset:
if args.pre_size is None:
eval_size = 2177
else:
eval_size = args.pre_size
if cfg.DATASET.MAPILLARY_CROP_VAL:
val_joint_transform_list = [
joint_transforms.ResizeHeight(eval_size),
joint_transforms.CenterCropPad(eval_size)]
else:
val_joint_transform_list = [
joint_transforms.Scale(eval_size)]
else:
val_joint_transform_list = None
if args.eval is None or args.eval == 'val':
val_name = 'val'
elif args.eval == 'trn':
val_name = 'train'
elif args.eval == 'folder':
val_name = 'folder'
else:
raise ValueError('unknown eval mode {}'.format(args.eval))
######################################################################
# Create loaders
######################################################################
val_set = dataset_cls(
mode=val_name,
joint_transform_list=val_joint_transform_list,
img_transform=val_input_transform,
label_transform=target_transform,
eval_folder=args.eval_folder)
update_dataset_inst(dataset_inst=val_set)
if args.apex:
from datasets.sampler import DistributedSampler
val_sampler = DistributedSampler(val_set, pad=False, permutation=False,
consecutive_sample=False)
else:
val_sampler = None
val_loader = DataLoader(val_set, batch_size=args.bs_val,
num_workers=args.num_workers // 2,
shuffle=False, drop_last=False,
sampler=val_sampler)
if args.eval is not None:
# Don't create train dataloader if eval
train_set = None
train_loader = None
else:
train_set = dataset_cls(
mode='train',
joint_transform_list=train_joint_transform_list,
img_transform=train_input_transform,
label_transform=target_train_transform)
if args.apex:
from datasets.sampler import DistributedSampler
train_sampler = DistributedSampler(train_set, pad=True,
permutation=True,
consecutive_sample=False)
train_batch_size = args.bs_trn
else:
train_sampler = None
train_batch_size = args.bs_trn * args.ngpu
train_loader = DataLoader(train_set, batch_size=train_batch_size,
num_workers=args.num_workers,
shuffle=(train_sampler is None),
drop_last=True, sampler=train_sampler)
return train_loader, val_loader, train_set
| semantic-segmentation-main | datasets/__init__.py |
"""
# File taken from https://github.com/mcordts/cityscapesScripts/
# License File Available at:
# https://github.com/mcordts/cityscapesScripts/blob/master/license.txt
# ----------------------
# The Cityscapes Dataset
# ----------------------
#
#
# License agreement
# -----------------
#
# This dataset is made freely available to academic and non-academic entities for non-commercial purposes such as academic research, teaching, scientific publications, or personal experimentation. Permission is granted to use the data given that you agree:
#
# 1. That the dataset comes "AS IS", without express or implied warranty. Although every effort has been made to ensure accuracy, we (Daimler AG, MPI Informatics, TU Darmstadt) do not accept any responsibility for errors or omissions.
# 2. That you include a reference to the Cityscapes Dataset in any work that makes use of the dataset. For research papers, cite our preferred publication as listed on our website; for other media cite our preferred publication as listed on our website or link to the Cityscapes website.
# 3. That you do not distribute this dataset or modified versions. It is permissible to distribute derivative works in as far as they are abstract representations of this dataset (such as models trained on it or additional annotations that do not directly include any of our data) and do not allow to recover the dataset or something similar in character.
# 4. That you may not use the dataset or any derivative work for commercial purposes as, for example, licensing or selling the data, or using the data with a purpose to procure a commercial gain.
# 5. That all rights not expressly granted to you are reserved by us (Daimler AG, MPI Informatics, TU Darmstadt).
#
#
# Contact
# -------
#
# Marius Cordts, Mohamed Omran
# www.cityscapes-dataset.net
"""
from collections import namedtuple
#--------------------------------------------------------------------------------
# Definitions
#--------------------------------------------------------------------------------
# a label and all meta information
Label = namedtuple( 'Label' , [
'name' , # The identifier of this label, e.g. 'car', 'person', ... .
# We use them to uniquely name a class
'id' , # An integer ID that is associated with this label.
# The IDs are used to represent the label in ground truth images
# An ID of -1 means that this label does not have an ID and thus
# is ignored when creating ground truth images (e.g. license plate).
# Do not modify these IDs, since exactly these IDs are expected by the
# evaluation server.
'trainId' , # Feel free to modify these IDs as suitable for your method. Then create
# ground truth images with train IDs, using the tools provided in the
# 'preparation' folder. However, make sure to validate or submit results
# to our evaluation server using the regular IDs above!
# For trainIds, multiple labels might have the same ID. Then, these labels
# are mapped to the same class in the ground truth images. For the inverse
# mapping, we use the label that is defined first in the list below.
# For example, mapping all void-type classes to the same ID in training,
# might make sense for some approaches.
# Max value is 255!
'category' , # The name of the category that this label belongs to
'categoryId' , # The ID of this category. Used to create ground truth images
# on category level.
'hasInstances', # Whether this label distinguishes between single instances or not
'ignoreInEval', # Whether pixels having this class as ground truth label are ignored
# during evaluations or not
'color' , # The color of this label
] )
#--------------------------------------------------------------------------------
# A list of all labels
#--------------------------------------------------------------------------------
# Please adapt the train IDs as appropriate for your approach.
# Note that you might want to ignore labels with ID 255 during training.
# Further note that the current train IDs are only a suggestion. You can use whatever you like.
# Make sure to provide your results using the original IDs and not the training IDs.
# Note that many IDs are ignored in evaluation and thus you never need to predict these!
labels = [
# name id trainId category catId hasInstances ignoreInEval color
Label( 'unlabeled' , 0 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'ego vehicle' , 1 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'rectification border' , 2 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'out of roi' , 3 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'static' , 4 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'dynamic' , 5 , 255 , 'void' , 0 , False , True , (111, 74, 0) ),
Label( 'ground' , 6 , 255 , 'void' , 0 , False , True , ( 81, 0, 81) ),
Label( 'road' , 7 , 0 , 'flat' , 1 , False , False , (128, 64,128) ),
Label( 'sidewalk' , 8 , 1 , 'flat' , 1 , False , False , (244, 35,232) ),
Label( 'parking' , 9 , 255 , 'flat' , 1 , False , True , (250,170,160) ),
Label( 'rail track' , 10 , 255 , 'flat' , 1 , False , True , (230,150,140) ),
Label( 'building' , 11 , 2 , 'construction' , 2 , False , False , ( 70, 70, 70) ),
Label( 'wall' , 12 , 3 , 'construction' , 2 , False , False , (102,102,156) ),
Label( 'fence' , 13 , 4 , 'construction' , 2 , False , False , (190,153,153) ),
Label( 'guard rail' , 14 , 255 , 'construction' , 2 , False , True , (180,165,180) ),
Label( 'bridge' , 15 , 255 , 'construction' , 2 , False , True , (150,100,100) ),
Label( 'tunnel' , 16 , 255 , 'construction' , 2 , False , True , (150,120, 90) ),
Label( 'pole' , 17 , 5 , 'object' , 3 , False , False , (153,153,153) ),
Label( 'polegroup' , 18 , 255 , 'object' , 3 , False , True , (153,153,153) ),
Label( 'traffic light' , 19 , 6 , 'object' , 3 , False , False , (250,170, 30) ),
Label( 'traffic sign' , 20 , 7 , 'object' , 3 , False , False , (220,220, 0) ),
Label( 'vegetation' , 21 , 8 , 'nature' , 4 , False , False , (107,142, 35) ),
Label( 'terrain' , 22 , 9 , 'nature' , 4 , False , False , (152,251,152) ),
Label( 'sky' , 23 , 10 , 'sky' , 5 , False , False , ( 70,130,180) ),
Label( 'person' , 24 , 11 , 'human' , 6 , True , False , (220, 20, 60) ),
Label( 'rider' , 25 , 12 , 'human' , 6 , True , False , (255, 0, 0) ),
Label( 'car' , 26 , 13 , 'vehicle' , 7 , True , False , ( 0, 0,142) ),
Label( 'truck' , 27 , 14 , 'vehicle' , 7 , True , False , ( 0, 0, 70) ),
Label( 'bus' , 28 , 15 , 'vehicle' , 7 , True , False , ( 0, 60,100) ),
Label( 'caravan' , 29 , 255 , 'vehicle' , 7 , True , True , ( 0, 0, 90) ),
Label( 'trailer' , 30 , 255 , 'vehicle' , 7 , True , True , ( 0, 0,110) ),
Label( 'train' , 31 , 16 , 'vehicle' , 7 , True , False , ( 0, 80,100) ),
Label( 'motorcycle' , 32 , 17 , 'vehicle' , 7 , True , False , ( 0, 0,230) ),
Label( 'bicycle' , 33 , 18 , 'vehicle' , 7 , True , False , (119, 11, 32) ),
Label( 'license plate' , -1 , -1 , 'vehicle' , 7 , False , True , ( 0, 0,142) ),
]
#--------------------------------------------------------------------------------
# Create dictionaries for a fast lookup
#--------------------------------------------------------------------------------
# Please refer to the main method below for example usages!
# name to label object
name2label = { label.name : label for label in labels }
# id to label object
id2label = { label.id : label for label in labels }
# trainId to label object
trainId2label = { label.trainId : label for label in reversed(labels) }
# label2trainid
label2trainid = { label.id : label.trainId for label in labels }
# trainId to label object
trainId2name = { label.trainId : label.name for label in labels }
trainId2color = { label.trainId : label.color for label in labels }
# category to list of label objects
category2labels = {}
for label in labels:
category = label.category
if category in category2labels:
category2labels[category].append(label)
else:
category2labels[category] = [label]
#--------------------------------------------------------------------------------
# Assure single instance name
#--------------------------------------------------------------------------------
# returns the label name that describes a single instance (if possible)
# e.g. input | output
# ----------------------
# car | car
# cargroup | car
# foo | None
# foogroup | None
# skygroup | None
def assureSingleInstanceName( name ):
# if the name is known, it is not a group
if name in name2label:
return name
# test if the name actually denotes a group
if not name.endswith("group"):
return None
# remove group
name = name[:-len("group")]
# test if the new name exists
if not name in name2label:
return None
# test if the new name denotes a label that actually has instances
if not name2label[name].hasInstances:
return None
# all good then
return name
#--------------------------------------------------------------------------------
# Main for testing
#--------------------------------------------------------------------------------
# just a dummy main
if __name__ == "__main__":
# Print all the labels
print("List of cityscapes labels:")
print("")
print((" {:>21} | {:>3} | {:>7} | {:>14} | {:>10} | {:>12} | {:>12}".format( 'name', 'id', 'trainId', 'category', 'categoryId', 'hasInstances', 'ignoreInEval' )))
print((" " + ('-' * 98)))
for label in labels:
print((" {:>21} | {:>3} | {:>7} | {:>14} | {:>10} | {:>12} | {:>12}".format( label.name, label.id, label.trainId, label.category, label.categoryId, label.hasInstances, label.ignoreInEval )))
print("")
print("Example usages:")
# Map from name to label
name = 'car'
id = name2label[name].id
print(("ID of label '{name}': {id}".format( name=name, id=id )))
# Map from ID to label
category = id2label[id].category
print(("Category of label with ID '{id}': {category}".format( id=id, category=category )))
# Map from trainID to label
trainId = 0
name = trainId2label[trainId].name
print(("Name of label with trainID '{id}': {name}".format( id=trainId, name=name )))
| semantic-segmentation-main | datasets/cityscapes_labels.py |
import os
def make_dataset_folder(folder):
"""
Create a filename list for the images in the provided path
input: path to a directory containing *only* image files
returns: items list with an empty string filled in for the mask path
"""
items = os.listdir(folder)
items = [(os.path.join(folder, f), '') for f in items]
items = sorted(items)
print(f'Found {len(items)} folder imgs')
"""
orig_len = len(items)
rem = orig_len % 8
if rem != 0:
items = items[:-rem]
msg = 'Found {} folder imgs but altered to {} to be modulo-8'
msg = msg.format(orig_len, len(items))
print(msg)
"""
return items
| semantic-segmentation-main | datasets/utils.py |
# this code from: https://github.com/ildoonet/pytorch-randaugment
# code in this file is adapted from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import numpy as np
import torch
from PIL import Image, ImageOps, ImageEnhance, ImageDraw
from config import cfg
fillmask = cfg.DATASET.IGNORE_LABEL
fillcolor = (0, 0, 0)
def affine_transform(pair, affine_params):
img, mask = pair
img = img.transform(img.size, Image.AFFINE, affine_params,
resample=Image.BILINEAR, fillcolor=fillcolor)
mask = mask.transform(mask.size, Image.AFFINE, affine_params,
resample=Image.NEAREST, fillcolor=fillmask)
return img, mask
def ShearX(pair, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return affine_transform(pair, (1, v, 0, 0, 1, 0))
def ShearY(pair, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return affine_transform(pair, (1, 0, 0, v, 1, 0))
def TranslateX(pair, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
img, _ = pair
v = v * img.size[0]
return affine_transform(pair, (1, 0, v, 0, 1, 0))
def TranslateY(pair, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
img, _ = pair
v = v * img.size[1]
return affine_transform(pair, (1, 0, 0, 0, 1, v))
def TranslateXAbs(pair, v):  # absolute pixel offset in [0, 10]
assert 0 <= v <= 10
if random.random() > 0.5:
v = -v
return affine_transform(pair, (1, 0, v, 0, 1, 0))
def TranslateYAbs(pair, v):  # absolute pixel offset in [0, 10]
assert 0 <= v <= 10
if random.random() > 0.5:
v = -v
return affine_transform(pair, (1, 0, 0, 0, 1, v))
def Rotate(pair, v): # [-30, 30]
assert -30 <= v <= 30
if random.random() > 0.5:
v = -v
img, mask = pair
img = img.rotate(v, fillcolor=fillcolor)
mask = mask.rotate(v, resample=Image.NEAREST, fillcolor=fillmask)
return img, mask
def AutoContrast(pair, _):
img, mask = pair
return ImageOps.autocontrast(img), mask
def Invert(pair, _):
img, mask = pair
return ImageOps.invert(img), mask
def Equalize(pair, _):
img, mask = pair
return ImageOps.equalize(img), mask
def Flip(pair, _): # not from the paper
img, mask = pair
return ImageOps.mirror(img), ImageOps.mirror(mask)
def Solarize(pair, v): # [0, 256]
img, mask = pair
assert 0 <= v <= 256
return ImageOps.solarize(img, v), mask
def Posterize(pair, v): # [4, 8]
img, mask = pair
assert 4 <= v <= 8
v = int(v)
return ImageOps.posterize(img, v), mask
def Posterize2(pair, v): # [0, 4]
img, mask = pair
assert 0 <= v <= 4
v = int(v)
return ImageOps.posterize(img, v), mask
def Contrast(pair, v): # [0.1,1.9]
img, mask = pair
assert 0.1 <= v <= 1.9
return ImageEnhance.Contrast(img).enhance(v), mask
def Color(pair, v): # [0.1,1.9]
img, mask = pair
assert 0.1 <= v <= 1.9
return ImageEnhance.Color(img).enhance(v), mask
def Brightness(pair, v): # [0.1,1.9]
img, mask = pair
assert 0.1 <= v <= 1.9
return ImageEnhance.Brightness(img).enhance(v), mask
def Sharpness(pair, v): # [0.1,1.9]
img, mask = pair
assert 0.1 <= v <= 1.9
return ImageEnhance.Sharpness(img).enhance(v), mask
def Cutout(pair, v): # [0, 60] => percentage: [0, 0.2]
assert 0.0 <= v <= 0.2
if v <= 0.:
return pair
img, mask = pair
v = v * img.size[0]
return CutoutAbs(img, v), mask
def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2]
# assert 0 <= v <= 20
if v < 0:
return img
w, h = img.size
x0 = np.random.uniform(w)
y0 = np.random.uniform(h)
x0 = int(max(0, x0 - v / 2.))
y0 = int(max(0, y0 - v / 2.))
x1 = min(w, x0 + v)
y1 = min(h, y0 + v)
xy = (x0, y0, x1, y1)
color = (125, 123, 114)
# color = (0, 0, 0)
img = img.copy()
ImageDraw.Draw(img).rectangle(xy, color)
return img
def Identity(pair, v):
return pair
def augment_list():  # 16 operations and their ranges
# https://github.com/google-research/uda/blob/master/image/randaugment/policies.py#L57
l = [
(Identity, 0., 1.0),
(ShearX, 0., 0.3), # 0
(ShearY, 0., 0.3), # 1
(TranslateX, 0., 0.33), # 2
(TranslateY, 0., 0.33), # 3
(Rotate, 0, 30), # 4
(AutoContrast, 0, 1), # 5
(Invert, 0, 1), # 6
(Equalize, 0, 1), # 7
(Solarize, 0, 110), # 8
(Posterize, 4, 8), # 9
# (Contrast, 0.1, 1.9), # 10
(Color, 0.1, 1.9), # 11
(Brightness, 0.1, 1.9), # 12
(Sharpness, 0.1, 1.9), # 13
# (Cutout, 0, 0.2), # 14
# (SamplePairing(imgs), 0, 0.4), # 15
# (Flip, 1, 1),
]
return l
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = torch.Tensor(eigval)
self.eigvec = torch.Tensor(eigvec)
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone() \
.mul(alpha.view(1, 3).expand(3, 3)) \
.mul(self.eigval.view(1, 3).expand(3, 3)) \
.sum(1).squeeze()
return img.add(rgb.view(3, 1, 1).expand_as(img))
class CutoutDefault(object):
"""
Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
"""
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
class RandAugment:
def __init__(self, n, m):
self.n = n
self.m = m # [0, 30]
self.augment_list = augment_list()
def __call__(self, img, mask):
pair = img, mask
ops = random.choices(self.augment_list, k=self.n)
for op, minval, maxval in ops:
val = (float(self.m) / 30) * float(maxval - minval) + minval
pair = op(pair, val)
return pair
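# Usage sketch: RandAugment(n=2, m=10) applies 2 randomly chosen ops per
# call, each at magnitude 10/30 of its (minval, maxval) range, e.g.:
#   img, mask = RandAugment(2, 10)(img, mask)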
| semantic-segmentation-main | datasets/randaugment.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Generic dataloader base class
"""
import os
import glob
import numpy as np
import torch
from PIL import Image
from torch.utils import data
from config import cfg
from datasets import uniform
from runx.logx import logx
from utils.misc import tensor_to_pil
class BaseLoader(data.Dataset):
def __init__(self, quality, mode, joint_transform_list, img_transform,
label_transform):
super(BaseLoader, self).__init__()
self.quality = quality
self.mode = mode
self.joint_transform_list = joint_transform_list
self.img_transform = img_transform
self.label_transform = label_transform
self.train = mode == 'train'
self.id_to_trainid = {}
self.centroids = None
self.all_imgs = None
self.drop_mask = np.zeros((1024, 2048))
self.drop_mask[15:840, 14:2030] = 1.0
def build_epoch(self):
"""
For class uniform sampling ... every epoch, we want to recompute
which tiles from which images we want to sample from, so that the
sampling is uniformly random.
"""
self.imgs = uniform.build_epoch(self.all_imgs,
self.centroids,
self.num_classes,
self.train)
@staticmethod
def find_images(img_root, mask_root, img_ext, mask_ext):
"""
Find image and segmentation mask files and return a list of
tuples of them.
"""
img_path = '{}/*.{}'.format(img_root, img_ext)
imgs = glob.glob(img_path)
items = []
for full_img_fn in imgs:
img_dir, img_fn = os.path.split(full_img_fn)
img_name, _ = os.path.splitext(img_fn)
full_mask_fn = '{}.{}'.format(img_name, mask_ext)
full_mask_fn = os.path.join(mask_root, full_mask_fn)
assert os.path.exists(full_mask_fn)
items.append((full_img_fn, full_mask_fn))
return items
def disable_coarse(self):
pass
def colorize_mask(self, image_array):
"""
Colorize the segmentation mask
"""
new_mask = Image.fromarray(image_array.astype(np.uint8)).convert('P')
new_mask.putpalette(self.color_mapping)
return new_mask
def dump_images(self, img_name, mask, centroid, class_id, img):
img = tensor_to_pil(img)
outdir = 'new_dump_imgs_{}'.format(self.mode)
os.makedirs(outdir, exist_ok=True)
if centroid is not None:
dump_img_name = '{}_{}'.format(self.trainid_to_name[class_id],
img_name)
else:
dump_img_name = img_name
out_img_fn = os.path.join(outdir, dump_img_name + '.png')
out_msk_fn = os.path.join(outdir, dump_img_name + '_mask.png')
out_raw_fn = os.path.join(outdir, dump_img_name + '_mask_raw.png')
mask_img = self.colorize_mask(np.array(mask))
raw_img = Image.fromarray(np.array(mask))
img.save(out_img_fn)
mask_img.save(out_msk_fn)
raw_img.save(out_raw_fn)
def do_transforms(self, img, mask, centroid, img_name, class_id):
"""
Do transformations to image and mask
:returns: image, mask
"""
scale_float = 1.0
if self.joint_transform_list is not None:
for idx, xform in enumerate(self.joint_transform_list):
if idx == 0 and centroid is not None:
# HACK! Assume the first transform accepts a centroid
outputs = xform(img, mask, centroid)
else:
outputs = xform(img, mask)
if len(outputs) == 3:
img, mask, scale_float = outputs
else:
img, mask = outputs
if self.img_transform is not None:
img = self.img_transform(img)
if cfg.DATASET.DUMP_IMAGES:
self.dump_images(img_name, mask, centroid, class_id, img)
if self.label_transform is not None:
mask = self.label_transform(mask)
return img, mask, scale_float
def read_images(self, img_path, mask_path, mask_out=False):
img = Image.open(img_path).convert('RGB')
if mask_path is None or mask_path == '':
w, h = img.size
mask = np.zeros((h, w))
else:
mask = Image.open(mask_path)
drop_out_mask = None
# This code is specific to cityscapes
if(cfg.DATASET.CITYSCAPES_CUSTOMCOARSE in mask_path):
gtCoarse_mask_path = mask_path.replace(cfg.DATASET.CITYSCAPES_CUSTOMCOARSE, os.path.join(cfg.DATASET.CITYSCAPES_DIR, 'gtCoarse/gtCoarse') )
gtCoarse_mask_path = gtCoarse_mask_path.replace('leftImg8bit','gtCoarse_labelIds')
gtCoarse=np.array(Image.open(gtCoarse_mask_path))
img_name = os.path.splitext(os.path.basename(img_path))[0]
mask = np.array(mask)
if (mask_out):
mask = self.drop_mask * mask
mask = mask.copy()
for k, v in self.id_to_trainid.items():
binary_mask = (mask == k) #+ (gtCoarse == k)
if ('refinement' in mask_path) and cfg.DROPOUT_COARSE_BOOST_CLASSES != None and v in cfg.DROPOUT_COARSE_BOOST_CLASSES and binary_mask.sum() > 0 and 'vidseq' not in mask_path:
binary_mask += (gtCoarse == k)
binary_mask[binary_mask >= 1] = 1
mask[binary_mask] = gtCoarse[binary_mask]
mask[binary_mask] = v
mask = Image.fromarray(mask.astype(np.uint8))
return img, mask, img_name
def __getitem__(self, index):
"""
Generate data:
:return:
- image: image, tensor
- mask: mask, tensor
- image_name: basename of file, string
"""
# Pick an image, fill in defaults if not using class uniform
if len(self.imgs[index]) == 2:
img_path, mask_path = self.imgs[index]
centroid = None
class_id = None
else:
img_path, mask_path, centroid, class_id = self.imgs[index]
mask_out = cfg.DATASET.MASK_OUT_CITYSCAPES and \
cfg.DATASET.CUSTOM_COARSE_PROB is not None and \
'refinement' in mask_path
img, mask, img_name = self.read_images(img_path, mask_path,
mask_out=mask_out)
######################################################################
# Thresholding is done when using coarse-labelled Cityscapes images
######################################################################
if 'refinement' in mask_path:
mask = np.array(mask)
prob_mask_path = mask_path.replace('.png', '_prob.png')
# put it in 0 to 1
prob_map = np.array(Image.open(prob_mask_path)) / 255.0
prob_map_threshold = (prob_map < cfg.DATASET.CUSTOM_COARSE_PROB)
mask[prob_map_threshold] = cfg.DATASET.IGNORE_LABEL
mask = Image.fromarray(mask.astype(np.uint8))
img, mask, scale_float = self.do_transforms(img, mask, centroid,
img_name, class_id)
return img, mask, img_name, scale_float
def __len__(self):
return len(self.imgs)
def calculate_weights(self):
raise BaseException("not supported yet")
| semantic-segmentation-main | datasets/base_loader.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Null Loader
"""
from config import cfg
from runx.logx import logx
from datasets.base_loader import BaseLoader
from datasets.utils import make_dataset_folder
from datasets import uniform
import numpy as np
import torch
from torch.utils import data
class Loader(BaseLoader):
"""
Null dataset for performance benchmarking; returns zero-filled tensors.
"""
num_classes = 19
ignore_label = 255
trainid_to_name = {}
color_mapping = []
def __init__(self, mode, quality=None, joint_transform_list=None,
img_transform=None, label_transform=None, eval_folder=None):
super(Loader, self).__init__(quality=quality,
mode=mode,
joint_transform_list=joint_transform_list,
img_transform=img_transform,
label_transform=label_transform)
def __getitem__(self, index):
# return img, mask, img_name, scale_float
crop_size = cfg.DATASET.CROP_SIZE
if ',' in crop_size:
crop_size = [int(x) for x in crop_size.split(',')]
else:
crop_size = int(crop_size)
crop_size = [crop_size, crop_size]
img = torch.FloatTensor(np.zeros([3] + crop_size))
mask = torch.LongTensor(np.zeros(crop_size))
img_name = f'img{index}'
scale_float = 0.0
return img, mask, img_name, scale_float
def __len__(self):
return 3000
| semantic-segmentation-main | datasets/nullloader.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Uniform sampling of classes.
For all images, for all classes, generate centroids around which to sample.
All images are divided into tiles.
For each tile, a class can be present or not. If it is
present, calculate the centroid of the class and record it.
We would like to thank Peter Kontschieder for the inspiration of this idea.
"""
import sys
import os
import json
import numpy as np
import torch
from collections import defaultdict
from scipy.ndimage.measurements import center_of_mass
from PIL import Image
from tqdm import tqdm
from config import cfg
from runx.logx import logx
pbar = None
class Point():
"""
Point Class For X and Y Location
"""
def __init__(self, x, y):
self.x = x
self.y = y
def calc_tile_locations(tile_size, image_size):
"""
Divide an image into tiles to help us cover classes that are spread out.
tile_size: size of tile to distribute
image_size: original image size
return: locations of the tiles
"""
image_size_y, image_size_x = image_size
locations = []
for y in range(image_size_y // tile_size):
for x in range(image_size_x // tile_size):
x_offs = x * tile_size
y_offs = y * tile_size
locations.append((x_offs, y_offs))
return locations
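# Worked example (illustrative values): a 1024x2048 Cityscapes-sized label with
# tile_size=1024 is covered by a single row of two tiles.
#
#   >>> calc_tile_locations(1024, (1024, 2048))
#   [(0, 0), (1024, 0)]
#
# Each (x_offs, y_offs) pair is a tile's top-left corner; centroids are later
# recorded per tile so that classes far from the image centre still get sampled.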
def class_centroids_image(item, tile_size, num_classes, id2trainid):
"""
For one image, calculate centroids for all classes present in image.
    item: tuple of (image_fn, label_fn)
    tile_size: size of each square tile, in pixels
    num_classes: number of classes
    id2trainid: mapping from original id to training ids
    return: dict from class_id to a list of
            (image_fn, label_fn, centroid, class_id) tuples,
            one per tile in which the class is present
"""
image_fn, label_fn = item
centroids = defaultdict(list)
mask = np.array(Image.open(label_fn))
image_size = mask.shape
tile_locations = calc_tile_locations(tile_size, image_size)
drop_mask = np.zeros((1024,2048))
drop_mask[15:840, 14:2030] = 1.0
#####
if(cfg.DATASET.CITYSCAPES_CUSTOMCOARSE in label_fn):
gtCoarse_mask_path = label_fn.replace(cfg.DATASET.CITYSCAPES_CUSTOMCOARSE, os.path.join(cfg.DATASET.CITYSCAPES_DIR, 'gtCoarse/gtCoarse') )
gtCoarse_mask_path = gtCoarse_mask_path.replace('leftImg8bit','gtCoarse_labelIds')
gtCoarse=np.array(Image.open(gtCoarse_mask_path))
####
mask_copy = mask.copy()
if id2trainid:
for k, v in id2trainid.items():
binary_mask = (mask_copy == k)
            # This should only apply to auto-labelled images
            if ('refinement' in label_fn) and \
               cfg.DROPOUT_COARSE_BOOST_CLASSES is not None and \
               v in cfg.DROPOUT_COARSE_BOOST_CLASSES and \
               binary_mask.sum() > 0:
binary_mask += (gtCoarse == k)
binary_mask[binary_mask >= 1] = 1
mask[binary_mask] = gtCoarse[binary_mask]
mask[binary_mask] = v
for x_offs, y_offs in tile_locations:
patch = mask[y_offs:y_offs + tile_size, x_offs:x_offs + tile_size]
for class_id in range(num_classes):
if class_id in patch:
patch_class = (patch == class_id).astype(int)
centroid_y, centroid_x = center_of_mass(patch_class)
centroid_y = int(centroid_y) + y_offs
centroid_x = int(centroid_x) + x_offs
centroid = (centroid_x, centroid_y)
centroids[class_id].append((image_fn, label_fn, centroid,
class_id))
pbar.update(1)
return centroids
def pooled_class_centroids_all(items, num_classes, id2trainid, tile_size=1024):
"""
Calculate class centroids for all classes for all images for all tiles.
items: list of (image_fn, label_fn)
tile size: size of tile
returns: dict that contains a list of centroids for each class
"""
from multiprocessing.dummy import Pool
from functools import partial
pool = Pool(80)
global pbar
pbar = tqdm(total=len(items), desc='pooled centroid extraction', file=sys.stdout)
class_centroids_item = partial(class_centroids_image,
num_classes=num_classes,
id2trainid=id2trainid,
tile_size=tile_size)
centroids = defaultdict(list)
new_centroids = pool.map(class_centroids_item, items)
pool.close()
pool.join()
# combine each image's items into a single global dict
for image_items in new_centroids:
for class_id in image_items:
centroids[class_id].extend(image_items[class_id])
return centroids
def unpooled_class_centroids_all(items, num_classes, id2trainid,
tile_size=1024):
"""
Calculate class centroids for all classes for all images for all tiles.
items: list of (image_fn, label_fn)
tile size: size of tile
returns: dict that contains a list of centroids for each class
"""
centroids = defaultdict(list)
global pbar
pbar = tqdm(total=len(items), desc='centroid extraction', file=sys.stdout)
for image, label in items:
new_centroids = class_centroids_image(item=(image, label),
tile_size=tile_size,
num_classes=num_classes,
id2trainid=id2trainid)
for class_id in new_centroids:
centroids[class_id].extend(new_centroids[class_id])
return centroids
def class_centroids_all(items, num_classes, id2trainid, tile_size=1024):
"""
intermediate function to call pooled_class_centroid
"""
pooled_centroids = pooled_class_centroids_all(items, num_classes,
id2trainid, tile_size)
# pooled_centroids = unpooled_class_centroids_all(items, num_classes,
# id2trainid, tile_size)
return pooled_centroids
def random_sampling(alist, num):
"""
Randomly sample num items from the list
alist: list of centroids to sample from
    num: number of samples to draw; may be larger than the list, in which
         case sampling wraps around the shuffled order
return: class uniform samples from the list
"""
sampling = []
len_list = len(alist)
assert len_list, 'len_list is zero!'
indices = np.arange(len_list)
np.random.shuffle(indices)
for i in range(num):
item = alist[indices[i % len_list]]
sampling.append(item)
return sampling
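# Behaviour sketch (hypothetical values): asking for more samples than the list
# holds wraps around the shuffled index order, so items are reused roughly evenly.
#
#   >>> out = random_sampling(['a', 'b', 'c'], 5)
#   >>> len(out)
#   5
#   >>> set(out) <= {'a', 'b', 'c'}
#   True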
def build_centroids(imgs, num_classes, train, cv=None, coarse=False,
custom_coarse=False, id2trainid=None):
"""
The first step of uniform sampling is to decide sampling centers.
The idea is to divide each image into tiles and within each tile,
we compute a centroid for each class to indicate roughly where to
sample a crop during training.
This function computes these centroids and returns a list of them.
"""
if not (cfg.DATASET.CLASS_UNIFORM_PCT and train):
return []
centroid_fn = cfg.DATASET.NAME
if coarse or custom_coarse:
if coarse:
centroid_fn += '_coarse'
if custom_coarse:
centroid_fn += '_customcoarse_final'
else:
centroid_fn += '_cv{}'.format(cv)
centroid_fn += '_tile{}.json'.format(cfg.DATASET.CLASS_UNIFORM_TILE)
json_fn = os.path.join(cfg.DATASET.CENTROID_ROOT,
centroid_fn)
if os.path.isfile(json_fn):
logx.msg('Loading centroid file {}'.format(json_fn))
with open(json_fn, 'r') as json_data:
centroids = json.load(json_data)
centroids = {int(idx): centroids[idx] for idx in centroids}
logx.msg('Found {} centroids'.format(len(centroids)))
else:
logx.msg('Didn\'t find {}, so building it'.format(json_fn))
if cfg.GLOBAL_RANK==0:
os.makedirs(cfg.DATASET.CENTROID_ROOT, exist_ok=True)
# centroids is a dict (indexed by class) of lists of centroids
centroids = class_centroids_all(
imgs,
num_classes,
id2trainid=id2trainid)
with open(json_fn, 'w') as outfile:
json.dump(centroids, outfile, indent=4)
# wait for everyone to be at the same point
torch.distributed.barrier()
# GPUs (except rank0) read in the just-created centroid file
if cfg.GLOBAL_RANK != 0:
msg = f'Expected to find {json_fn}'
assert os.path.isfile(json_fn), msg
with open(json_fn, 'r') as json_data:
centroids = json.load(json_data)
centroids = {int(idx): centroids[idx] for idx in centroids}
return centroids
def build_epoch(imgs, centroids, num_classes, train):
"""
Generate an epoch of crops using uniform sampling.
Needs to be called every epoch.
Will not apply uniform sampling if not train or class uniform is off.
Inputs:
imgs - list of imgs
centroids - list of class centroids
num_classes - number of classes
class_uniform_pct: % of uniform images in one epoch
Outputs:
imgs - list of images to use this epoch
"""
class_uniform_pct = cfg.DATASET.CLASS_UNIFORM_PCT
if not (train and class_uniform_pct):
return imgs
logx.msg("Class Uniform Percentage: {}".format(str(class_uniform_pct)))
num_epoch = int(len(imgs))
logx.msg('Class Uniform items per Epoch: {}'.format(str(num_epoch)))
num_per_class = int((num_epoch * class_uniform_pct) / num_classes)
class_uniform_count = num_per_class * num_classes
num_rand = num_epoch - class_uniform_count
# create random crops
imgs_uniform = random_sampling(imgs, num_rand)
# now add uniform sampling
for class_id in range(num_classes):
msg = "cls {} len {}".format(class_id, len(centroids[class_id]))
logx.msg(msg)
for class_id in range(num_classes):
if cfg.DATASET.CLASS_UNIFORM_BIAS is not None:
bias = cfg.DATASET.CLASS_UNIFORM_BIAS[class_id]
num_per_class_biased = int(num_per_class * bias)
else:
num_per_class_biased = num_per_class
centroid_len = len(centroids[class_id])
if centroid_len == 0:
pass
else:
class_centroids = random_sampling(centroids[class_id],
num_per_class_biased)
imgs_uniform.extend(class_centroids)
return imgs_uniform
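# Worked example of the budget arithmetic above (illustrative numbers, using the
# Cityscapes fine training-set size):
#
#   len(imgs) = 2975, class_uniform_pct = 0.5, num_classes = 19
#   num_per_class       = int(2975 * 0.5 / 19) = 78
#   class_uniform_count = 78 * 19              = 1482
#   num_rand            = 2975 - 1482          = 1493
#
# so roughly half of the epoch consists of plain random samples and the other
# half is split evenly across classes, sampled around their recorded centroids
# (optionally re-weighted by cfg.DATASET.CLASS_UNIFORM_BIAS).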
| semantic-segmentation-main | datasets/uniform.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Mapillary Dataset Loader
"""
import os
import json
from config import cfg
from runx.logx import logx
from datasets.base_loader import BaseLoader
from datasets.utils import make_dataset_folder
from datasets import uniform
class Loader(BaseLoader):
num_classes = 65
ignore_label = 65
trainid_to_name = {}
color_mapping = []
def __init__(self, mode, quality='semantic', joint_transform_list=None,
img_transform=None, label_transform=None, eval_folder=None):
super(Loader, self).__init__(quality=quality,
mode=mode,
joint_transform_list=joint_transform_list,
img_transform=img_transform,
label_transform=label_transform)
root = cfg.DATASET.MAPILLARY_DIR
config_fn = os.path.join(root, 'config.json')
self.fill_colormap_and_names(config_fn)
######################################################################
# Assemble image lists
######################################################################
if mode == 'folder':
self.all_imgs = make_dataset_folder(eval_folder)
else:
splits = {'train': 'training',
'val': 'validation',
'test': 'testing'}
split_name = splits[mode]
img_ext = 'jpg'
mask_ext = 'png'
img_root = os.path.join(root, split_name, 'images')
mask_root = os.path.join(root, split_name, 'labels')
self.all_imgs = self.find_images(img_root, mask_root, img_ext,
mask_ext)
logx.msg('all imgs {}'.format(len(self.all_imgs)))
self.centroids = uniform.build_centroids(self.all_imgs,
self.num_classes,
self.train,
cv=cfg.DATASET.CV)
self.build_epoch()
def fill_colormap_and_names(self, config_fn):
"""
Mapillary code for color map and class names
Outputs
-------
self.trainid_to_name
self.color_mapping
"""
with open(config_fn) as config_file:
config = json.load(config_file)
config_labels = config['labels']
# calculate label color mapping
colormap = []
self.trainid_to_name = {}
for i in range(0, len(config_labels)):
colormap = colormap + config_labels[i]['color']
name = config_labels[i]['readable']
name = name.replace(' ', '_')
self.trainid_to_name[i] = name
self.color_mapping = colormap
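# Note on the palette layout (illustrative values): config.json stores one RGB
# triple per label, and the loop above concatenates them into a single flat list
# in label order. For instance, two labels coloured [165, 42, 42] and [0, 192, 0]
# contribute
#   self.color_mapping == [165, 42, 42, 0, 192, 0]
# which is the flat palette format expected by PIL's Image.putpalette() when the
# dataset's colorize_mask routine renders predictions.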
| semantic-segmentation-main | datasets/mapillary.py |
"""
# Code adapted from:
# https://github.com/pytorch/pytorch/blob/master/torch/utils/data/distributed.py
#
# BSD 3-Clause License
#
# Copyright (c) 2017,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import math
import torch
from torch.distributed import get_world_size, get_rank
from torch.utils.data import Sampler
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, pad=False, consecutive_sample=False, permutation=False, num_replicas=None, rank=None):
if num_replicas is None:
num_replicas = get_world_size()
if rank is None:
rank = get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.consecutive_sample = consecutive_sample
self.permutation = permutation
if pad:
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
else:
self.num_samples = int(math.floor(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.permutation:
indices = list(torch.randperm(len(self.dataset), generator=g))
else:
indices = list([x for x in range(len(self.dataset))])
# add extra samples to make it evenly divisible
if self.total_size > len(indices):
indices += indices[:(self.total_size - len(indices))]
# subsample
if self.consecutive_sample:
offset = self.num_samples * self.rank
indices = indices[offset:offset + self.num_samples]
else:
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
def set_num_samples(self):
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
| semantic-segmentation-main | datasets/sampler.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Miscellanous Functions
"""
import cv2
import sys
import os
import torch
import numpy as np
import torchvision.transforms as standard_transforms
import torchvision.utils as vutils
from tabulate import tabulate
from PIL import Image
from config import cfg
from utils.results_page import ResultsPage
from runx.logx import logx
def fast_hist(pred, gtruth, num_classes):
# mask indicates pixels we care about
mask = (gtruth >= 0) & (gtruth < num_classes)
# stretch ground truth labels by num_classes
# class 0 -> 0
# class 1 -> 19
# class 18 -> 342
#
# TP at 0 + 0, 1 + 1, 2 + 2 ...
#
# TP exist where value == num_classes*class_id + class_id
# FP = row[class].sum() - TP
# FN = col[class].sum() - TP
hist = np.bincount(num_classes * gtruth[mask].astype(int) + pred[mask],
minlength=num_classes ** 2)
hist = hist.reshape(num_classes, num_classes)
return hist
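# Worked example (illustrative): with num_classes=3,
#
#   >>> import numpy as np
#   >>> gt   = np.array([0, 1, 2, 255])   # 255 falls outside [0, 3) and is masked out
#   >>> pred = np.array([0, 2, 2, 0])
#   >>> fast_hist(pred, gt, 3)
#   array([[1, 0, 0],
#          [0, 0, 1],
#          [0, 0, 1]])
#
# Entry [gt, pred] counts pixels, so rows index the ground truth and columns the
# prediction. For class 2: diag = 1, row sum = 1, column sum = 2, hence
# IoU = 1 / (1 + 2 - 1) = 0.5, exactly what calculate_iou() computes below.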
def prep_experiment(args):
"""
Make output directories, setup logging, Tensorboard, snapshot code.
"""
args.ngpu = torch.cuda.device_count()
args.best_record = {'mean_iu': -1, 'epoch': 0}
def calculate_iou(hist_data):
acc = np.diag(hist_data).sum() / hist_data.sum()
acc_cls = np.diag(hist_data) / hist_data.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
divisor = hist_data.sum(axis=1) + hist_data.sum(axis=0) - \
np.diag(hist_data)
iu = np.diag(hist_data) / divisor
return iu, acc, acc_cls
def tensor_to_pil(img):
inv_mean = [-mean / std for mean, std in zip(cfg.DATASET.MEAN,
cfg.DATASET.STD)]
inv_std = [1 / std for std in cfg.DATASET.STD]
inv_normalize = standard_transforms.Normalize(
mean=inv_mean, std=inv_std
)
img = inv_normalize(img)
img = img.cpu()
img = standard_transforms.ToPILImage()(img).convert('RGB')
return img
def eval_metrics(iou_acc, args, net, optim, val_loss, epoch, mf_score=None):
"""
    Modified IOU mechanism for on-the-fly IOU calculations (prevents memory
    overflow for large datasets). Only applies to eval/eval.py.
"""
was_best = False
iou_per_scale = {}
iou_per_scale[1.0] = iou_acc
if args.apex:
iou_acc_tensor = torch.cuda.FloatTensor(iou_acc)
torch.distributed.all_reduce(iou_acc_tensor,
op=torch.distributed.ReduceOp.SUM)
iou_per_scale[1.0] = iou_acc_tensor.cpu().numpy()
scales = [1.0]
# Only rank 0 should save models and calculate metrics
if args.global_rank != 0:
return
hist = iou_per_scale[args.default_scale]
iu, acc, acc_cls = calculate_iou(hist)
iou_per_scale = {args.default_scale: iu}
# calculate iou for other scales
for scale in scales:
if scale != args.default_scale:
iou_per_scale[scale], _, _ = calculate_iou(iou_per_scale[scale])
print_evaluate_results(hist, iu, epoch=epoch,
iou_per_scale=iou_per_scale,
log_multiscale_tb=args.log_msinf_to_tb)
freq = hist.sum(axis=1) / hist.sum()
mean_iu = np.nanmean(iu)
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
metrics = {
'loss': val_loss.avg,
'mean_iu': mean_iu,
'acc_cls': acc_cls,
'acc': acc,
}
logx.metric('val', metrics, epoch)
logx.msg('Mean: {:2.2f}'.format(mean_iu * 100))
save_dict = {
'epoch': epoch,
'arch': args.arch,
'num_classes': cfg.DATASET_INST.num_classes,
'state_dict': net.state_dict(),
'optimizer': optim.state_dict(),
'mean_iu': mean_iu,
'command': ' '.join(sys.argv[1:])
}
logx.save_model(save_dict, metric=mean_iu, epoch=epoch)
torch.cuda.synchronize()
if mean_iu > args.best_record['mean_iu']:
was_best = True
args.best_record['val_loss'] = val_loss.avg
if mf_score is not None:
args.best_record['mask_f1_score'] = mf_score.avg
args.best_record['acc'] = acc
args.best_record['acc_cls'] = acc_cls
args.best_record['fwavacc'] = fwavacc
args.best_record['mean_iu'] = mean_iu
args.best_record['epoch'] = epoch
logx.msg('-' * 107)
if mf_score is None:
fmt_str = ('{:5}: [epoch {}], [val loss {:0.5f}], [acc {:0.5f}], '
'[acc_cls {:.5f}], [mean_iu {:.5f}], [fwavacc {:0.5f}]')
current_scores = fmt_str.format('this', epoch, val_loss.avg, acc,
acc_cls, mean_iu, fwavacc)
logx.msg(current_scores)
best_scores = fmt_str.format(
'best',
args.best_record['epoch'], args.best_record['val_loss'],
args.best_record['acc'], args.best_record['acc_cls'],
args.best_record['mean_iu'], args.best_record['fwavacc'])
logx.msg(best_scores)
else:
fmt_str = ('{:5}: [epoch {}], [val loss {:0.5f}], [mask f1 {:.5f} ] '
'[acc {:0.5f}], '
'[acc_cls {:.5f}], [mean_iu {:.5f}], [fwavacc {:0.5f}]')
current_scores = fmt_str.format('this', epoch, val_loss.avg,
mf_score.avg, acc,
acc_cls, mean_iu, fwavacc)
logx.msg(current_scores)
best_scores = fmt_str.format(
'best',
args.best_record['epoch'], args.best_record['val_loss'],
args.best_record['mask_f1_score'],
args.best_record['acc'], args.best_record['acc_cls'],
args.best_record['mean_iu'], args.best_record['fwavacc'])
logx.msg(best_scores)
logx.msg('-' * 107)
return was_best
class ImageDumper():
"""
Image dumping class
    You pass images/tensors from the training pipeline into this object; it
    converts them to images (applying the necessary inverse transformations)
    and then writes the images out to disk.
"""
def __init__(self, val_len, tensorboard=True, write_webpage=True,
webpage_fn='index.html', dump_all_images=False, dump_assets=False,
dump_err_prob=False, dump_num=10, dump_for_auto_labelling=False,
dump_for_submission=False):
"""
:val_len: num validation images
:tensorboard: push summary to tensorboard
:webpage: generate a summary html page
:webpage_fn: name of webpage file
:dump_all_images: dump all (validation) images, e.g. for video
:dump_num: number of images to dump if not dumping all
:dump_assets: dump attention maps
"""
self.val_len = val_len
self.tensorboard = tensorboard
self.write_webpage = write_webpage
self.webpage_fn = os.path.join(cfg.RESULT_DIR,
'best_images', webpage_fn)
self.dump_assets = dump_assets
self.dump_for_auto_labelling = dump_for_auto_labelling
self.dump_for_submission = dump_for_submission
self.viz_frequency = max(1, val_len // dump_num)
if dump_all_images:
self.dump_frequency = 1
else:
self.dump_frequency = self.viz_frequency
inv_mean = [-mean / std for mean, std in zip(cfg.DATASET.MEAN,
cfg.DATASET.STD)]
inv_std = [1 / std for std in cfg.DATASET.STD]
self.inv_normalize = standard_transforms.Normalize(
mean=inv_mean, std=inv_std
)
if self.dump_for_submission:
self.save_dir = os.path.join(cfg.RESULT_DIR, 'submit')
elif self.dump_for_auto_labelling:
self.save_dir = os.path.join(cfg.RESULT_DIR)
else:
self.save_dir = os.path.join(cfg.RESULT_DIR, 'best_images')
os.makedirs(self.save_dir, exist_ok=True)
self.imgs_to_tensorboard = []
self.imgs_to_webpage = []
if cfg.DATASET.NAME == 'cityscapes':
            # If all images of a dataset have identical dimensions, as in
            # cityscapes, there's no need to crop the images before tiling
            # them into a grid for display in tensorboard. Otherwise, the
            # images need to be center-cropped to a common size first.
self.visualize = standard_transforms.Compose([
standard_transforms.Resize(384),
standard_transforms.ToTensor()
])
else:
self.visualize = standard_transforms.Compose([
standard_transforms.Resize(384),
standard_transforms.CenterCrop((384, 384)),
standard_transforms.ToTensor()
])
def reset(self):
self.imgs_to_tensorboard = []
self.imgs_to_webpage = []
def dump(self, dump_dict, val_idx):
"""
dump a single batch of images
:dump_dict: a dictionary containing elements to dump out
'input_images': source image
'gt_images': label
'img_names': img_names
'assets': dict with keys:
'predictions': final prediction
'pred_*': different scales of predictions
'attn_*': different scales of attn
'err_mask': err_mask
"""
if self.dump_for_auto_labelling or self.dump_for_submission:
pass
elif (val_idx % self.dump_frequency or cfg.GLOBAL_RANK != 0):
return
else:
pass
colorize_mask_fn = cfg.DATASET_INST.colorize_mask
idx = 0 # only use first element of batch
input_image = dump_dict['input_images'][idx]
prob_image = dump_dict['assets']['prob_mask'][idx]
gt_image = dump_dict['gt_images'][idx]
prediction = dump_dict['assets']['predictions'][idx]
del dump_dict['assets']['predictions']
img_name = dump_dict['img_names'][idx]
if self.dump_for_auto_labelling:
# Dump Prob
prob_fn = '{}_prob.png'.format(img_name)
prob_fn = os.path.join(self.save_dir, prob_fn)
cv2.imwrite(prob_fn, (prob_image.cpu().numpy()*255).astype(np.uint8))
if self.dump_for_auto_labelling or self.dump_for_submission:
# Dump Predictions
prediction_cpu = np.array(prediction)
label_out = np.zeros_like(prediction)
submit_fn = '{}.png'.format(img_name)
for label_id, train_id in cfg.DATASET_INST.id_to_trainid.items():
label_out[np.where(prediction_cpu == train_id)] = label_id
cv2.imwrite(os.path.join(self.save_dir, submit_fn), label_out)
return
input_image = self.inv_normalize(input_image)
input_image = input_image.cpu()
input_image = standard_transforms.ToPILImage()(input_image)
input_image = input_image.convert("RGB")
input_image_fn = f'{img_name}_input.png'
input_image.save(os.path.join(self.save_dir, input_image_fn))
gt_fn = '{}_gt.png'.format(img_name)
gt_pil = colorize_mask_fn(gt_image.cpu().numpy())
gt_pil.save(os.path.join(self.save_dir, gt_fn))
prediction_fn = '{}_prediction.png'.format(img_name)
prediction_pil = colorize_mask_fn(prediction)
prediction_pil.save(os.path.join(self.save_dir, prediction_fn))
prediction_pil = prediction_pil.convert('RGB')
composited = Image.blend(input_image, prediction_pil, 0.4)
composited_fn = 'composited_{}.png'.format(img_name)
composited_fn = os.path.join(self.save_dir, composited_fn)
composited.save(composited_fn)
# only visualize a limited number of images
if val_idx % self.viz_frequency or cfg.GLOBAL_RANK != 0:
return
to_tensorboard = [
self.visualize(input_image.convert('RGB')),
self.visualize(gt_pil.convert('RGB')),
self.visualize(prediction_pil.convert('RGB')),
]
to_webpage = [
(input_image_fn, 'input'),
(gt_fn, 'gt'),
(prediction_fn, 'prediction'),
]
if self.dump_assets:
assets = dump_dict['assets']
for asset in assets:
mask = assets[asset][idx]
mask_fn = os.path.join(self.save_dir, f'{img_name}_{asset}.png')
if 'pred_' in asset:
pred_pil = colorize_mask_fn(mask)
pred_pil.save(mask_fn)
continue
if type(mask) == torch.Tensor:
mask = mask.squeeze().cpu().numpy()
else:
mask = mask.squeeze()
mask = (mask * 255)
mask = mask.astype(np.uint8)
mask_pil = Image.fromarray(mask)
mask_pil = mask_pil.convert('RGB')
mask_pil.save(mask_fn)
to_tensorboard.append(self.visualize(mask_pil))
to_webpage.append((mask_fn, asset))
self.imgs_to_tensorboard.append(to_tensorboard)
self.imgs_to_webpage.append(to_webpage)
def write_summaries(self, was_best):
"""
write out tensorboard
write out html webpage summary
only update tensorboard if was a best epoch
always update webpage
always save N images
"""
if self.write_webpage:
ip = ResultsPage('prediction examples', self.webpage_fn)
for img_set in self.imgs_to_webpage:
ip.add_table(img_set)
ip.write_page()
if self.tensorboard and was_best:
if len(self.imgs_to_tensorboard):
num_per_row = len(self.imgs_to_tensorboard[0])
                # flatten array:
                flattened = []
                for a in self.imgs_to_tensorboard:
                    for b in a:
                        flattened.append(b)
                imgs_to_tensorboard = torch.stack(flattened, 0)
imgs_to_tensorboard = vutils.make_grid(
imgs_to_tensorboard, nrow=num_per_row, padding=5)
logx.add_image('imgs', imgs_to_tensorboard, cfg.EPOCH)
def print_evaluate_results(hist, iu, epoch=0, iou_per_scale=None,
log_multiscale_tb=False):
"""
If single scale:
just print results for default scale
else
print all scale results
Inputs:
hist = histogram for default scale
iu = IOU for default scale
iou_per_scale = iou for all scales
"""
id2cat = cfg.DATASET_INST.trainid_to_name
# id2cat = {i: i for i in range(cfg.DATASET.NUM_CLASSES)}
iu_FP = hist.sum(axis=1) - np.diag(hist)
iu_FN = hist.sum(axis=0) - np.diag(hist)
iu_TP = np.diag(hist)
logx.msg('IoU:')
header = ['Id', 'label']
header.extend(['iU_{}'.format(scale) for scale in iou_per_scale])
header.extend(['TP', 'FP', 'FN', 'Precision', 'Recall'])
tabulate_data = []
for class_id in range(len(iu)):
class_data = []
class_data.append(class_id)
class_name = "{}".format(id2cat[class_id]) if class_id in id2cat else ''
class_data.append(class_name)
for scale in iou_per_scale:
class_data.append(iou_per_scale[scale][class_id] * 100)
total_pixels = hist.sum()
class_data.append(100 * iu_TP[class_id] / total_pixels)
class_data.append(iu_FP[class_id] / iu_TP[class_id])
class_data.append(iu_FN[class_id] / iu_TP[class_id])
class_data.append(iu_TP[class_id] / (iu_TP[class_id] + iu_FP[class_id]))
class_data.append(iu_TP[class_id] / (iu_TP[class_id] + iu_FN[class_id]))
tabulate_data.append(class_data)
if log_multiscale_tb:
logx.add_scalar("xscale_%0.1f/%s" % (0.5, str(id2cat[class_id])),
float(iou_per_scale[0.5][class_id] * 100), epoch)
logx.add_scalar("xscale_%0.1f/%s" % (1.0, str(id2cat[class_id])),
float(iou_per_scale[1.0][class_id] * 100), epoch)
logx.add_scalar("xscale_%0.1f/%s" % (2.0, str(id2cat[class_id])),
float(iou_per_scale[2.0][class_id] * 100), epoch)
print_str = str(tabulate((tabulate_data), headers=header, floatfmt='1.2f'))
logx.msg(print_str)
def metrics_per_image(hist):
"""
Calculate tp, fp, fn for one image
"""
FP = hist.sum(axis=1) - np.diag(hist)
FN = hist.sum(axis=0) - np.diag(hist)
return FP, FN
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def fmt_scale(prefix, scale):
"""
format scale name
:prefix: a string that is the beginning of the field name
:scale: a scale value (0.25, 0.5, 1.0, 2.0)
"""
    # e.g. 0.5 -> '05', 2.0 -> '20'
    scale_str = str(float(scale)).replace('.', '')
return f'{prefix}_{scale_str}x'
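# Example outputs (given the dot-stripping above): fmt_scale('pred', 0.5) returns
# 'pred_05x' and fmt_scale('attn', 2.0) returns 'attn_20x'. eval_minibatch() uses
# such keys to stash per-scale predictions, which ImageDumper then writes out as
# '<img_name>_pred_05x.png' style assets.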
| semantic-segmentation-main | utils/misc.py |
"""
# Code adapted from:
# https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/collections.py
Source License
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
class AttrDict(dict):
IMMUTABLE = '__immutable__'
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__[AttrDict.IMMUTABLE] = False
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
elif name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if not self.__dict__[AttrDict.IMMUTABLE]:
if name in self.__dict__:
self.__dict__[name] = value
else:
self[name] = value
else:
raise AttributeError(
'Attempted to set "{}" to "{}", but AttrDict is immutable'.
format(name, value)
)
def immutable(self, is_immutable):
"""Set immutability to is_immutable and recursively apply the setting
to all nested AttrDicts.
"""
self.__dict__[AttrDict.IMMUTABLE] = is_immutable
# Recursively set immutable state
for v in self.__dict__.values():
if isinstance(v, AttrDict):
v.immutable(is_immutable)
for v in self.values():
if isinstance(v, AttrDict):
v.immutable(is_immutable)
def is_immutable(self):
return self.__dict__[AttrDict.IMMUTABLE]
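# Minimal usage sketch (illustrative; mirrors how config.py typically builds cfg):
#
#   >>> d = AttrDict()
#   >>> d.MODEL = AttrDict()
#   >>> d.MODEL.ALIGN_CORNERS = True
#   >>> d.MODEL.ALIGN_CORNERS
#   True
#   >>> d.immutable(True)
#   >>> d.MODEL.ALIGN_CORNERS = False   # raises AttributeError: the dict is frozen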
| semantic-segmentation-main | utils/attr_dict.py |
| semantic-segmentation-main | utils/__init__.py |
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
# Code adapted from:
# https://github.com/fperazzi/davis/blob/master/python/lib/davis/measures/f_boundary.py
#
# Source License
#
# BSD 3-Clause License
#
# Copyright (c) 2017,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##############################################################################
#
# Based on:
# ----------------------------------------------------------------------------
# A Benchmark Dataset and Evaluation Methodology for Video Object Segmentation
# Copyright (c) 2016 Federico Perazzi
# Licensed under the BSD License [see LICENSE for details]
# Written by Federico Perazzi
# ----------------------------------------------------------------------------
"""
import numpy as np
from multiprocessing import Pool
from tqdm import tqdm
from config import cfg
""" Utilities for computing, reading and saving benchmark evaluation."""
def eval_mask_boundary(seg_mask,gt_mask,num_classes,num_proc=10,bound_th=0.008):
"""
Compute F score for a segmentation mask
Arguments:
seg_mask (ndarray): segmentation mask prediction
gt_mask (ndarray): segmentation mask ground truth
num_classes (int): number of classes
Returns:
        Fpc (ndarray): per-class sum of F scores over the batch
        Fc (ndarray): per-class count of valid (non-NaN) frames
"""
p = Pool(processes=num_proc)
batch_size = seg_mask.shape[0]
Fpc = np.zeros(num_classes)
Fc = np.zeros(num_classes)
for class_id in tqdm(range(num_classes)):
args = [((seg_mask[i] == class_id).astype(np.uint8),
(gt_mask[i] == class_id).astype(np.uint8),
gt_mask[i] == cfg.DATASET.IGNORE_LABEL,
bound_th)
for i in range(batch_size)]
temp = p.map(db_eval_boundary_wrapper, args)
temp = np.array(temp)
Fs = temp[:,0]
_valid = ~np.isnan(Fs)
Fc[class_id] = np.sum(_valid)
Fs[np.isnan(Fs)] = 0
Fpc[class_id] = sum(Fs)
return Fpc, Fc
#def db_eval_boundary_wrapper_wrapper(args):
# seg_mask, gt_mask, class_id, batch_size, Fpc = args
# print("class_id:" + str(class_id))
# p = Pool(processes=10)
# args = [((seg_mask[i] == class_id).astype(np.uint8),
# (gt_mask[i] == class_id).astype(np.uint8))
# for i in range(batch_size)]
# Fs = p.map(db_eval_boundary_wrapper, args)
# Fpc[class_id] = sum(Fs)
# return
def db_eval_boundary_wrapper(args):
foreground_mask, gt_mask, ignore, bound_th = args
return db_eval_boundary(foreground_mask, gt_mask,ignore, bound_th)
def db_eval_boundary(foreground_mask,gt_mask, ignore_mask,bound_th=0.008):
"""
    Compute the boundary F-measure and precision for a single frame.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
Returns:
F (float): boundaries F-measure
P (float): boundaries precision
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th*np.linalg.norm(foreground_mask.shape))
#print(bound_pix)
#print(gt.shape)
#print(np.unique(gt))
foreground_mask[ignore_mask] = 0
gt_mask[ignore_mask] = 0
# Get the pixel boundaries of both masks
    fg_boundary = seg2bmap(foreground_mask)
    gt_boundary = seg2bmap(gt_mask)
from skimage.morphology import binary_dilation,disk
fg_dil = binary_dilation(fg_boundary,disk(bound_pix))
gt_dil = binary_dilation(gt_boundary,disk(bound_pix))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
#% Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match)/float(n_fg)
recall = np.sum(gt_match)/float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
        F = 2 * precision * recall / (precision + recall)
return F, precision
def seg2bmap(seg,width=None,height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <dmartin@eecs.berkeley.edu>
January 2003
"""
    seg = seg.astype(bool)
seg[seg>0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h,w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
    assert not (width > w or height > h or abs(ar1 - ar2) > 0.01), \
        'Cannot convert %dx%d seg to %dx%d bmap.' % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:,:-1] = seg[:,1:]
s[:-1,:] = seg[1:,:]
se[:-1,:-1] = seg[1:,1:]
b = seg^e | seg^s | seg^se
b[-1,:] = seg[-1,:]^e[-1,:]
b[:,-1] = seg[:,-1]^s[:,-1]
b[-1,-1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height,width))
for x in range(w):
for y in range(h):
if b[y,x]:
                    j = 1 + int(np.floor((y - 1) + height / h))
                    i = 1 + int(np.floor((x - 1) + width / h))
                    bmap[j, i] = 1
return bmap
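# Sanity-check sketch (hypothetical shapes): identical prediction and ground-truth
# masks give a perfect boundary score, because every boundary pixel of one mask
# falls inside the dilated boundary of the other.
#
#   >>> import numpy as np
#   >>> m = np.zeros((64, 64), dtype=np.uint8); m[16:48, 16:48] = 1
#   >>> ignore = np.zeros((64, 64), dtype=bool)
#   >>> F, precision = db_eval_boundary(m.copy(), m.copy(), ignore)
#   >>> F == 1.0 and precision == 1.0
#   True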
| semantic-segmentation-main | utils/f_boundary.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os
import torch
from config import cfg
from utils.misc import fast_hist, fmt_scale
from utils.misc import AverageMeter, eval_metrics
from utils.misc import metrics_per_image
from runx.logx import logx
def flip_tensor(x, dim):
"""
Flip Tensor along a dimension
"""
dim = x.dim() + dim if dim < 0 else dim
return x[tuple(slice(None, None) if i != dim
else torch.arange(x.size(i) - 1, -1, -1).long()
for i in range(x.dim()))]
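# Quick sanity example (illustrative): flipping an NCHW tensor along dim=3
# reverses each row, and flipping twice restores the original, which is what the
# do_flip averaging in eval_minibatch() relies on.
#
#   >>> import torch
#   >>> t = torch.arange(6).view(1, 1, 2, 3)
#   >>> flip_tensor(t, 3)[0, 0, 0].tolist()
#   [2, 1, 0]
#   >>> torch.equal(flip_tensor(flip_tensor(t, 3), 3), t)
#   True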
def resize_tensor(inputs, target_size):
inputs = torch.nn.functional.interpolate(
inputs, size=target_size, mode='bilinear',
align_corners=cfg.MODEL.ALIGN_CORNERS)
return inputs
def calc_err_mask(pred, gtruth, num_classes, classid):
"""
calculate class-specific error masks
"""
# Class-specific error mask
class_mask = (gtruth >= 0) & (gtruth == classid)
fp = (pred == classid) & ~class_mask & (gtruth != cfg.DATASET.IGNORE_LABEL)
fn = (pred != classid) & class_mask
err_mask = fp | fn
return err_mask.astype(int)
def calc_err_mask_all(pred, gtruth, num_classes):
"""
calculate class-agnostic error masks
"""
# Class-specific error mask
mask = (gtruth >= 0) & (gtruth != cfg.DATASET.IGNORE_LABEL)
err_mask = mask & (pred != gtruth)
return err_mask.astype(int)
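# Illustrative example (assuming cfg.DATASET.IGNORE_LABEL == 255): the
# class-agnostic error mask is 1 wherever a valid pixel is mispredicted.
#
#   >>> import numpy as np
#   >>> gt   = np.array([0, 1, 255, 2])
#   >>> pred = np.array([0, 2, 0,   2])
#   >>> calc_err_mask_all(pred, gt, 19)
#   array([0, 1, 0, 0])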
def eval_minibatch(data, net, criterion, val_loss, calc_metrics, args, val_idx):
"""
Evaluate a single minibatch of images.
* calculate metrics
* dump images
There are two primary multi-scale inference types:
1. 'MSCALE', or in-model multi-scale: where the multi-scale iteration loop is
handled within the model itself (see networks/mscale.py -> nscale_forward())
2. 'multi_scale_inference', where we use Averaging to combine scales
"""
torch.cuda.empty_cache()
scales = [args.default_scale]
if args.multi_scale_inference:
scales.extend([float(x) for x in args.extra_scales.split(',')])
if val_idx == 0:
logx.msg(f'Using multi-scale inference (AVGPOOL) with scales {scales}')
# input = torch.Size([1, 3, h, w])
# gt_image = torch.Size([1, h, w])
images, gt_image, img_names, scale_float = data
assert len(images.size()) == 4 and len(gt_image.size()) == 3
assert images.size()[2:] == gt_image.size()[1:]
batch_pixel_size = images.size(0) * images.size(2) * images.size(3)
input_size = images.size(2), images.size(3)
if args.do_flip:
        # By ending with flip=0, we ensure that the images that are dumped
# out correspond to the unflipped versions. A bit hacky.
flips = [1, 0]
else:
flips = [0]
with torch.no_grad():
output = 0.0
for flip in flips:
for scale in scales:
if flip == 1:
inputs = flip_tensor(images, 3)
else:
inputs = images
infer_size = [round(sz * scale) for sz in input_size]
if scale != 1.0:
inputs = resize_tensor(inputs, infer_size)
inputs = {'images': inputs, 'gts': gt_image}
inputs = {k: v.cuda() for k, v in inputs.items()}
# Expected Model outputs:
# required:
# 'pred' the network prediction, shape (1, 19, h, w)
#
# optional:
# 'pred_*' - multi-scale predictions from mscale model
# 'attn_*' - multi-scale attentions from mscale model
output_dict = net(inputs)
_pred = output_dict['pred']
# save AVGPOOL style multi-scale output for visualizing
if not cfg.MODEL.MSCALE:
scale_name = fmt_scale('pred', scale)
output_dict[scale_name] = _pred
# resize tensor down to 1.0x scale in order to combine
# with other scales of prediction
if scale != 1.0:
_pred = resize_tensor(_pred, input_size)
if flip == 1:
output = output + flip_tensor(_pred, 3)
else:
output = output + _pred
output = output / len(scales) / len(flips)
assert_msg = 'output_size {} gt_cuda size {}'
gt_cuda = gt_image.cuda()
assert_msg = assert_msg.format(
output.size()[2:], gt_cuda.size()[1:])
assert output.size()[2:] == gt_cuda.size()[1:], assert_msg
assert output.size()[1] == cfg.DATASET.NUM_CLASSES, assert_msg
# Update loss and scoring datastructure
if calc_metrics:
val_loss.update(criterion(output, gt_image.cuda()).item(),
batch_pixel_size)
output_data = torch.nn.functional.softmax(output, dim=1).cpu().data
max_probs, predictions = output_data.max(1)
# Assemble assets to visualize
assets = {}
for item in output_dict:
if 'attn_' in item:
assets[item] = output_dict[item]
if 'pred_' in item:
smax = torch.nn.functional.softmax(output_dict[item], dim=1)
_, pred = smax.data.max(1)
assets[item] = pred.cpu().numpy()
predictions = predictions.numpy()
assets['predictions'] = predictions
assets['prob_mask'] = max_probs
if calc_metrics:
assets['err_mask'] = calc_err_mask_all(predictions,
gt_image.numpy(),
cfg.DATASET.NUM_CLASSES)
_iou_acc = fast_hist(predictions.flatten(),
gt_image.numpy().flatten(),
cfg.DATASET.NUM_CLASSES)
return assets, _iou_acc
def validate_topn(val_loader, net, criterion, optim, epoch, args):
"""
Find worse case failures ...
Only single GPU for now
First pass = calculate TP, FP, FN pixels per image per class
Take these stats and determine the top20 images to dump per class
Second pass = dump all those selected images
"""
assert args.bs_val == 1
######################################################################
# First pass
######################################################################
logx.msg('First pass')
image_metrics = {}
net.eval()
val_loss = AverageMeter()
iou_acc = 0
for val_idx, data in enumerate(val_loader):
# Run network
assets, _iou_acc = \
            eval_minibatch(data, net, criterion, val_loss, True, args, val_idx)
# per-class metrics
input_images, labels, img_names, _ = data
fp, fn = metrics_per_image(_iou_acc)
img_name = img_names[0]
image_metrics[img_name] = (fp, fn)
iou_acc += _iou_acc
if val_idx % 20 == 0:
logx.msg(f'validating[Iter: {val_idx + 1} / {len(val_loader)}]')
if val_idx > 5 and args.test_mode:
break
eval_metrics(iou_acc, args, net, optim, val_loss, epoch)
######################################################################
# Find top 20 worst failures from a pixel count perspective
######################################################################
from collections import defaultdict
worst_images = defaultdict(dict)
class_to_images = defaultdict(dict)
for classid in range(cfg.DATASET.NUM_CLASSES):
tbl = {}
for img_name in image_metrics.keys():
fp, fn = image_metrics[img_name]
fp = fp[classid]
fn = fn[classid]
tbl[img_name] = fp + fn
worst = sorted(tbl, key=tbl.get, reverse=True)
for img_name in worst[:args.dump_topn]:
fail_pixels = tbl[img_name]
worst_images[img_name][classid] = fail_pixels
class_to_images[classid][img_name] = fail_pixels
msg = str(worst_images)
logx.msg(msg)
# write out per-gpu jsons
# barrier
# make single table
######################################################################
# 2nd pass
######################################################################
logx.msg('Second pass')
attn_map = None
for val_idx, data in enumerate(val_loader):
in_image, gt_image, img_names, _ = data
# Only process images that were identified in first pass
if not args.dump_topn_all and img_names[0] not in worst_images:
continue
with torch.no_grad():
inputs = in_image.cuda()
inputs = {'images': inputs, 'gts': gt_image}
if cfg.MODEL.MSCALE:
output, attn_map = net(inputs)
else:
output = net(inputs)
output = torch.nn.functional.softmax(output, dim=1)
prob_mask, predictions = output.data.max(1)
predictions = predictions.cpu()
# this has shape [bs, h, w]
img_name = img_names[0]
for classid in worst_images[img_name].keys():
err_mask = calc_err_mask(predictions.numpy(),
gt_image.numpy(),
cfg.DATASET.NUM_CLASSES,
classid)
class_name = cfg.DATASET_INST.trainid_to_name[classid]
error_pixels = worst_images[img_name][classid]
logx.msg(f'{img_name} {class_name}: {error_pixels}')
img_names = [img_name + f'_{class_name}']
to_dump = {'gt_images': gt_image,
'input_images': in_image,
'predictions': predictions.numpy(),
'err_mask': err_mask,
'prob_mask': prob_mask,
'img_names': img_names}
if attn_map is not None:
to_dump['attn_maps'] = attn_map
# FIXME!
# do_dump_images([to_dump])
html_fn = os.path.join(args.result_dir, 'best_images',
'topn_failures.html')
from utils.results_page import ResultsPage
ip = ResultsPage('topn failures', html_fn)
for classid in class_to_images:
class_name = cfg.DATASET_INST.trainid_to_name[classid]
img_dict = class_to_images[classid]
for img_name in sorted(img_dict, key=img_dict.get, reverse=True):
fail_pixels = class_to_images[classid][img_name]
img_cls = f'{img_name}_{class_name}'
pred_fn = f'{img_cls}_prediction.png'
gt_fn = f'{img_cls}_gt.png'
inp_fn = f'{img_cls}_input.png'
err_fn = f'{img_cls}_err_mask.png'
prob_fn = f'{img_cls}_prob_mask.png'
img_label_pairs = [(pred_fn, 'pred'),
(gt_fn, 'gt'),
(inp_fn, 'input'),
(err_fn, 'errors'),
(prob_fn, 'prob')]
ip.add_table(img_label_pairs,
table_heading=f'{class_name}-{fail_pixels}')
ip.write_page()
return val_loss.avg
| semantic-segmentation-main | utils/trnval_utils.py |
"""
# Code adapted from:
# https://github.com/pytorch/pytorch/blob/master/torch/nn/parallel/data_parallel.py
#
# BSD 3-Clause License
#
# Copyright (c) 2017,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import torch
import warnings
from torch.nn.modules import Module
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.parallel_apply import parallel_apply
def _check_balance(device_ids):
imbalance_warn = """
There is an imbalance between your GPUs. You may want to exclude GPU {} which
has less than 75% of the memory or cores of GPU {}. You can do so by setting
the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES
environment variable."""
dev_props = [torch.cuda.get_device_properties(i) for i in device_ids]
def warn_imbalance(get_prop):
values = [get_prop(props) for props in dev_props]
min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1))
max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1))
if min_val / max_val < 0.75:
warnings.warn(imbalance_warn.format(device_ids[min_pos], device_ids[max_pos]))
return True
return False
if warn_imbalance(lambda props: props.total_memory):
return
if warn_imbalance(lambda props: props.multi_processor_count):
return
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None, do_gather=True):
"""
Evaluates module(input) in parallel across the GPUs given in device_ids.
This is the functional version of the DataParallel module.
Args:
module: the module to evaluate in parallel
inputs: inputs to the module
device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output. Use -1 to indicate the CPU.
(default: device_ids[0])
Returns:
a Tensor containing the result of module(input) located on
output_device
"""
if not isinstance(inputs, tuple):
inputs = (inputs,)
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if output_device is None:
output_device = device_ids[0]
inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
if len(device_ids) == 1:
return module(*inputs[0], **module_kwargs[0])
used_device_ids = device_ids[:len(inputs)]
replicas = replicate(module, used_device_ids)
outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    if do_gather:
return gather(outputs, output_device, dim)
else:
return outputs
class MyDataParallel(Module):
"""
Implements data parallelism at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the batch
dimension. In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards
pass, gradients from each replica are summed into the original module.
The batch size should be larger than the number of GPUs used.
See also: :ref:`cuda-nn-dataparallel-instead`
Arbitrary positional and keyword inputs are allowed to be passed into
DataParallel EXCEPT Tensors. All tensors will be scattered on dim
specified (default 0). Primitive types will be broadcasted, but all
other types will be a shallow copy and can be corrupted if written to in
the model's forward pass.
.. warning::
Forward and backward hooks defined on :attr:`module` and its submodules
will be invoked ``len(device_ids)`` times, each with inputs located on
a particular device. Particularly, the hooks are only guaranteed to be
executed in correct order with respect to operations on corresponding
devices. For example, it is not guaranteed that hooks set via
:meth:`~torch.nn.Module.register_forward_pre_hook` be executed before
`all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but
that each such hook be executed before the corresponding
:meth:`~torch.nn.Module.forward` call of that device.
.. warning::
When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in
:func:`forward`, this wrapper will return a vector of length equal to
number of devices used in data parallelism, containing the result from
each device.
.. note::
There is a subtlety in using the
``pack sequence -> recurrent network -> unpack sequence`` pattern in a
:class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.
See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for
details.
Args:
module: module to be parallelized
device_ids: CUDA devices (default: all devices)
output_device: device location of output (default: device_ids[0])
Attributes:
module (Module): the module to be parallelized
Example::
>>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
>>> output = net(input_var)
"""
# TODO: update notes/cuda.rst when this class handles 8+ GPUs well
def __init__(self, module, device_ids=None, output_device=None, dim=0, gather=True):
super(MyDataParallel, self).__init__()
if not torch.cuda.is_available():
self.module = module
self.device_ids = []
return
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if output_device is None:
output_device = device_ids[0]
self.dim = dim
self.module = module
self.device_ids = device_ids
self.output_device = output_device
self.gather_bool = gather
_check_balance(self.device_ids)
if len(self.device_ids) == 1:
self.module.cuda(device_ids[0])
def forward(self, *inputs, **kwargs):
if not self.device_ids:
return self.module(*inputs, **kwargs)
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return [self.module(*inputs[0], **kwargs[0])]
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
if self.gather_bool:
return self.gather(outputs, self.output_device)
else:
return outputs
def replicate(self, module, device_ids):
return replicate(module, device_ids)
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def parallel_apply(self, replicas, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
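# Illustrative usage sketch (not part of the original module): wrapping a model so the
# per-device outputs are returned without gathering. The `net` and `images` names are
# hypothetical placeholders, assuming a machine with at least two CUDA devices.
#
#   model = MyDataParallel(net, device_ids=[0, 1], gather=False)
#   per_gpu_outputs = model(images)   # list with one output per device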
| semantic-segmentation-main | utils/my_data_parallel.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import glob
import os
import numpy as np
id2cat = {
0: 'road',
1: 'sidewalk',
2: 'building',
3: 'wall',
4: 'fence',
5: 'pole',
6: 'traffic_light',
7: 'traffic_sign',
8: 'vegetation',
9: 'terrain',
10: 'sky',
11: 'person',
12: 'rider',
13: 'car',
14: 'truck',
15: 'bus',
16: 'train',
17: 'motorcycle',
18: 'bicycle'}
# Leaderboard mapillary
sota_iu_results = {
0: 98.4046,
1: 85.0224,
2: 93.6462,
3: 61.7487,
4: 63.8885,
5: 67.6745,
6: 77.43,
7: 80.8351,
8: 93.7341,
9: 71.8774,
10: 95.6122,
11: 86.7228,
12: 72.7778,
13: 95.7033,
14: 79.9019,
15: 93.0954,
16: 89.7196,
17: 72.5731,
18: 78.2172,
255: 0}
class ResultsPage(object):
'''
This creates an HTML page of embedded images, useful for showing evaluation results.
Usage:
    ip = ResultsPage(experiment_name, html_fn)
    # Add a table with N images ...
    ip.add_table([(img, descr), (img, descr), ...])
# Generate html page
ip.write_page()
'''
def __init__(self, experiment_name, html_filename):
self.experiment_name = experiment_name
self.html_filename = html_filename
self.outfile = open(self.html_filename, 'w')
self.items = []
def _print_header(self):
header = '''<!DOCTYPE html>
<html>
<head>
<title>Experiment = {}</title>
</head>
<body>'''.format(self.experiment_name)
self.outfile.write(header)
def _print_footer(self):
self.outfile.write(''' </body>
</html>''')
def _print_table_header(self, table_name):
table_hdr = ''' <h3>{}</h3>
<table border="1" style="table-layout: fixed;">
<tr>'''.format(table_name)
self.outfile.write(table_hdr)
def _print_table_footer(self):
table_ftr = ''' </tr>
</table>'''
self.outfile.write(table_ftr)
def _print_table_guts(self, img_fn, descr):
table = ''' <td halign="center" style="word-wrap: break-word;" valign="top">
<p>
<a href="{img_fn}">
<img src="{img_fn}" style="width:768px">
</a><br>
<p>{descr}</p>
</p>
</td>'''.format(img_fn=img_fn, descr=descr)
self.outfile.write(table)
def add_table(self, img_label_pairs, table_heading=''):
"""
:img_label_pairs: A list of pairs of [img,label]
"""
self.items.append([img_label_pairs, table_heading])
def _write_table(self, table, heading):
img, _descr = table[0]
self._print_table_header(heading)
for img, descr in table:
self._print_table_guts(img, descr)
self._print_table_footer()
def write_page(self):
self._print_header()
for table, heading in self.items:
self._write_table(table, heading)
self._print_footer()
def _print_page_start(self):
page_start = '''<!DOCTYPE html>
<html>
<head>
<title>Experiment = EXP_NAME </title>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>'''
self.outfile.write(page_start)
def _print_table_start(self, caption, hdr):
self.outfile.write('''<table style="width:100%">
<caption>{}</caption>
<tr>'''.format(caption))
for hdr_col in hdr:
self.outfile.write(' <th>{}</th>'.format(hdr_col))
self.outfile.write(' </tr>')
def _print_table_row(self, row):
self.outfile.write(' <tr>')
for i in row:
self.outfile.write(' <td>{}</td>'.format(i))
# Create Links
fp_link = '<a href="{}_fp.html">false positive Top N</a>'.format(row[
1])
fn_link = '<a href="{}_fn.html">false_negative Top N</a>'.format(row[
1])
self.outfile.write(' <td>{}</td>'.format(fp_link))
self.outfile.write(' <td>{}</td>'.format(fn_link))
self.outfile.write(' </tr>')
def _print_table_end(self):
self.outfile.write('</table>')
def _print_page_end(self):
self.outfile.write('''
</body>
</html>''')
def create_main(self, iu, hist):
self._print_page_start()
#_print_table_style()
# Calculate all of the terms:
iu_false_positive = hist.sum(axis=1) - np.diag(hist)
iu_false_negative = hist.sum(axis=0) - np.diag(hist)
iu_true_positive = np.diag(hist)
hdr = ("Class ID", "Class", "IoU", "Sota-IU", "TP",
"FP", "FN", "precision", "recall", "", "")
self._print_table_start("Mean IoU Results", hdr)
for iu_score, index in iu:
class_name = id2cat[index]
iu_string = '{:5.2f}'.format(iu_score * 100)
total_pixels = hist.sum()
tp = '{:5.2f}'.format(100 * iu_true_positive[index] / total_pixels)
fp = '{:5.2f}'.format(
iu_false_positive[index] / iu_true_positive[index])
fn = '{:5.2f}'.format(
iu_false_negative[index] / iu_true_positive[index])
precision = '{:5.2f}'.format(
iu_true_positive[index] / (iu_true_positive[index] + iu_false_positive[index]))
recall = '{:5.2f}'.format(
iu_true_positive[index] / (iu_true_positive[index] + iu_false_negative[index]))
sota = '{:5.2f}'.format(sota_iu_results[index])
row = (index, class_name, iu_string, sota,
tp, fp, fn, precision, recall)
self._print_table_row(row)
self._print_table_end()
self._print_page_end()
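    # Note on the inputs expected by create_main (as implied by the expressions above):
    # `iu` is an iterable of (iou_score, class_index) pairs and `hist` is a per-class
    # confusion matrix whose diagonal holds true-positive pixel counts; row sums minus
    # the diagonal give the false positives and column sums minus the diagonal give the
    # false negatives.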
def main():
images = glob.glob('dump_imgs_train/*.png')
images = [i for i in images if 'mask' not in i]
ip = ResultsPage('test page', 'dd.html')
for img in images:
basename = os.path.splitext(img)[0]
mask_img = basename + '_mask.png'
ip.add_table(((img, 'image'), (mask_img, 'mask')))
ip.write_page()
| semantic-segmentation-main | utils/results_page.py |
"""
# Code borrowed from:
# https://github.com/zijundeng/pytorch-semantic-segmentation/blob/master/utils/transforms.py
#
#
# MIT License
#
# Copyright (c) 2017 ZijunDeng
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
"""
Standard Transform
"""
import random
import numpy as np
from skimage.filters import gaussian
from skimage.restoration import denoise_bilateral
import torch
from PIL import Image, ImageEnhance
import torchvision.transforms as torch_tr
from config import cfg
from scipy.ndimage.interpolation import shift
from skimage.segmentation import find_boundaries
try:
import accimage
except ImportError:
accimage = None
class RandomVerticalFlip(object):
def __call__(self, img):
if random.random() < 0.5:
return img.transpose(Image.FLIP_TOP_BOTTOM)
return img
class DeNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
class MaskToTensor(object):
def __call__(self, img, blockout_predefined_area=False):
return torch.from_numpy(np.array(img, dtype=np.int32)).long()
class RelaxedBoundaryLossToTensor(object):
"""
Boundary Relaxation
"""
def __init__(self,ignore_id, num_classes):
self.ignore_id=ignore_id
self.num_classes= num_classes
def new_one_hot_converter(self,a):
ncols = self.num_classes+1
out = np.zeros( (a.size,ncols), dtype=np.uint8)
out[np.arange(a.size),a.ravel()] = 1
out.shape = a.shape + (ncols,)
return out
def __call__(self,img):
img_arr = np.array(img)
img_arr[img_arr==self.ignore_id]=self.num_classes
if cfg.STRICTBORDERCLASS != None:
one_hot_orig = self.new_one_hot_converter(img_arr)
mask = np.zeros((img_arr.shape[0],img_arr.shape[1]))
for cls in cfg.STRICTBORDERCLASS:
mask = np.logical_or(mask,(img_arr == cls))
one_hot = 0
border = cfg.BORDER_WINDOW
if (cfg.REDUCE_BORDER_EPOCH !=-1 and cfg.EPOCH > cfg.REDUCE_BORDER_EPOCH):
border = border // 2
border_prediction = find_boundaries(img_arr, mode='thick').astype(np.uint8)
for i in range(-border,border+1):
for j in range(-border, border+1):
shifted= shift(img_arr,(i,j), cval=self.num_classes)
one_hot += self.new_one_hot_converter(shifted)
one_hot[one_hot>1] = 1
if cfg.STRICTBORDERCLASS != None:
one_hot = np.where(np.expand_dims(mask,2), one_hot_orig, one_hot)
one_hot = np.moveaxis(one_hot,-1,0)
if (cfg.REDUCE_BORDER_EPOCH !=-1 and cfg.EPOCH > cfg.REDUCE_BORDER_EPOCH):
one_hot = np.where(border_prediction,2*one_hot,1*one_hot)
# print(one_hot.shape)
return torch.from_numpy(one_hot).byte()
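# Worked sketch of the relaxation above (illustrative only): with cfg.BORDER_WINDOW = 1
# the label map is shifted by every offset in {-1, 0, 1} x {-1, 0, 1}, each shifted map
# is one-hot encoded, and the encodings are summed and clipped to 1. A pixel therefore
# ends up with a 1 for every class that appears anywhere in its 3x3 neighbourhood, so
# the relaxed loss may treat any of those classes as correct near object boundaries.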
class ResizeHeight(object):
def __init__(self, size, interpolation=Image.BILINEAR):
self.target_h = size
self.interpolation = interpolation
def __call__(self, img):
w, h = img.size
target_w = int(w / h * self.target_h)
return img.resize((target_w, self.target_h), self.interpolation)
class FreeScale(object):
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = tuple(reversed(size)) # size: (h, w)
self.interpolation = interpolation
def __call__(self, img):
return img.resize(self.size, self.interpolation)
class FlipChannels(object):
"""
Flip around the x-axis
"""
def __call__(self, img):
img = np.array(img)[:, :, ::-1]
return Image.fromarray(img.astype(np.uint8))
class RandomGaussianBlur(object):
"""
Apply Gaussian Blur
"""
def __call__(self, img):
sigma = 0.15 + random.random() * 1.15
blurred_img = gaussian(np.array(img), sigma=sigma, multichannel=True)
blurred_img *= 255
return Image.fromarray(blurred_img.astype(np.uint8))
class RandomBrightness(object):
def __call__(self, img):
if random.random() < 0.5:
return img
v = random.uniform(0.1, 1.9)
return ImageEnhance.Brightness(img).enhance(v)
class RandomBilateralBlur(object):
"""
Apply Bilateral Filtering
"""
def __call__(self, img):
sigma = random.uniform(0.05, 0.75)
blurred_img = denoise_bilateral(np.array(img), sigma_spatial=sigma, multichannel=True)
blurred_img *= 255
return Image.fromarray(blurred_img.astype(np.uint8))
def _is_pil_image(img):
if accimage is not None:
return isinstance(img, (Image.Image, accimage.Image))
else:
return isinstance(img, Image.Image)
def adjust_brightness(img, brightness_factor):
"""Adjust brightness of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
brightness_factor (float): How much to adjust the brightness. Can be
any non negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.
Returns:
PIL Image: Brightness adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Brightness(img)
img = enhancer.enhance(brightness_factor)
return img
def adjust_contrast(img, contrast_factor):
"""Adjust contrast of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
non negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
PIL Image: Contrast adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Contrast(img)
img = enhancer.enhance(contrast_factor)
return img
def adjust_saturation(img, saturation_factor):
"""Adjust color saturation of an image.
Args:
img (PIL Image): PIL Image to be adjusted.
saturation_factor (float): How much to adjust the saturation. 0 will
give a black and white image, 1 will give the original image while
2 will enhance the saturation by a factor of 2.
Returns:
PIL Image: Saturation adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Color(img)
img = enhancer.enhance(saturation_factor)
return img
def adjust_hue(img, hue_factor):
"""Adjust hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See https://en.wikipedia.org/wiki/Hue for more details on Hue.
Args:
img (PIL Image): PIL Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL Image: Hue adjusted image.
"""
if not(-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
input_mode = img.mode
if input_mode in {'L', '1', 'I', 'F'}:
return img
h, s, v = img.convert('HSV').split()
np_h = np.array(h, dtype=np.uint8)
    # uint8 addition takes care of rotation across boundaries
with np.errstate(over='ignore'):
np_h += np.uint8(hue_factor * 255)
h = Image.fromarray(np_h, 'L')
img = Image.merge('HSV', (h, s, v)).convert(input_mode)
return img
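# Example of the wrap-around above (illustrative): with hue_factor = 0.5 the shift is
# np.uint8(0.5 * 255) = 127, so a hue value of 200 becomes (200 + 127) % 256 = 71,
# i.e. the uint8 addition cyclically rotates the H channel instead of saturating it.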
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
@staticmethod
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness > 0:
brightness_factor = np.random.uniform(max(0, 1 - brightness), 1 + brightness)
transforms.append(
torch_tr.Lambda(lambda img: adjust_brightness(img, brightness_factor)))
if contrast > 0:
contrast_factor = np.random.uniform(max(0, 1 - contrast), 1 + contrast)
transforms.append(
torch_tr.Lambda(lambda img: adjust_contrast(img, contrast_factor)))
if saturation > 0:
saturation_factor = np.random.uniform(max(0, 1 - saturation), 1 + saturation)
transforms.append(
torch_tr.Lambda(lambda img: adjust_saturation(img, saturation_factor)))
if hue > 0:
hue_factor = np.random.uniform(-hue, hue)
transforms.append(
torch_tr.Lambda(lambda img: adjust_hue(img, hue_factor)))
np.random.shuffle(transforms)
transform = torch_tr.Compose(transforms)
return transform
def __call__(self, img):
"""
Args:
img (PIL Image): Input image.
Returns:
PIL Image: Color jittered image.
"""
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
return transform(img)
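# Illustrative usage sketch (not part of the original module); `pil_img` stands in for
# any PIL.Image instance:
#
#   jitter = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
#   augmented = jitter(pil_img)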
| semantic-segmentation-main | transforms/transforms.py |
semantic-segmentation-main | transforms/__init__.py |
|
"""
# Code borrowed from:
# https://github.com/zijundeng/pytorch-semantic-segmentation/blob/master/utils/joint_transforms.py
#
#
# MIT License
#
# Copyright (c) 2017 ZijunDeng
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
import math
import numbers
from PIL import Image, ImageOps
import numpy as np
import random
from config import cfg
# scipy.ndimage utilities used by _ClassUniform.detect_peaks below
from scipy.ndimage import maximum_filter, generate_binary_structure, binary_erosion
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, mask):
assert img.size == mask.size
for t in self.transforms:
img, mask = t(img, mask)
return img, mask
def add_margin(pil_img, top, right, bottom, left, margin_color):
"""
Add margin around an image
top, right, bottom, left are the margin widths, in pixels
margin_color is what to use for the margins
"""
width, height = pil_img.size
new_width = width + right + left
new_height = height + top + bottom
result = Image.new(pil_img.mode, (new_width, new_height), margin_color)
result.paste(pil_img, (left, top))
return result
def set_crop_size(crop_size):
if isinstance(crop_size, (list, tuple)):
size = crop_size
elif isinstance(crop_size, numbers.Number):
size = (int(crop_size), int(crop_size))
else:
        raise ValueError('crop_size must be a number or a (h, w) sequence, '
                         'got {}'.format(type(crop_size)))
return size
class RandomCrop(object):
"""
Take a random crop from the image.
First the image or crop size may need to be adjusted if the incoming image
is too small...
If the image is smaller than the crop, then:
the image is padded up to the size of the crop
unless 'nopad', in which case the crop size is shrunk to fit the image
A random crop is taken such that the crop fits within the image.
    if cfg.DATASET.TRANSLATE_AUG_FIX is set, we ensure that there's always
translation randomness of at least that value around the image.
if image < crop_size:
# slide crop within image, random offset
else:
# slide image within crop
"""
def __init__(self, crop_size, nopad=True):
self.size = set_crop_size(crop_size)
self.ignore_index = cfg.DATASET.IGNORE_LABEL
self.nopad = nopad
self.pad_color = (0, 0, 0)
@staticmethod
def crop_in_image(centroid, target_w, target_h, w, h, img, mask):
if centroid is not None:
            # Need to ensure that centroid is covered by crop and that crop
# sits fully within the image
c_x, c_y = centroid
max_x = w - target_w
max_y = h - target_h
x1 = random.randint(c_x - target_w, c_x)
x1 = min(max_x, max(0, x1))
y1 = random.randint(c_y - target_h, c_y)
y1 = min(max_y, max(0, y1))
else:
if w == target_w:
x1 = 0
else:
x1 = random.randint(0, w - target_w)
if h == target_h:
y1 = 0
else:
y1 = random.randint(0, h - target_h)
return [img.crop((x1, y1, x1 + target_w, y1 + target_h)),
mask.crop((x1, y1, x1 + target_w, y1 + target_h))]
def image_in_crop(self, target_w, target_h, w, h, img, mask):
# image smaller than crop, so slide image within crop
x_total_margin = target_w - w
y_total_margin = target_h - h
left = random.randint(0, x_total_margin)
right = x_total_margin - left
top = random.randint(0, y_total_margin)
bottom = y_total_margin - top
slid_image = add_margin(img, top, right, bottom, left,
self.pad_color)
slid_mask = add_margin(mask, top, right, bottom, left,
self.ignore_index)
return [slid_image, slid_mask]
def __call__(self, img, mask, centroid=None):
assert img.size == mask.size
w, h = img.size
target_h, target_w = self.size # ASSUME H, W
if w == target_w and h == target_h:
return [img, mask]
if cfg.DATASET.TRANSLATE_AUG_FIX:
if w < target_w and h < target_h:
return self.image_in_crop(target_w, target_h, w, h, img, mask)
else:
return self.crop_in_image(centroid, target_w, target_h, w, h,
img, mask)
if self.nopad:
# Shrink crop size if image < crop
if target_h > h or target_w > w:
shorter_side = min(w, h)
target_h, target_w = shorter_side, shorter_side
else:
# Pad image if image < crop
if target_h > h:
pad_h = (target_h - h) // 2 + 1
else:
pad_h = 0
if target_w > w:
pad_w = (target_w - w) // 2 + 1
else:
pad_w = 0
border = (pad_w, pad_h, pad_w, pad_h)
if pad_h or pad_w:
img = ImageOps.expand(img, border=border, fill=self.pad_color)
mask = ImageOps.expand(mask, border=border,
fill=self.ignore_index)
w, h = img.size
return self.crop_in_image(centroid, target_w, target_h, w, h,
img, mask)
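# Numeric sketch of the padding branch above (illustrative): with crop_size = 512,
# nopad = False and a 400 x 300 input, pad_w = (512 - 400) // 2 + 1 = 57 and
# pad_h = (512 - 300) // 2 + 1 = 107, so the image is padded to 514 x 514 before a
# random 512 x 512 window is cut from it; with nopad = True the crop itself would
# instead shrink to 300 x 300 (the shorter side).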
class ResizeHeight(object):
def __init__(self, size, interpolation=Image.BICUBIC):
self.target_h = size
self.interpolation = interpolation
def __call__(self, img, mask):
w, h = img.size
target_w = int(w / h * self.target_h)
return (img.resize((target_w, self.target_h), self.interpolation),
mask.resize((target_w, self.target_h), Image.NEAREST))
class CenterCrop(object):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
th, tw = self.size
x1 = int(round((w - tw) / 2.))
y1 = int(round((h - th) / 2.))
return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th))
class CenterCropPad(object):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.ignore_index = cfg.DATASET.IGNORE_LABEL
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
if isinstance(self.size, tuple):
tw, th = self.size[0], self.size[1]
else:
th, tw = self.size, self.size
if w < tw:
pad_x = tw - w
else:
pad_x = 0
if h < th:
pad_y = th - h
else:
pad_y = 0
if pad_x or pad_y:
# left, top, right, bottom
img = ImageOps.expand(img, border=(pad_x, pad_y, pad_x, pad_y), fill=0)
mask = ImageOps.expand(mask, border=(pad_x, pad_y, pad_x, pad_y),
fill=self.ignore_index)
x1 = int(round((w - tw) / 2.))
y1 = int(round((h - th) / 2.))
return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th))
class PadImage(object):
def __init__(self, size):
self.size = size
self.ignore_index = cfg.DATASET.IGNORE_LABEL
def __call__(self, img, mask):
assert img.size == mask.size
th, tw = self.size, self.size
w, h = img.size
if w > tw or h > th :
wpercent = (tw/float(w))
target_h = int((float(img.size[1])*float(wpercent)))
img, mask = img.resize((tw, target_h), Image.BICUBIC), mask.resize((tw, target_h), Image.NEAREST)
w, h = img.size
##Pad
img = ImageOps.expand(img, border=(0,0,tw-w, th-h), fill=0)
mask = ImageOps.expand(mask, border=(0,0,tw-w, th-h), fill=self.ignore_index)
return img, mask
class RandomHorizontallyFlip(object):
def __call__(self, img, mask):
if random.random() < 0.5:
return img.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(
Image.FLIP_LEFT_RIGHT)
return img, mask
class FreeScale(object):
def __init__(self, size):
self.size = tuple(reversed(size)) # size: (h, w)
def __call__(self, img, mask):
assert img.size == mask.size
return img.resize(self.size, Image.BICUBIC), mask.resize(self.size, Image.NEAREST)
class Scale(object):
"""
Scale image such that longer side is == size
"""
def __init__(self, size):
self.size = size
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
if w > h:
long_edge = w
else:
long_edge = h
if long_edge == self.size:
return img, mask
scale = self.size / long_edge
target_w = int(w * scale)
target_h = int(h * scale)
target_size = (target_w, target_h)
return img.resize(target_size, Image.BILINEAR), \
mask.resize(target_size, Image.NEAREST)
class new_Scale(object):
"""
Scale image such that longer side is == size
"""
def __init__(self, size):
self.size = size
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
if (w >= h and w == self.size) or (h >= w and h == self.size):
return img, mask
if w > h:
ow = self.size
oh = int(self.size * h / w)
return img.resize((ow, oh), Image.BICUBIC), mask.resize(
(ow, oh), Image.NEAREST)
else:
oh = self.size
ow = int(self.size * w / h)
return img.resize((ow, oh), Image.BICUBIC), mask.resize(
(ow, oh), Image.NEAREST)
class ScaleMin(object):
"""
Scale image such that shorter side is == size
"""
def __init__(self, size):
self.size = size
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
if (w <= h and w == self.size) or (h <= w and h == self.size):
return img, mask
if w < h:
ow = self.size
oh = int(self.size * h / w)
return img.resize((ow, oh), Image.BICUBIC), mask.resize(
(ow, oh), Image.NEAREST)
else:
oh = self.size
ow = int(self.size * w / h)
return img.resize((ow, oh), Image.BICUBIC), mask.resize(
(ow, oh), Image.NEAREST)
class Resize(object):
"""
Resize image to exact size of crop
"""
def __init__(self, crop_size):
self.size = set_crop_size(crop_size)
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
if (w == h and w == self.size):
return img, mask
return (img.resize(self.size, Image.BICUBIC),
mask.resize(self.size, Image.NEAREST))
class RandomSizedCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, mask):
assert img.size == mask.size
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(0.45, 1.0) * area
aspect_ratio = random.uniform(0.5, 2)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
x1 = random.randint(0, img.size[0] - w)
y1 = random.randint(0, img.size[1] - h)
img = img.crop((x1, y1, x1 + w, y1 + h))
mask = mask.crop((x1, y1, x1 + w, y1 + h))
assert (img.size == (w, h))
return img.resize((self.size, self.size), Image.BICUBIC),\
mask.resize((self.size, self.size), Image.NEAREST)
# Fallback
scale = Scale(self.size)
crop = CenterCrop(self.size)
return crop(*scale(img, mask))
class RandomRotate(object):
def __init__(self, degree):
self.degree = degree
def __call__(self, img, mask):
rotate_degree = random.random() * 2 * self.degree - self.degree
return img.rotate(rotate_degree, Image.BICUBIC), mask.rotate(
rotate_degree, Image.NEAREST)
class RandomSizeAndCrop(object):
def __init__(self, crop_size, crop_nopad,
scale_min=0.5, scale_max=2.0, full_size=False,
pre_size=None):
self.crop = RandomCrop(crop_size, nopad=crop_nopad)
self.scale_min = scale_min
self.scale_max = scale_max
self.full_size = full_size
self.pre_size = pre_size
def __call__(self, img, mask, centroid=None):
assert img.size == mask.size
scale_amt = random.uniform(self.scale_min, self.scale_max)
if self.pre_size is not None:
in_w, in_h = img.size
# find long edge
if in_w > in_h:
# long is width
pre_scale = self.pre_size / in_w
else:
pre_scale = self.pre_size / in_h
scale_amt *= pre_scale
if self.full_size:
self.crop.size = img.size[1], img.size[0]
w, h = [int(i * scale_amt) for i in img.size]
if centroid is not None:
centroid = [int(c * scale_amt) for c in centroid]
resized_img, resized_mask = (img.resize((w, h), Image.BICUBIC),
mask.resize((w, h), Image.NEAREST))
img_mask = self.crop(resized_img, resized_mask, centroid)
img_mask.append(scale_amt)
return img_mask
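# Illustrative walk-through of the composition above: for a 2048 x 1024 input with
# pre_size = 1024, the long edge is the width, so pre_scale = 1024 / 2048 = 0.5; if the
# drawn scale_amt is 1.5 the effective factor becomes 0.75 and the image is resized to
# 1536 x 768 before RandomCrop is applied, with any centroid scaled by the same factor.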
class SlidingCropOld(object):
def __init__(self, crop_size, stride_rate):
self.crop_size = crop_size
self.stride_rate = stride_rate
self.ignore_label = cfg.DATASET.IGNORE_LABEL
def _pad(self, img, mask):
h, w = img.shape[: 2]
pad_h = max(self.crop_size - h, 0)
pad_w = max(self.crop_size - w, 0)
img = np.pad(img, ((0, pad_h), (0, pad_w), (0, 0)), 'constant')
mask = np.pad(mask, ((0, pad_h), (0, pad_w)), 'constant',
constant_values=self.ignore_label)
return img, mask
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
long_size = max(h, w)
img = np.array(img)
mask = np.array(mask)
if long_size > self.crop_size:
stride = int(math.ceil(self.crop_size * self.stride_rate))
h_step_num = int(math.ceil((h - self.crop_size) / float(stride))) + 1
w_step_num = int(math.ceil((w - self.crop_size) / float(stride))) + 1
img_sublist, mask_sublist = [], []
for yy in range(h_step_num):
for xx in range(w_step_num):
sy, sx = yy * stride, xx * stride
ey, ex = sy + self.crop_size, sx + self.crop_size
img_sub = img[sy: ey, sx: ex, :]
mask_sub = mask[sy: ey, sx: ex]
img_sub, mask_sub = self._pad(img_sub, mask_sub)
img_sublist.append(
Image.fromarray(
img_sub.astype(
np.uint8)).convert('RGB'))
mask_sublist.append(
Image.fromarray(
mask_sub.astype(
np.uint8)).convert('P'))
return img_sublist, mask_sublist
else:
img, mask = self._pad(img, mask)
img = Image.fromarray(img.astype(np.uint8)).convert('RGB')
mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
return img, mask
class SlidingCrop(object):
def __init__(self, crop_size, stride_rate):
self.crop_size = crop_size
self.stride_rate = stride_rate
self.ignore_label = cfg.DATASET.IGNORE_LABEL
def _pad(self, img, mask):
h, w = img.shape[: 2]
pad_h = max(self.crop_size - h, 0)
pad_w = max(self.crop_size - w, 0)
img = np.pad(img, ((0, pad_h), (0, pad_w), (0, 0)), 'constant')
mask = np.pad(mask, ((0, pad_h), (0, pad_w)), 'constant',
constant_values=self.ignore_label)
return img, mask, h, w
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
long_size = max(h, w)
img = np.array(img)
mask = np.array(mask)
if long_size > self.crop_size:
stride = int(math.ceil(self.crop_size * self.stride_rate))
h_step_num = int(math.ceil((h - self.crop_size) / float(stride))) + 1
w_step_num = int(math.ceil((w - self.crop_size) / float(stride))) + 1
img_slices, mask_slices, slices_info = [], [], []
for yy in range(h_step_num):
for xx in range(w_step_num):
sy, sx = yy * stride, xx * stride
ey, ex = sy + self.crop_size, sx + self.crop_size
img_sub = img[sy: ey, sx: ex, :]
mask_sub = mask[sy: ey, sx: ex]
img_sub, mask_sub, sub_h, sub_w = self._pad(img_sub, mask_sub)
img_slices.append(
Image.fromarray(
img_sub.astype(
np.uint8)).convert('RGB'))
mask_slices.append(
Image.fromarray(
mask_sub.astype(
np.uint8)).convert('P'))
slices_info.append([sy, ey, sx, ex, sub_h, sub_w])
return img_slices, mask_slices, slices_info
else:
img, mask, sub_h, sub_w = self._pad(img, mask)
img = Image.fromarray(img.astype(np.uint8)).convert('RGB')
mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
return [img], [mask], [[0, sub_h, 0, sub_w, sub_h, sub_w]]
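# Tiling sketch for the sliding window above (illustrative): with crop_size = 512,
# stride_rate = 0.5 and a 1024 x 512 image, stride = ceil(512 * 0.5) = 256,
# w_step_num = ceil((1024 - 512) / 256) + 1 = 3 and h_step_num = 1, so three 512 x 512
# tiles are produced together with their [sy, ey, sx, ex, sub_h, sub_w] records.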
class _ClassUniform(object):
def __init__(self, size, crop_nopad, scale_min=0.5, scale_max=2.0, ignore_index=0,
class_list=[16, 15, 14]):
"""
This is the initialization for class uniform sampling
:param size: crop size (int)
:param crop_nopad: Padding or no padding (bool)
:param scale_min: Minimum Scale (float)
:param scale_max: Maximum Scale (float)
:param ignore_index: The index value to ignore in the GT images (unsigned int)
:param class_list: A list of class to sample around, by default Truck, train, bus
"""
self.size = size
        self.crop = RandomCrop(self.size, nopad=crop_nopad)  # ignore label comes from cfg
        if isinstance(class_list, str):  # accept "16,15,14" or a list of ids
            class_list = class_list.replace(" ", "").split(",")
        self.class_list = [int(c) for c in class_list]
self.scale_min = scale_min
self.scale_max = scale_max
def detect_peaks(self, image):
"""
        Takes an image and detects the peaks using a local maximum filter.
        Returns a boolean mask of the peaks (i.e. 1 when
        the pixel's value is the neighborhood maximum, 0 otherwise)
        :param image: A 2D input image
        :return: Binary output image of the same size as the input, with pixel value
        equal to 1 indicating that there is a peak at that point
"""
# define an 8-connected neighborhood
neighborhood = generate_binary_structure(2, 2)
# apply the local maximum filter; all pixel of maximal value
# in their neighborhood are set to 1
local_max = maximum_filter(image, footprint=neighborhood) == image
# local_max is a mask that contains the peaks we are
# looking for, but also the background.
# In order to isolate the peaks we must remove the background from the mask.
# we create the mask of the background
background = (image == 0)
# a little technicality: we must erode the background in order to
        # successfully subtract it from local_max, otherwise a line will
# appear along the background border (artifact of the local maximum filter)
eroded_background = binary_erosion(background, structure=neighborhood,
border_value=1)
# we obtain the final mask, containing only peaks,
# by removing the background from the local_max mask (xor operation)
detected_peaks = local_max ^ eroded_background
return detected_peaks
def __call__(self, img, mask):
"""
:param img: PIL Input Image
:param mask: PIL Input Mask
:return: PIL output PIL (mask, crop) of self.crop_size
"""
assert img.size == mask.size
scale_amt = random.uniform(self.scale_min, self.scale_max)
w = int(scale_amt * img.size[0])
h = int(scale_amt * img.size[1])
if scale_amt < 1.0:
img, mask = img.resize((w, h), Image.BICUBIC), mask.resize((w, h),
Image.NEAREST)
return self.crop(img, mask)
else:
# Smart Crop ( Class Uniform's ABN)
origw, origh = mask.size
img_new, mask_new = \
img.resize((w, h), Image.BICUBIC), mask.resize((w, h), Image.NEAREST)
interested_class = self.class_list # [16, 15, 14] # Train, Truck, Bus
data = np.array(mask)
arr = np.zeros((1024, 2048))
for class_of_interest in interested_class:
# hist = np.histogram(data==class_of_interest)
map = np.where(data == class_of_interest, data, 0)
map = map.astype('float64') / map.sum() / class_of_interest
map[np.isnan(map)] = 0
arr = arr + map
origarr = arr
window_size = 250
# Given a list of classes of interest find the points on the image that are
# of interest to crop from
sum_arr = np.zeros((1024, 2048)).astype('float32')
tmp = np.zeros((1024, 2048)).astype('float32')
for x in range(0, arr.shape[0] - window_size, window_size):
for y in range(0, arr.shape[1] - window_size, window_size):
sum_arr[int(x + window_size / 2), int(y + window_size / 2)] = origarr[
x:x + window_size,
y:y + window_size].sum()
tmp[x:x + window_size, y:y + window_size] = \
origarr[x:x + window_size, y:y + window_size].sum()
# Scaling Ratios in X and Y for non-uniform images
ratio = (float(origw) / w, float(origh) / h)
output = self.detect_peaks(sum_arr)
coord = (np.column_stack(np.where(output))).tolist()
# Check if there are any peaks in the images to crop from if not do standard
# cropping behaviour
if len(coord) == 0:
return self.crop(img_new, mask_new)
else:
# If peaks are detected, random peak selection followed by peak
# coordinate scaling to new scaled image and then random
# cropping around the peak point in the scaled image
randompick = np.random.randint(len(coord))
y, x = coord[randompick]
y, x = int(y * ratio[0]), int(x * ratio[1])
window_size = window_size * ratio[0]
cropx = random.uniform(
max(0, (x - window_size / 2) - (self.size - window_size)),
max((x - window_size / 2), (x - window_size / 2) - (
(w - window_size) - x + window_size / 2)))
cropy = random.uniform(
max(0, (y - window_size / 2) - (self.size - window_size)),
max((y - window_size / 2), (y - window_size / 2) - (
(h - window_size) - y + window_size / 2)))
return_img = img_new.crop(
(cropx, cropy, cropx + self.size, cropy + self.size))
return_mask = mask_new.crop(
(cropx, cropy, cropx + self.size, cropy + self.size))
return (return_img, return_mask)
| semantic-segmentation-main | transforms/joint_transforms.py |
###############################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import random
import torch
from torch.utils.tensorboard import SummaryWriter
from flowtron_plotting_utils import plot_alignment_to_numpy
from flowtron_plotting_utils import plot_gate_outputs_to_numpy
class FlowtronLogger(SummaryWriter):
def __init__(self, logdir):
super(FlowtronLogger, self).__init__(logdir)
def log_training(self, loss, learning_rate, iteration):
self.add_scalar("training/loss", loss, iteration)
self.add_scalar("learning_rate", learning_rate, iteration)
def log_validation(self, loss, loss_nll, loss_gate, loss_ctc,
attns, gate_pred, gate_out, iteration):
self.add_scalar("validation/loss", loss, iteration)
self.add_scalar("validation/loss_nll", loss_nll, iteration)
self.add_scalar("validation/loss_gate", loss_gate, iteration)
self.add_scalar("validation/loss_ctc", loss_ctc, iteration)
idx = random.randint(0, len(gate_out) - 1)
for i in range(len(attns)):
self.add_image(
'attention_weights_{}'.format(i),
plot_alignment_to_numpy(attns[i][idx].data.cpu().numpy().T),
iteration,
dataformats='HWC')
if gate_pred is not None:
gate_pred = gate_pred.transpose(0, 1)[:, :, 0]
self.add_image(
"gate",
plot_gate_outputs_to_numpy(
gate_out[idx].data.cpu().numpy(),
torch.sigmoid(gate_pred[idx]).data.cpu().numpy()),
iteration, dataformats='HWC')
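# Illustrative usage sketch (hypothetical values, not part of the original module):
#
#   logger = FlowtronLogger('outdir/logs')
#   logger.log_training(loss.item(), learning_rate, iteration)
#   logger.log_validation(val_loss, val_nll, val_gate, val_ctc,
#                         attns, gate_pred, gate_out, iteration)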
| flowtron-master | flowtron_logger.py |
###############################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
from typing import Optional
import numpy as np
import torch
from torch import nn, Tensor
from torch.nn import functional as F
def get_gate_mask_from_lengths(lengths):
"""Constructs binary mask from a 1D torch tensor of input lengths
Args:
lengths (torch.tensor): 1D tensor
Returns:
mask (torch.tensor): num_sequences x max_length x 1 binary tensor
"""
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
mask = (ids < lengths.unsqueeze(1)).bool()
return mask
def get_mask_from_lengths(lengths):
"""Constructs binary mask from a 1D torch tensor of input lengths
Args:
lengths (torch.tensor): 1D tensor
Returns:
mask (torch.tensor): num_sequences x max_length x 1 binary tensor
"""
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
mask = (ids < lengths.unsqueeze(1)).bool()
return mask
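# Worked example of the mask construction above (illustrative): for a CUDA tensor
# lengths = [2, 4], max_len = 4 and ids = [0, 1, 2, 3], so the result is
#   [[True, True, False, False],
#    [True, True, True,  True ]]
# i.e. one row per sequence with True marking the valid (non-padded) positions.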
def masked_instance_norm(input: Tensor, mask: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor],
weight: Optional[Tensor], bias: Optional[Tensor], use_input_stats: bool,
momentum: float, eps: float = 1e-5) -> Tensor:
r"""Applies Masked Instance Normalization for each channel in each data sample in a batch.
See :class:`~MaskedInstanceNorm1d` for details.
"""
if not use_input_stats and (running_mean is None or running_var is None):
raise ValueError('Expected running_mean and running_var to be not None when use_input_stats=False')
shape = input.shape
b, c = shape[:2]
num_dims = len(shape[2:])
_dims = tuple(range(-num_dims, 0))
_slice = (...,) + (None,) * num_dims
running_mean_ = running_mean[None, :].repeat(b, 1) if running_mean is not None else None
running_var_ = running_var[None, :].repeat(b, 1) if running_mean is not None else None
if use_input_stats:
lengths = mask.sum(_dims)
mean = (input * mask).sum(_dims) / lengths # (N, C)
var = (((input - mean[_slice]) * mask) ** 2).sum(_dims) / lengths # (N, C)
if running_mean is not None:
running_mean_.mul_(1 - momentum).add_(momentum * mean.detach())
running_mean.copy_(running_mean_.view(b, c).mean(0, keepdim=False))
if running_var is not None:
running_var_.mul_(1 - momentum).add_(momentum * var.detach())
running_var.copy_(running_var_.view(b, c).mean(0, keepdim=False))
else:
mean, var = running_mean_.view(b, c), running_var_.view(b, c)
out = (input - mean[_slice]) / torch.sqrt(var[_slice] + eps) # (N, C, ...)
if weight is not None and bias is not None:
out = out * weight[None, :][_slice] + bias[None, :][_slice]
return out
class MaskedInstanceNorm1d(nn.InstanceNorm1d):
r"""Applies Instance Normalization over a masked 3D input
(a mini-batch of 1D inputs with additional channel dimension)..
See documentation of :class:`~torch.nn.InstanceNorm1d` for details.
Shape:
- Input: :math:`(N, C, L)`
- Mask: :math:`(N, 1, L)`
- Output: :math:`(N, C, L)` (same shape as input)
"""
def __init__(self, num_features: int, eps: float = 1e-5, momentum: float = 0.1,
affine: bool = False, track_running_stats: bool = False) -> None:
super(MaskedInstanceNorm1d, self).__init__(
num_features, eps, momentum, affine, track_running_stats)
def forward(self, input: Tensor, mask: Tensor = None) -> Tensor:
self._check_input_dim(input)
if mask is not None:
self._check_input_dim(mask)
if mask is None:
return F.instance_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps
)
else:
return masked_instance_norm(
input, mask, self.running_mean, self.running_var, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps
)
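# Minimal usage sketch (illustrative, assuming CUDA tensors as elsewhere in this file):
#
#   norm = MaskedInstanceNorm1d(80, affine=True)
#   # x: (N, 80, L) padded features, mask: (N, 1, L) boolean validity mask
#   y = norm(x, mask=mask)   # per-channel statistics use only the valid positions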
class AttentionConditioningLayer(nn.Module):
"""Adapted from the LocationLayer in
https://github.com/NVIDIA/tacotron2/blob/master/model.py
1D Conv model over a concatenation of the previous attention and the
accumulated attention values """
def __init__(self, input_dim=2, attention_n_filters=32,
attention_kernel_sizes=[5, 3], attention_dim=640):
super(AttentionConditioningLayer, self).__init__()
self.location_conv_hidden = ConvNorm(
input_dim, attention_n_filters,
kernel_size=attention_kernel_sizes[0], padding=None, bias=True,
stride=1, dilation=1, w_init_gain='relu')
self.location_conv_out = ConvNorm(
attention_n_filters, attention_dim,
kernel_size=attention_kernel_sizes[1], padding=None, bias=True,
stride=1, dilation=1, w_init_gain='sigmoid')
self.conv_layers = nn.Sequential(self.location_conv_hidden,
nn.ReLU(),
self.location_conv_out,
nn.Sigmoid())
def forward(self, attention_weights_cat):
return self.conv_layers(attention_weights_cat)
class AttentionCTCLoss(torch.nn.Module):
def __init__(self, blank_logprob=-1):
super(AttentionCTCLoss, self).__init__()
self.log_softmax = torch.nn.LogSoftmax(dim=3)
self.blank_logprob = blank_logprob
self.CTCLoss = nn.CTCLoss(zero_infinity=True)
def forward(self, attn, in_lens, out_lens, attn_logprob):
assert attn_logprob is not None
key_lens = in_lens
query_lens = out_lens
attn_logprob_padded = F.pad(input=attn_logprob,
pad=(1, 0, 0, 0, 0, 0, 0, 0),
value=self.blank_logprob)
cost_total = 0.0
for bid in range(attn_logprob.shape[0]):
target_seq = torch.arange(1, key_lens[bid]+1).unsqueeze(0)
curr_logprob = attn_logprob_padded[bid].permute(1, 0, 2)[
:query_lens[bid],
:,
:key_lens[bid]+1]
curr_logprob = self.log_softmax(curr_logprob[None])[0]
ctc_cost = self.CTCLoss(curr_logprob, target_seq,
input_lengths=query_lens[bid:bid+1],
target_lengths=key_lens[bid:bid+1])
cost_total += ctc_cost
cost = cost_total/attn_logprob.shape[0]
return cost
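# Shape note for the loss above (as implied by the indexing): `attn_logprob` is expected
# as (batch, 1, max_out_len, max_in_len); padding one blank column at key index 0 lets
# the standard CTCLoss treat text positions 1..key_len as targets, which pushes the
# attention matrix toward a monotonic alignment over the text.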
class FlowtronLoss(torch.nn.Module):
def __init__(self, sigma=1.0, gm_loss=False, gate_loss=True,
use_ctc_loss=False, ctc_loss_weight=0.0,
blank_logprob=-1):
super(FlowtronLoss, self).__init__()
self.sigma = sigma
self.gate_criterion = nn.BCEWithLogitsLoss(reduction='none')
self.gm_loss = gm_loss
self.gate_loss = gate_loss
self.use_ctc_loss = use_ctc_loss
self.ctc_loss_weight = ctc_loss_weight
self.blank_logprob = blank_logprob
self.attention_loss = AttentionCTCLoss(
blank_logprob=self.blank_logprob)
def forward(self, model_output, gate_target,
in_lengths, out_lengths, is_validation=False):
z, log_s_list, gate_pred, attn_list, attn_logprob_list, \
mean, log_var, prob = model_output
# create mask for outputs computed on padded data
mask = get_mask_from_lengths(out_lengths).transpose(0, 1)[..., None]
mask_inverse = ~mask
mask, mask_inverse = mask.float(), mask_inverse.float()
n_mel_dims = z.size(2)
n_elements = mask.sum()
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s * mask)
else:
log_s_total = log_s_total + torch.sum(log_s * mask)
if self.gm_loss:
mask = mask[..., None] # T, B, 1, Dummy
z = z[..., None] # T, B, Mel, Dummy
mean = mean[None] # Dummy, Dummy or B, Mel, Components
log_var = log_var[None] # Dummy, Dummy or B, Mel, Components
prob = prob[None, :, None] # Dummy, B, Dummy, Components
_z = -(z - mean)**2 / (2 * torch.exp(log_var))
_zmax = _z.max(dim=3, keepdim=True)[0] # T, B, 80, Dummy
_z = prob * torch.exp(_z - _zmax) / torch.sqrt(torch.exp(log_var))
_z = _zmax + torch.log(torch.sum(_z, dim=3, keepdim=True))
nll = -torch.sum(mask * _z)
loss = nll - log_s_total
mask = mask[..., 0]
else:
z = z * mask
loss = torch.sum(z*z)/(2*self.sigma*self.sigma) - log_s_total
loss = loss / (n_elements * n_mel_dims)
gate_loss = torch.zeros(1, device=z.device)
if self.gate_loss > 0:
gate_pred = (gate_pred * mask)
gate_pred = gate_pred[..., 0].permute(1, 0)
gate_loss = self.gate_criterion(gate_pred, gate_target)
gate_loss = gate_loss.permute(1, 0) * mask[:, :, 0]
gate_loss = gate_loss.sum() / n_elements
loss_ctc = torch.zeros_like(gate_loss, device=z.device)
if self.use_ctc_loss:
for cur_flow_idx, flow_attn in enumerate(attn_list):
cur_attn_logprob = attn_logprob_list[cur_flow_idx]
# flip and send log probs for back step
if cur_flow_idx % 2 != 0:
if cur_attn_logprob is not None:
for k in range(cur_attn_logprob.size(0)):
cur_attn_logprob[k] = cur_attn_logprob[k].roll(
-out_lengths[k].item(),
dims=0)
cur_attn_logprob = torch.flip(cur_attn_logprob, (1, ))
cur_flow_ctc_loss = self.attention_loss(
flow_attn.unsqueeze(1),
in_lengths,
out_lengths,
attn_logprob=cur_attn_logprob.unsqueeze(1))
# flip the logprob back to be in backward direction
if cur_flow_idx % 2 != 0:
if cur_attn_logprob is not None:
cur_attn_logprob = torch.flip(cur_attn_logprob, (1, ))
for k in range(cur_attn_logprob.size(0)):
cur_attn_logprob[k] = cur_attn_logprob[k].roll(
out_lengths[k].item(),
dims=0)
loss_ctc += cur_flow_ctc_loss
# make CTC loss independent of number of flows by taking mean
loss_ctc = loss_ctc / float(len(attn_list))
return loss, gate_loss, loss_ctc
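# The non-GM branch above is the usual normalizing-flow negative log-likelihood,
#   loss = sum(z ** 2) / (2 * sigma ** 2) - sum(log|s|),
# averaged over the valid (unpadded) elements times the number of mel channels; the
# gate and CTC terms are returned separately so the caller can weight them.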
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class GaussianMixture(torch.nn.Module):
def __init__(self, n_hidden, n_components, n_mel_channels, fixed_gaussian,
mean_scale):
super(GaussianMixture, self).__init__()
self.n_mel_channels = n_mel_channels
self.n_components = n_components
self.fixed_gaussian = fixed_gaussian
self.mean_scale = mean_scale
# TODO: fuse into one dense n_components * 3
self.prob_layer = LinearNorm(n_hidden, n_components)
if not fixed_gaussian:
self.mean_layer = LinearNorm(
n_hidden, n_mel_channels * n_components)
self.log_var_layer = LinearNorm(
n_hidden, n_mel_channels * n_components)
else:
mean = self.generate_mean(n_mel_channels, n_components, mean_scale)
log_var = self.generate_log_var(n_mel_channels, n_components)
self.register_buffer('mean', mean.float())
self.register_buffer('log_var', log_var.float())
def generate_mean(self, n_dimensions, n_components, scale=3):
means = torch.eye(n_dimensions).float()
ids = np.random.choice(
range(n_dimensions), n_components, replace=False)
means = means[ids] * scale
means = means.transpose(0, 1)
means = means[None]
return means
def generate_log_var(self, n_dimensions, n_components):
log_var = torch.zeros(1, n_dimensions, n_components).float()
return log_var
def generate_prob(self):
return torch.ones(1, 1).float()
def forward(self, outputs, bs):
prob = torch.softmax(self.prob_layer(outputs), dim=1)
if not self.fixed_gaussian:
mean = self.mean_layer(outputs).view(
bs, self.n_mel_channels, self.n_components)
log_var = self.log_var_layer(outputs).view(
bs, self.n_mel_channels, self.n_components)
else:
mean = self.mean
log_var = self.log_var
return mean, log_var, prob
class MelEncoder(nn.Module):
"""Encoder module:
    - A stack of 1-d convolution banks (two by default)
- Bidirectional LSTM
"""
def __init__(self, encoder_embedding_dim=512, encoder_kernel_size=3,
encoder_n_convolutions=2, norm_fn=MaskedInstanceNorm1d):
super(MelEncoder, self).__init__()
convolutions = []
for i in range(encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(80 if i == 0 else encoder_embedding_dim,
encoder_embedding_dim,
kernel_size=encoder_kernel_size, stride=1,
padding=int((encoder_kernel_size - 1) / 2),
dilation=1, w_init_gain='relu'),
norm_fn(encoder_embedding_dim, affine=True))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(encoder_embedding_dim,
int(encoder_embedding_dim / 2), 1,
bidirectional=True)
def run_padded_sequence(self, sorted_idx, unsort_idx,
lens, padded_data, recurrent_model):
"""Sorts input data by previded ordering (and un-ordering)
and runs the packed data through the recurrent model
Args:
sorted_idx (torch.tensor): 1D sorting index
unsort_idx (torch.tensor): 1D unsorting index (inverse of sorted_idx)
lens: lengths of input data (sorted in descending order)
padded_data (torch.tensor): input sequences (padded)
recurrent_model (nn.Module): recurrent model through which to run the data
Returns:
hidden_vectors (torch.tensor): outputs of the RNN, in the original, unsorted, ordering
"""
# sort the data by decreasing length using provided index
# we assume batch index is in dim=1
padded_data = padded_data[:, sorted_idx]
padded_data = nn.utils.rnn.pack_padded_sequence(padded_data, lens)
hidden_vectors = recurrent_model(padded_data)[0]
hidden_vectors, _ = nn.utils.rnn.pad_packed_sequence(hidden_vectors)
# unsort the results at dim=1 and return
hidden_vectors = hidden_vectors[:, unsort_idx]
return hidden_vectors
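    # Index bookkeeping example for the helper above (illustrative): if the lengths sort
    # with sorted_idx = [2, 0, 1], the matching unsort_idx is [1, 2, 0], so the packed
    # LSTM outputs are returned in the caller's original batch order.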
def forward(self, x, lens):
mask = get_mask_from_lengths(lens).unsqueeze(1) if x.size(0) > 1 else None
for conv, norm in self.convolutions:
if mask is not None:
x.masked_fill_(~mask, 0.) # zero out padded values before applying convolution
x = F.dropout(F.relu(norm(conv(x), mask=mask)), 0.5, self.training)
del mask
x = x.permute(2, 0, 1) # (N, C, L) -> (L, N, C)
self.lstm.flatten_parameters()
if lens is not None:
# collect decreasing length indices
lens, ids = torch.sort(lens, descending=True)
original_ids = [0] * lens.size(0)
for i in range(len(ids)):
original_ids[ids[i]] = i
x = self.run_padded_sequence(ids, original_ids, lens, x, self.lstm)
else:
x, _ = self.lstm(x)
# average pooling over time dimension
x = torch.mean(x, dim=0)
return x
def infer(self, x):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
return outputs
class DenseLayer(nn.Module):
def __init__(self, in_dim=1024, sizes=[1024, 1024]):
super(DenseLayer, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=True)
for (in_size, out_size) in zip(in_sizes, sizes)])
def forward(self, x):
for linear in self.layers:
x = torch.tanh(linear(x))
return x
class Encoder(nn.Module):
"""Encoder module:
- Three 1-d convolution banks
- Bidirectional LSTM
"""
def __init__(self, encoder_n_convolutions=3, encoder_embedding_dim=512,
encoder_kernel_size=5, norm_fn=nn.BatchNorm1d):
super(Encoder, self).__init__()
convolutions = []
for _ in range(encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(encoder_embedding_dim,
encoder_embedding_dim,
kernel_size=encoder_kernel_size, stride=1,
padding=int((encoder_kernel_size - 1) / 2),
dilation=1, w_init_gain='relu'),
norm_fn(encoder_embedding_dim, affine=True))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(encoder_embedding_dim,
int(encoder_embedding_dim / 2), 1,
batch_first=True, bidirectional=True)
def forward(self, x, in_lens):
"""
Args:
x (torch.tensor): N x C x L padded input of text embeddings
in_lens (torch.tensor): 1D tensor of sequence lengths
"""
mask = get_mask_from_lengths(in_lens).unsqueeze(1) if x.size(0) > 1 else None
for conv, norm in self.convolutions:
if mask is not None:
x.masked_fill_(~mask, 0.) # zero out padded values before applying convolution
x = F.dropout(F.relu(norm(conv(x), mask=mask)), 0.5, self.training)
del mask
x = x.transpose(1, 2)
x = nn.utils.rnn.pack_padded_sequence(x, in_lens.cpu(), batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
def infer(self, x):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
return outputs
class Attention(torch.nn.Module):
def __init__(self, n_mel_channels=80, n_speaker_dim=128,
n_text_channels=512, n_att_channels=128, temperature=1.0):
super(Attention, self).__init__()
self.temperature = temperature
self.softmax = torch.nn.Softmax(dim=2)
self.query = LinearNorm(n_mel_channels,
n_att_channels, bias=False, w_init_gain='tanh')
self.key = LinearNorm(n_text_channels+n_speaker_dim,
n_att_channels, bias=False, w_init_gain='tanh')
self.value = LinearNorm(n_text_channels+n_speaker_dim,
n_att_channels, bias=False,
w_init_gain='tanh')
self.v = LinearNorm(n_att_channels, 1, bias=False, w_init_gain='tanh')
self.score_mask_value = -float("inf")
def compute_attention_posterior(self, attn, attn_prior, mask=None,
eps=1e-20):
attn_prior = torch.log(attn_prior.float() + eps)
attn = torch.log(attn.float() + eps)
attn_posterior = attn + attn_prior
attn_logprob = attn_posterior.clone()
if mask is not None:
attn_posterior.data.masked_fill_(
mask.transpose(1, 2), self.score_mask_value)
attn_posterior = self.softmax(attn_posterior)
return attn_posterior, attn_logprob
def forward(self, queries, keys, values, mask=None, attn=None,
attn_prior=None):
"""
returns:
attention weights batch x mel_seq_len x text_seq_len
attention_context batch x featdim x mel_seq_len
sums to 1 over text_seq_len(keys)
"""
if attn is None:
keys = self.key(keys).transpose(0, 1)
values = self.value(values) if hasattr(self, 'value') else values
values = values.transpose(0, 1)
queries = self.query(queries).transpose(0, 1)
attn = self.v(torch.tanh((queries[:, :, None] + keys[:, None])))
attn = attn[..., 0] / self.temperature
if mask is not None:
attn.data.masked_fill_(mask.transpose(1, 2),
self.score_mask_value)
attn = self.softmax(attn)
if attn_prior is not None:
attn, attn_logprob = self.compute_attention_posterior(
attn, attn_prior, mask)
else:
attn_logprob = torch.log(attn.float() + 1e-8)
else:
attn_logprob = None
values = self.value(values)
values = values.transpose(0, 1)
output = torch.bmm(attn, values)
output = output.transpose(1, 2)
return output, attn, attn_logprob
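# Illustrative shape check for the content-based attention above. A minimal
# sketch assuming the default sizes (n_mel_channels=80, n_speaker_dim=128,
# n_text_channels=512, n_att_channels=128); it is illustrative only and is
# not called anywhere.
def _attention_shape_example():
    attn_layer = Attention()
    queries = torch.randn(40, 2, 80)       # mel_seq_len x batch x n_mel_channels
    keys = torch.randn(25, 2, 512 + 128)   # text_seq_len x batch x (text + speaker) dim
    context, attn, attn_logprob = attn_layer(queries, keys, keys)
    # context: batch x n_att_channels x mel_seq_len -> (2, 128, 40)
    # attn:    batch x mel_seq_len x text_seq_len   -> (2, 40, 25); rows sum to 1
    return context.shape, attn.shape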
class AR_Back_Step(torch.nn.Module):
def __init__(self, n_mel_channels, n_speaker_dim, n_text_dim,
n_in_channels, n_hidden, n_attn_channels, n_lstm_layers,
add_gate, use_cumm_attention):
super(AR_Back_Step, self).__init__()
self.ar_step = AR_Step(n_mel_channels, n_speaker_dim, n_text_dim,
n_mel_channels+n_speaker_dim, n_hidden,
n_attn_channels, n_lstm_layers, add_gate,
use_cumm_attention)
def forward(self, mel, text, mask, out_lens, attn_prior=None):
mel = torch.flip(mel, (0, ))
if attn_prior is not None:
attn_prior = torch.flip(attn_prior, (1, )) # (B, M, T)
# backwards flow, send padded zeros back to end
for k in range(mel.size(1)):
mel[:, k] = mel[:, k].roll(out_lens[k].item(), dims=0)
if attn_prior is not None:
attn_prior[k] = attn_prior[k].roll(out_lens[k].item(), dims=0)
mel, log_s, gates, attn_out, attention_logprobs = self.ar_step(
mel, text, mask, out_lens, attn_prior)
# move padded zeros back to beginning
for k in range(mel.size(1)):
mel[:, k] = mel[:, k].roll(-out_lens[k].item(), dims=0)
if attn_prior is not None:
attn_prior[k] = attn_prior[k].roll(-out_lens[k].item(), dims=0)
if attn_prior is not None:
attn_prior = torch.flip(attn_prior, (1, ))
return (torch.flip(mel, (0, )), log_s, gates,
attn_out, attention_logprobs)
def infer(self, residual, text, attns, attn_prior=None):
# only need to flip, no need for padding since bs=1
if attn_prior is not None:
# (B, M, T)
attn_prior = torch.flip(attn_prior, (1, ))
residual, attention_weights = self.ar_step.infer(
torch.flip(residual, (0, )), text, attns, attn_prior=attn_prior)
if attn_prior is not None:
attn_prior = torch.flip(attn_prior, (1, ))
residual = torch.flip(residual, (0, ))
return residual, attention_weights
class AR_Step(torch.nn.Module):
def __init__(self, n_mel_channels, n_speaker_dim, n_text_channels,
n_in_channels, n_hidden, n_attn_channels, n_lstm_layers,
add_gate, use_cumm_attention):
super(AR_Step, self).__init__()
self.use_cumm_attention = use_cumm_attention
self.conv = torch.nn.Conv1d(n_hidden, 2*n_mel_channels, 1)
self.conv.weight.data = 0.0*self.conv.weight.data
self.conv.bias.data = 0.0*self.conv.bias.data
self.lstm = torch.nn.LSTM(n_hidden+n_attn_channels, n_hidden, n_lstm_layers)
self.attention_lstm = torch.nn.LSTM(n_mel_channels, n_hidden)
self.attention_layer = Attention(n_hidden, n_speaker_dim,
n_text_channels, n_attn_channels)
if self.use_cumm_attention:
self.attn_cond_layer = AttentionConditioningLayer(
input_dim=2, attention_n_filters=32,
attention_kernel_sizes=[5, 3],
attention_dim=n_text_channels + n_speaker_dim)
self.dense_layer = DenseLayer(in_dim=n_hidden,
sizes=[n_hidden, n_hidden])
if add_gate:
self.gate_threshold = 0.5
self.gate_layer = LinearNorm(
n_hidden+n_attn_channels, 1, bias=True,
w_init_gain='sigmoid')
def run_padded_sequence(self, sorted_idx, unsort_idx, lens, padded_data,
recurrent_model):
"""Sorts input data by previded ordering (and un-ordering) and runs the
packed data through the recurrent model
Args:
sorted_idx (torch.tensor): 1D sorting index
unsort_idx (torch.tensor): 1D unsorting index (inverse of sorted_idx)
lens: lengths of input data (sorted in descending order)
padded_data (torch.tensor): input sequences (padded)
recurrent_model (nn.Module): recurrent model to run data through
Returns:
hidden_vectors (torch.tensor): outputs of the RNN, in the original,
unsorted, ordering
"""
# sort the data by decreasing length using provided index
# we assume batch index is in dim=1
padded_data = padded_data[:, sorted_idx]
padded_data = nn.utils.rnn.pack_padded_sequence(padded_data, lens.cpu())
hidden_vectors = recurrent_model(padded_data)[0]
hidden_vectors, _ = nn.utils.rnn.pad_packed_sequence(hidden_vectors)
# unsort the results at dim=1 and return
hidden_vectors = hidden_vectors[:, unsort_idx]
return hidden_vectors
def run_cumm_attn_sequence(self, attn_lstm_outputs, text, mask,
attn_prior=None):
seq_len, bsize, text_feat_dim = text.shape
# strangely, appending to a list is faster than pre-allocation
attention_context_all = []
attention_weights_all = []
attention_logprobs_all = []
attn_cumm_tensor = text[:, :, 0:1].permute(1, 2, 0)*0
attention_weights = attn_cumm_tensor*0
for i in range(attn_lstm_outputs.shape[0]):
attn_cat = torch.cat((attn_cumm_tensor, attention_weights), 1)
attn_cond_vector = self.attn_cond_layer(attn_cat).permute(2, 0, 1)
output = attn_lstm_outputs[i:i+1:, :]
(attention_context, attention_weights,
attention_logprobs) = self.attention_layer(
output, text*attn_cond_vector, text, mask=mask,
attn_prior=attn_prior)
attention_context_all += [attention_context]
attention_weights_all += [attention_weights]
attention_logprobs_all += [attention_logprobs]
attn_cumm_tensor = attn_cumm_tensor + attention_weights
attention_context_all = torch.cat(attention_context_all, 2)
attention_weights_all = torch.cat(attention_weights_all, 1)
attention_logprobs_all = torch.cat(attention_logprobs_all, 1)
return {'attention_context': attention_context_all,
'attention_weights': attention_weights_all,
'attention_logprobs': attention_logprobs_all}
def forward(self, mel, text, mask, out_lens, attn_prior=None):
dummy = torch.FloatTensor(1, mel.size(1), mel.size(2)).zero_()
dummy = dummy.type(mel.type())
# seq_len x batch x dim
mel0 = torch.cat([dummy, mel[:-1, :, :]], 0)
if out_lens is not None:
# collect decreasing length indices
lens, ids = torch.sort(out_lens, descending=True)
original_ids = [0] * lens.size(0)
for i in range(len(ids)):
original_ids[ids[i]] = i
# mel_seq_len x batch x hidden_dim
attention_hidden = self.run_padded_sequence(
ids, original_ids, lens, mel0, self.attention_lstm)
else:
attention_hidden = self.attention_lstm(mel0)[0]
if hasattr(self, 'use_cumm_attention') and self.use_cumm_attention:
cumm_attn_output_dict = self.run_cumm_attn_sequence(
attention_hidden, text, mask)
attention_context = cumm_attn_output_dict['attention_context']
attention_weights = cumm_attn_output_dict['attention_weights']
attention_logprobs = cumm_attn_output_dict['attention_logprobs']
else:
(attention_context, attention_weights,
attention_logprobs) = self.attention_layer(
attention_hidden, text, text, mask=mask, attn_prior=attn_prior)
attention_context = attention_context.permute(2, 0, 1)
decoder_input = torch.cat((attention_hidden, attention_context), -1)
gates = None
if hasattr(self, 'gate_layer'):
# compute gates before packing
gates = self.gate_layer(decoder_input)
if out_lens is not None:
# reorder, run padded sequence and undo reordering
lstm_hidden = self.run_padded_sequence(
ids, original_ids, lens, decoder_input, self.lstm)
else:
lstm_hidden = self.lstm(decoder_input)[0]
lstm_hidden = self.dense_layer(lstm_hidden).permute(1, 2, 0)
decoder_output = self.conv(lstm_hidden).permute(2, 0, 1)
log_s = decoder_output[:, :, :mel.size(2)]
b = decoder_output[:, :, mel.size(2):]
mel = torch.exp(log_s) * mel + b
return mel, log_s, gates, attention_weights, attention_logprobs
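    # Note on the affine coupling above: forward() maps mel -> z with
    #     z = exp(log_s) * mel + b,
    # and infer() below applies the exact inverse on sampled residuals,
    #     mel = (z - b) / exp(log_s),
    # which is what makes each flow step invertible.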
def infer(self, residual, text, attns, attn_prior=None):
attn_cond_vector = 1.0
if hasattr(self, 'use_cumm_attention') and self.use_cumm_attention:
attn_cumm_tensor = text[:, :, 0:1].permute(1, 2, 0)*0
attention_weight = attn_cumm_tensor*0
attention_weights = []
        total_output = []  # appending seems ~10 FPS faster than pre-allocation
output = None
attn = None
dummy = torch.cuda.FloatTensor(
1, residual.size(1), residual.size(2)).zero_()
for i in range(0, residual.size(0)):
if i == 0:
attention_hidden, (h, c) = self.attention_lstm(dummy)
else:
attention_hidden, (h, c) = self.attention_lstm(output, (h, c))
if hasattr(self, 'use_cumm_attention') and self.use_cumm_attention:
attn_cat = torch.cat((attn_cumm_tensor, attention_weight), 1)
attn_cond_vector = self.attn_cond_layer(attn_cat).permute(2, 0, 1)
attn = None if attns is None else attns[i][None, None]
attn_prior_i = None if attn_prior is None else attn_prior[:, i][None]
(attention_context, attention_weight,
attention_logprob) = self.attention_layer(
attention_hidden, text * attn_cond_vector, text, attn=attn,
attn_prior=attn_prior_i)
if hasattr(self, 'use_cumm_attention') and self.use_cumm_attention:
attn_cumm_tensor = attn_cumm_tensor + attention_weight
attention_weights.append(attention_weight)
attention_context = attention_context.permute(2, 0, 1)
decoder_input = torch.cat((
attention_hidden, attention_context), -1)
if i == 0:
lstm_hidden, (h1, c1) = self.lstm(decoder_input)
else:
lstm_hidden, (h1, c1) = self.lstm(decoder_input, (h1, c1))
lstm_hidden = self.dense_layer(lstm_hidden).permute(1, 2, 0)
decoder_output = self.conv(lstm_hidden).permute(2, 0, 1)
log_s = decoder_output[:, :, :decoder_output.size(2)//2]
b = decoder_output[:, :, decoder_output.size(2)//2:]
output = (residual[i, :, :] - b)/torch.exp(log_s)
total_output.append(output)
if (hasattr(self, 'gate_layer') and
torch.sigmoid(self.gate_layer(decoder_input)) > self.gate_threshold):
print("Hitting gate limit")
break
total_output = torch.cat(total_output, 0)
return total_output, attention_weights
class Flowtron(torch.nn.Module):
def __init__(self, n_speakers, n_speaker_dim, n_text, n_text_dim, n_flows,
n_mel_channels, n_hidden, n_attn_channels, n_lstm_layers,
use_gate_layer, mel_encoder_n_hidden, n_components,
fixed_gaussian, mean_scale, dummy_speaker_embedding,
use_cumm_attention):
super(Flowtron, self).__init__()
norm_fn = MaskedInstanceNorm1d
self.speaker_embedding = torch.nn.Embedding(n_speakers, n_speaker_dim)
self.embedding = torch.nn.Embedding(n_text, n_text_dim)
self.flows = torch.nn.ModuleList()
self.encoder = Encoder(norm_fn=norm_fn, encoder_embedding_dim=n_text_dim)
self.dummy_speaker_embedding = dummy_speaker_embedding
if n_components > 1:
self.mel_encoder = MelEncoder(mel_encoder_n_hidden, norm_fn=norm_fn)
self.gaussian_mixture = GaussianMixture(mel_encoder_n_hidden,
n_components,
n_mel_channels,
fixed_gaussian, mean_scale)
for i in range(n_flows):
add_gate = True if (i == (n_flows-1) and use_gate_layer) else False
if i % 2 == 0:
self.flows.append(AR_Step(n_mel_channels, n_speaker_dim,
n_text_dim,
n_mel_channels+n_speaker_dim,
n_hidden, n_attn_channels,
n_lstm_layers, add_gate,
use_cumm_attention))
else:
self.flows.append(AR_Back_Step(n_mel_channels, n_speaker_dim,
n_text_dim,
n_mel_channels+n_speaker_dim,
n_hidden, n_attn_channels,
n_lstm_layers, add_gate,
use_cumm_attention))
def forward(self, mel, speaker_ids, text, in_lens, out_lens,
attn_prior=None):
speaker_ids = speaker_ids*0 if self.dummy_speaker_embedding else speaker_ids
speaker_vecs = self.speaker_embedding(speaker_ids)
text = self.embedding(text).transpose(1, 2)
text = self.encoder(text, in_lens)
mean, log_var, prob = None, None, None
if hasattr(self, 'gaussian_mixture'):
mel_embedding = self.mel_encoder(mel, out_lens)
mean, log_var, prob = self.gaussian_mixture(
mel_embedding, mel_embedding.size(0))
text = text.transpose(0, 1)
mel = mel.permute(2, 0, 1)
encoder_outputs = torch.cat(
[text, speaker_vecs.expand(text.size(0), -1, -1)], 2)
log_s_list = []
attns_list = []
attns_logprob_list = []
mask = ~get_mask_from_lengths(in_lens)[..., None]
for i, flow in enumerate(self.flows):
mel, log_s, gate, attn_out, attn_logprob_out = flow(
mel, encoder_outputs, mask, out_lens, attn_prior)
log_s_list.append(log_s)
attns_list.append(attn_out)
attns_logprob_list.append(attn_logprob_out)
return (mel, log_s_list, gate, attns_list,
attns_logprob_list, mean, log_var, prob)
def infer(self, residual, speaker_ids, text, temperature=1.0,
gate_threshold=0.5, attns=None, attn_prior=None):
"""Inference function. Inverse of the forward pass
Args:
residual: 1 x 80 x N_residual tensor of sampled z values
speaker_ids: 1 x 1 tensor of integral speaker ids (should be a single value)
text (torch.int64): 1 x N_text tensor holding text-token ids
Returns:
residual: input residual after flow transformation. Technically the mel spectrogram values
attention_weights: attention weights predicted by each flow step for mel-text alignment
"""
speaker_ids = speaker_ids*0 if self.dummy_speaker_embedding else speaker_ids
speaker_vecs = self.speaker_embedding(speaker_ids)
text = self.embedding(text).transpose(1, 2)
text = self.encoder.infer(text)
text = text.transpose(0, 1)
encoder_outputs = torch.cat(
[text, speaker_vecs.expand(text.size(0), -1, -1)], 2)
residual = residual.permute(2, 0, 1)
attention_weights = []
for i, flow in enumerate(reversed(self.flows)):
attn = None if attns is None else reversed(attns)[i]
self.set_temperature_and_gate(flow, temperature, gate_threshold)
residual, attention_weight = flow.infer(
residual, encoder_outputs, attn, attn_prior=attn_prior)
attention_weights.append(attention_weight)
return residual.permute(1, 2, 0), attention_weights
def test_invertibility(self, residual, speaker_ids, text, temperature=1.0,
gate_threshold=0.5, attns=None):
"""Model invertibility check. Call this the same way you would call self.infer()
Args:
residual: 1 x 80 x N_residual tensor of sampled z values
speaker_ids: 1 x 1 tensor of integral speaker ids (should be a single value)
text (torch.int64): 1 x N_text tensor holding text-token ids
Returns:
error: should be in the order of 1e-5 or less, or there may be an invertibility bug
"""
mel, attn_weights = self.infer(residual, speaker_ids, text)
in_lens = torch.LongTensor([text.shape[1]]).cuda()
residual_recon, log_s_list, gate, _, _, _, _ = self.forward(mel,
speaker_ids, text,
in_lens, None)
residual_permuted = residual.permute(2, 0, 1)
if len(self.flows) % 2 == 0:
residual_permuted = torch.flip(residual_permuted, (0,))
residual_recon = torch.flip(residual_recon, (0,))
error = (residual_recon - residual_permuted[0:residual_recon.shape[0]]).abs().mean()
return error
@staticmethod
def set_temperature_and_gate(flow, temperature, gate_threshold):
flow = flow.ar_step if hasattr(flow, "ar_step") else flow
flow.attention_layer.temperature = temperature
if hasattr(flow, 'gate_layer'):
flow.gate_threshold = gate_threshold
| flowtron-master | flowtron.py |
import torch
import numpy as np
from scipy.signal import get_window
from librosa.filters import mel as librosa_mel_fn
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
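# Minimal usage sketch for window_sumsquare above (illustrative settings, not
# part of the original file): the returned envelope has length
# n_fft + hop_length * (n_frames - 1).
def _example_window_sumsquare():
    env = window_sumsquare('hann', n_frames=10, hop_length=200,
                           win_length=800, n_fft=800)
    # env.shape == (800 + 200 * 9,) == (2600,)
    return env.shape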
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
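# Round-trip sketch for the two helpers above (illustrative only): decompression
# inverts compression for values above the clip threshold, i.e.
# exp(log(clamp(x, min=clip_val) * C)) / C == clamp(x, min=clip_val).
def _example_dynamic_range_roundtrip():
    x = torch.rand(4) + 0.1   # safely above clip_val=1e-5
    y = dynamic_range_decompression(dynamic_range_compression(x))
    return torch.allclose(x, y)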
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=None):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
"""
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :])
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase,
Variable(self.inverse_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
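# Illustrative round-trip sketch for the STFT class above (synthetic audio,
# default 800/200 settings; not part of the original file): transform followed
# by inverse approximately reconstructs the waveform, so the mean absolute
# error should be near zero.
def _example_stft_roundtrip():
    stft = STFT(filter_length=800, hop_length=200, win_length=800, window='hann')
    audio = torch.randn(1, 16000)
    recon = stft(audio)                       # shape (1, 1, T'), T' close to 16000
    n = min(audio.size(1), recon.size(-1))
    return (recon[:, 0, :n] - audio[:, :n]).abs().mean()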
| flowtron-master | audio_processing.py |
###############################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import os
import torch
import torch.distributed as dist
from torch.autograd import Variable
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= num_gpus
return rt
def init_distributed(rank, num_gpus, dist_backend, dist_url):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print('> initializing distributed for rank {} out '
'of {}'.format(rank, num_gpus))
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
init_method = 'tcp://'
master_ip = os.getenv('MASTER_ADDR', 'localhost')
master_port = os.getenv('MASTER_PORT', '6000')
init_method += master_ip + ':' + master_port
torch.distributed.init_process_group(backend='nccl',
world_size=num_gpus,
rank=rank,
init_method=init_method)
def _flatten_dense_tensors(tensors):
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Arguments:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A contiguous 1D buffer containing input tensors.
"""
if len(tensors) == 1:
return tensors[0].contiguous().view(-1)
flat = torch.cat([t.contiguous().view(-1) for t in tensors], dim=0)
return flat
def _unflatten_dense_tensors(flat, tensors):
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by _flatten_dense_tensors.
Arguments:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return tuple(outputs)
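# Round-trip sketch for the two helpers above (illustrative only): flattening a
# list of dense tensors and unflattening against the same reference tensors
# restores the original shapes and values.
def _example_flatten_roundtrip():
    tensors = [torch.randn(3, 4), torch.randn(5)]
    flat = _flatten_dense_tensors(tensors)            # shape (17,)
    restored = _unflatten_dense_tensors(flat, tensors)
    return all(torch.equal(a, b) for a, b in zip(tensors, restored))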
def apply_gradient_allreduce(module):
"""
Modifies existing model to do gradient allreduce, but doesn't change class
so you don't need "module"
"""
if not hasattr(dist, '_backend'):
module.warn_on_half = True
else:
module.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
for p in module.state_dict().values():
if not torch.is_tensor(p):
continue
dist.broadcast(p, 0)
def allreduce_params():
if(module.needs_reduction):
module.needs_reduction = False
buckets = {}
for param in module.parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if module.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case. This currently requires" +
"PyTorch built from top of tree master.")
module.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(module.parameters()):
def allreduce_hook(*unused):
Variable._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
dir(param)
def set_needs_reduction(self, input, output):
self.needs_reduction = True
module.register_forward_hook(set_needs_reduction)
return module
| flowtron-master | distributed.py |
"""RAdam
Original source taken from https://github.com/LiyuanLucasLiu/RAdam
Copyright 2019 Liyuan Liu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import torch
# pylint: disable=no-name-in-module
from torch.optim.optimizer import Optimizer
class RAdam(Optimizer):
"""RAdam optimizer"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0):
"""
Init
:param params: parameters to optimize
:param lr: learning rate
:param betas: beta
:param eps: numerical precision
:param weight_decay: weight decay weight
"""
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for _ in range(10)]
super().__init__(params, defaults)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError(
'RAdam does not support sparse gradients'
)
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = (
state['exp_avg_sq'].type_as(p_data_fp32)
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = (
N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = (
group['lr'] *
math.sqrt(
(1 - beta2_t) * (N_sma - 4) /
(N_sma_max - 4) * (N_sma - 2) /
N_sma * N_sma_max / (N_sma_max - 2)
) / (1 - beta1 ** state['step'])
)
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(
-group['weight_decay'] * group['lr'], p_data_fp32
)
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
else:
p_data_fp32.add_(-step_size, exp_avg)
p.data.copy_(p_data_fp32)
return loss
| flowtron-master | radam.py |
###########################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import argparse
import json
import os
import torch
from torch.utils.data import DataLoader
from torch.cuda import amp
import ast
from flowtron import FlowtronLoss
from flowtron import Flowtron
from data import Data, DataCollate
from flowtron_logger import FlowtronLogger
from radam import RAdam
# =====START: ADDED FOR DISTRIBUTED======
from distributed import init_distributed
from distributed import apply_gradient_allreduce
from distributed import reduce_tensor
from torch.utils.data.distributed import DistributedSampler
# =====END: ADDED FOR DISTRIBUTED======
def update_params(config, params):
for param in params:
print(param)
k, v = param.split("=")
try:
v = ast.literal_eval(v)
except:
print("{}:{} was not parsed".format(k, v))
pass
k_split = k.split('.')
if len(k_split) > 1:
parent_k = k_split[0]
cur_param = ['.'.join(k_split[1:])+"="+str(v)]
update_params(config[parent_k], cur_param)
elif k in config and len(k_split) == 1:
config[k] = v
else:
print("{}, {} params not updated".format(k, v))
def prepare_dataloaders(data_config, n_gpus, batch_size):
    # Get data, data loaders and collate function ready
ignore_keys = ['training_files', 'validation_files']
trainset = Data(data_config['training_files'],
**dict((k, v) for k, v in data_config.items()
if k not in ignore_keys))
valset = Data(data_config['validation_files'],
**dict((k, v) for k, v in data_config.items()
if k not in ignore_keys), speaker_ids=trainset.speaker_ids)
collate_fn = DataCollate(
n_frames_per_step=1, use_attn_prior=trainset.use_attn_prior)
train_sampler, shuffle = None, True
if n_gpus > 1:
train_sampler, shuffle = DistributedSampler(trainset), False
train_loader = DataLoader(trainset, num_workers=1, shuffle=shuffle,
sampler=train_sampler, batch_size=batch_size,
pin_memory=False, drop_last=True,
collate_fn=collate_fn)
return train_loader, valset, collate_fn
def warmstart(checkpoint_path, model, include_layers=None):
print("Warm starting model", checkpoint_path)
pretrained_dict = torch.load(checkpoint_path, map_location='cpu')
if 'model' in pretrained_dict:
pretrained_dict = pretrained_dict['model'].state_dict()
else:
pretrained_dict = pretrained_dict['state_dict']
if include_layers is not None:
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if any(l in k for l in include_layers)}
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if k in model_dict}
if (pretrained_dict['speaker_embedding.weight'].shape !=
model_dict['speaker_embedding.weight'].shape):
del pretrained_dict['speaker_embedding.weight']
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
def load_checkpoint(checkpoint_path, model, optimizer, ignore_layers=[]):
assert os.path.isfile(checkpoint_path)
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
iteration = checkpoint_dict['iteration']
model_dict = checkpoint_dict['model'].state_dict()
if len(ignore_layers) > 0:
model_dict = {k: v for k, v in model_dict.items()
if k not in ignore_layers}
dummy_dict = model.state_dict()
dummy_dict.update(model_dict)
model_dict = dummy_dict
else:
optimizer.load_state_dict(checkpoint_dict['optimizer'])
model.load_state_dict(model_dict)
print("Loaded checkpoint '{}' (iteration {})" .format(
checkpoint_path, iteration))
return model, optimizer, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
model_for_saving = Flowtron(**model_config).cuda()
model_for_saving.load_state_dict(model.state_dict())
torch.save({'model': model_for_saving,
'iteration': iteration,
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def compute_validation_loss(model, criterion, valset, batch_size,
n_gpus, apply_ctc):
model.eval()
with torch.no_grad():
collate_fn = DataCollate(
n_frames_per_step=1, use_attn_prior=valset.use_attn_prior)
val_sampler = DistributedSampler(valset) if n_gpus > 1 else None
val_loader = DataLoader(valset, sampler=val_sampler, num_workers=1,
shuffle=False, batch_size=batch_size,
pin_memory=False, collate_fn=collate_fn)
val_loss, val_loss_nll, val_loss_gate = 0.0, 0.0, 0.0
val_loss_ctc = 0.0
n_batches = len(val_loader)
for i, batch in enumerate(val_loader):
(mel, spk_ids, txt, in_lens, out_lens,
gate_target, attn_prior) = batch
mel, spk_ids, txt = mel.cuda(), spk_ids.cuda(), txt.cuda()
in_lens, out_lens = in_lens.cuda(), out_lens.cuda()
gate_target = gate_target.cuda()
attn_prior = attn_prior.cuda() if attn_prior is not None else None
(z, log_s_list, gate_pred, attn, attn_logprob,
mean, log_var, prob) = model(
mel, spk_ids, txt, in_lens, out_lens, attn_prior)
loss_nll, loss_gate, loss_ctc = criterion(
(z, log_s_list, gate_pred, attn,
attn_logprob, mean, log_var, prob),
gate_target, in_lens, out_lens, is_validation=True)
loss = loss_nll + loss_gate
if apply_ctc:
loss += loss_ctc * criterion.ctc_loss_weight
if n_gpus > 1:
reduced_val_loss = reduce_tensor(loss.data, n_gpus).item()
reduced_val_loss_nll = reduce_tensor(
loss_nll.data, n_gpus).item()
reduced_val_loss_gate = reduce_tensor(
loss_gate.data, n_gpus).item()
reduced_val_loss_ctc = reduce_tensor(
loss_ctc.data, n_gpus).item()
else:
reduced_val_loss = loss.item()
reduced_val_loss_nll = loss_nll.item()
reduced_val_loss_gate = loss_gate.item()
reduced_val_loss_ctc = loss_ctc.item()
val_loss += reduced_val_loss
val_loss_nll += reduced_val_loss_nll
val_loss_gate += reduced_val_loss_gate
val_loss_ctc += reduced_val_loss_ctc
val_loss = val_loss / n_batches
val_loss_nll = val_loss_nll / n_batches
val_loss_gate = val_loss_gate / n_batches
val_loss_ctc = val_loss_ctc / n_batches
print("Mean {}\nLogVar {}\nProb {}".format(mean, log_var, prob))
model.train()
return (val_loss, val_loss_nll, val_loss_gate,
val_loss_ctc, attn, gate_pred, gate_target)
def train(n_gpus, rank, output_directory, epochs, optim_algo, learning_rate,
weight_decay, sigma, iters_per_checkpoint, batch_size, seed,
checkpoint_path, ignore_layers, include_layers, finetune_layers,
warmstart_checkpoint_path, with_tensorboard, grad_clip_val,
gate_loss, fp16_run, use_ctc_loss, ctc_loss_weight,
blank_logprob, ctc_loss_start_iter):
fp16_run = bool(fp16_run)
use_ctc_loss = bool(use_ctc_loss)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
if n_gpus > 1:
init_distributed(rank, n_gpus, **dist_config)
criterion = FlowtronLoss(sigma, bool(model_config['n_components']),
gate_loss, use_ctc_loss, ctc_loss_weight,
blank_logprob)
model = Flowtron(**model_config).cuda()
if len(finetune_layers):
for name, param in model.named_parameters():
if name in finetune_layers:
param.requires_grad = True
else:
param.requires_grad = False
print("Initializing %s optimizer" % (optim_algo))
if optim_algo == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
weight_decay=weight_decay)
elif optim_algo == 'RAdam':
optimizer = RAdam(model.parameters(), lr=learning_rate,
weight_decay=weight_decay)
else:
print("Unrecognized optimizer %s!" % (optim_algo))
exit(1)
# Load checkpoint if one exists
iteration = 0
if warmstart_checkpoint_path != "":
model = warmstart(warmstart_checkpoint_path, model)
if checkpoint_path != "":
model, optimizer, iteration = load_checkpoint(checkpoint_path, model,
optimizer, ignore_layers)
iteration += 1 # next iteration is iteration + 1
if n_gpus > 1:
model = apply_gradient_allreduce(model)
print(model)
scaler = amp.GradScaler(enabled=fp16_run)
train_loader, valset, collate_fn = prepare_dataloaders(
data_config, n_gpus, batch_size)
# Get shared output_directory ready
if rank == 0 and not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
print("Output directory", output_directory)
if with_tensorboard and rank == 0:
tboard_out_path = os.path.join(output_directory, 'logs')
print("Setting up Tensorboard log in %s" % (tboard_out_path))
logger = FlowtronLogger(tboard_out_path)
# force set the learning rate to what is specified
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
model.train()
epoch_offset = max(0, int(iteration / len(train_loader)))
apply_ctc = False
    # ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, epochs):
print("Epoch: {}".format(epoch))
for batch in train_loader:
model.zero_grad()
(mel, spk_ids, txt, in_lens, out_lens,
gate_target, attn_prior) = batch
mel, spk_ids, txt = mel.cuda(), spk_ids.cuda(), txt.cuda()
in_lens, out_lens = in_lens.cuda(), out_lens.cuda()
gate_target = gate_target.cuda()
attn_prior = attn_prior.cuda() if attn_prior is not None else None
if use_ctc_loss and iteration >= ctc_loss_start_iter:
apply_ctc = True
with amp.autocast(enabled=fp16_run):
(z, log_s_list, gate_pred, attn,
attn_logprob, mean, log_var, prob) = model(
mel, spk_ids, txt, in_lens, out_lens, attn_prior)
loss_nll, loss_gate, loss_ctc = criterion(
(z, log_s_list, gate_pred, attn,
attn_logprob, mean, log_var, prob),
gate_target, in_lens, out_lens, is_validation=False)
loss = loss_nll + loss_gate
if apply_ctc:
loss += loss_ctc * criterion.ctc_loss_weight
if n_gpus > 1:
reduced_loss = reduce_tensor(loss.data, n_gpus).item()
reduced_gate_loss = reduce_tensor(
loss_gate.data,
n_gpus).item()
reduced_mle_loss = reduce_tensor(
loss_nll.data,
n_gpus).item()
reduced_ctc_loss = reduce_tensor(
loss_ctc.data,
n_gpus).item()
else:
reduced_loss = loss.item()
reduced_gate_loss = loss_gate.item()
reduced_mle_loss = loss_nll.item()
reduced_ctc_loss = loss_ctc.item()
scaler.scale(loss).backward()
if grad_clip_val > 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(
model.parameters(),
grad_clip_val)
scaler.step(optimizer)
scaler.update()
if rank == 0:
print("{}:\t{:.9f}".format(
iteration,
reduced_loss),
flush=True)
if with_tensorboard and rank == 0:
logger.add_scalar('training/loss', reduced_loss, iteration)
logger.add_scalar(
'training/loss_gate',
reduced_gate_loss,
iteration)
logger.add_scalar(
'training/loss_nll',
reduced_mle_loss,
iteration)
logger.add_scalar(
'training/loss_ctc',
reduced_ctc_loss,
iteration)
logger.add_scalar(
'learning_rate',
learning_rate,
iteration)
if iteration % iters_per_checkpoint == 0:
(val_loss, val_loss_nll, val_loss_gate, val_loss_ctc,
attns, gate_pred, gate_target) = \
compute_validation_loss(model, criterion, valset,
batch_size, n_gpus, apply_ctc)
if rank == 0:
print("Validation loss {}: {:9f} ".format(
iteration, val_loss))
if with_tensorboard:
logger.log_validation(
val_loss, val_loss_nll,
val_loss_gate, val_loss_ctc,
attns, gate_pred, gate_target, iteration)
checkpoint_path = "{}/model_{}".format(
output_directory, iteration)
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str,
help='JSON file for configuration')
parser.add_argument('-p', '--params', nargs='+', default=[])
args = parser.parse_args()
args.rank = 0
# Parse configs. Globals nicer in this case
with open(args.config) as f:
data = f.read()
global config
config = json.loads(data)
update_params(config, args.params)
print(config)
train_config = config["train_config"]
global data_config
data_config = config["data_config"]
global dist_config
dist_config = config["dist_config"]
global model_config
model_config = config["model_config"]
# Make sure the launcher sets `RANK` and `WORLD_SIZE`.
rank = int(os.getenv('RANK', '0'))
n_gpus = int(os.getenv("WORLD_SIZE", '1'))
print('> got rank {} and world size {} ...'.format(rank, n_gpus))
if n_gpus == 1 and rank != 0:
raise Exception("Doing single GPU training on rank > 0")
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
train(n_gpus, rank, **train_config)
| flowtron-master | train.py |
###############################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import os
import argparse
import json
import sys
import numpy as np
import torch
from flowtron import Flowtron
from torch.utils.data import DataLoader
from data import Data
from train import update_params
sys.path.insert(0, "tacotron2")
sys.path.insert(0, "tacotron2/waveglow")
from glow import WaveGlow
from scipy.io.wavfile import write
def infer(flowtron_path, waveglow_path, output_dir, text, speaker_id, n_frames,
sigma, gate_threshold, seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# load waveglow
waveglow = torch.load(waveglow_path)['model'].cuda().eval()
waveglow.cuda().half()
for k in waveglow.convinv:
k.float()
waveglow.eval()
# load flowtron
model = Flowtron(**model_config).cuda()
state_dict = torch.load(flowtron_path, map_location='cpu')['state_dict']
model.load_state_dict(state_dict)
model.eval()
print("Loaded checkpoint '{}')" .format(flowtron_path))
ignore_keys = ['training_files', 'validation_files']
trainset = Data(
data_config['training_files'],
**dict((k, v) for k, v in data_config.items() if k not in ignore_keys))
speaker_vecs = trainset.get_speaker_id(speaker_id).cuda()
text = trainset.get_text(text).cuda()
speaker_vecs = speaker_vecs[None]
text = text[None]
with torch.no_grad():
residual = torch.cuda.FloatTensor(1, 80, n_frames).normal_() * sigma
mels, attentions = model.infer(
residual, speaker_vecs, text, gate_threshold=gate_threshold)
for k in range(len(attentions)):
attention = torch.cat(attentions[k]).cpu().numpy()
fig, axes = plt.subplots(1, 2, figsize=(16, 4))
        axes[0].imshow(mels[0].cpu().numpy(), origin='lower', aspect='auto')
        axes[1].imshow(attention[:, 0].transpose(), origin='lower', aspect='auto')
fig.savefig(os.path.join(output_dir, 'sid{}_sigma{}_attnlayer{}.png'.format(speaker_id, sigma, k)))
plt.close("all")
with torch.no_grad():
audio = waveglow.infer(mels.half(), sigma=0.8).float()
audio = audio.cpu().numpy()[0]
# normalize audio for now
audio = audio / np.abs(audio).max()
print(audio.shape)
write(os.path.join(output_dir, 'sid{}_sigma{}.wav'.format(speaker_id, sigma)),
data_config['sampling_rate'], audio)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str,
help='JSON file for configuration')
parser.add_argument('-p', '--params', nargs='+', default=[])
parser.add_argument('-f', '--flowtron_path',
help='Path to flowtron state dict', type=str)
parser.add_argument('-w', '--waveglow_path',
help='Path to waveglow state dict', type=str)
parser.add_argument('-t', '--text', help='Text to synthesize', type=str)
parser.add_argument('-i', '--id', help='Speaker id', type=int)
parser.add_argument('-n', '--n_frames', help='Number of frames',
default=400, type=int)
parser.add_argument('-o', "--output_dir", default="results/")
parser.add_argument("-s", "--sigma", default=0.5, type=float)
parser.add_argument("-g", "--gate", default=0.5, type=float)
parser.add_argument("--seed", default=1234, type=int)
args = parser.parse_args()
# Parse configs. Globals nicer in this case
with open(args.config) as f:
data = f.read()
global config
config = json.loads(data)
update_params(config, args.params)
data_config = config["data_config"]
global model_config
model_config = config["model_config"]
# Make directory if it doesn't exist
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
os.chmod(args.output_dir, 0o775)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
infer(args.flowtron_path, args.waveglow_path, args.output_dir, args.text,
args.id, args.n_frames, args.sigma, args.gate, args.seed)
| flowtron-master | inference.py |
###############################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import numpy as np
def save_figure_to_numpy(fig):
# save it to a numpy array.
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
def plot_alignment_to_numpy(alignment, info=None):
fig, ax = plt.subplots(figsize=(6, 4))
im = ax.imshow(alignment, aspect='auto', origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
if info is not None:
xlabel += '\n\n' + info
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_gate_outputs_to_numpy(gate_targets, gate_outputs):
fig, ax = plt.subplots(figsize=(12, 3))
ax.scatter(range(len(gate_targets)), gate_targets, alpha=0.5,
color='green', marker='+', s=1, label='target')
ax.scatter(range(len(gate_outputs)), gate_outputs, alpha=0.5,
color='red', marker='.', s=1, label='predicted')
plt.xlabel("Frames (Green target, Red predicted)")
plt.ylabel("Gate State")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
| flowtron-master | flowtron_plotting_utils.py |
###############################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import re
import os
import argparse
import json
import random
import numpy as np
import torch
import torch.utils.data
from scipy.io.wavfile import read
from scipy.stats import betabinom
from audio_processing import TacotronSTFT
from text import text_to_sequence, cmudict, _clean_text, get_arpabet
def beta_binomial_prior_distribution(phoneme_count, mel_count,
scaling_factor=1.0):
P, M = phoneme_count, mel_count
x = np.arange(0, P)
mel_text_probs = []
for i in range(1, M+1):
a, b = scaling_factor*i, scaling_factor*(M+1-i)
rv = betabinom(P - 1, a, b)
mel_i_prob = rv.pmf(x)
mel_text_probs.append(mel_i_prob)
return torch.tensor(np.array(mel_text_probs))
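# Illustrative sketch of the prior above (arbitrary lengths, not part of the
# original file): the result is a (mel_count x phoneme_count) matrix whose rows
# are Beta-Binomial pmfs that sweep monotonically across the phonemes as the
# mel frame index grows.
def _example_attention_prior():
    prior = beta_binomial_prior_distribution(phoneme_count=20, mel_count=100)
    # prior.shape == (100, 20); each row sums to 1 (a pmf over phoneme positions)
    return prior.shape, prior.sum(dim=1)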
def load_filepaths_and_text(filelist, split="|"):
if isinstance(filelist, str):
with open(filelist, encoding='utf-8') as f:
filepaths_and_text = [line.strip().split(split) for line in f]
else:
filepaths_and_text = filelist
return filepaths_and_text
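# Illustrative sketch of the filelist format read above (hypothetical paths,
# not part of the original file): each line is pipe-separated as
# audiopath|text|speaker_id.
def _example_filelist_format():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False,
                                     encoding='utf-8') as f:
        f.write("/data/wavs/utt0.wav|Hello world.|0\n")
        path = f.name
    entries = load_filepaths_and_text(path)
    # entries[0] == ["/data/wavs/utt0.wav", "Hello world.", "0"],
    # which Data.__getitem__ below unpacks as (audiopath, text, speaker_id)
    return entries[0]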
def load_wav_to_torch(full_path):
""" Loads wavdata into torch array """
sampling_rate, data = read(full_path)
return torch.from_numpy(data).float(), sampling_rate
class Data(torch.utils.data.Dataset):
def __init__(self, filelist_path, filter_length, hop_length, win_length,
sampling_rate, mel_fmin, mel_fmax, max_wav_value, p_arpabet,
cmudict_path, text_cleaners, speaker_ids=None,
use_attn_prior=False, attn_prior_threshold=1e-4,
prior_cache_path="", betab_scaling_factor=1.0, randomize=True,
keep_ambiguous=False, seed=1234):
self.max_wav_value = max_wav_value
self.audiopaths_and_text = load_filepaths_and_text(filelist_path)
self.use_attn_prior = use_attn_prior
self.betab_scaling_factor = betab_scaling_factor
self.attn_prior_threshold = attn_prior_threshold
self.keep_ambiguous = keep_ambiguous
if speaker_ids is None or speaker_ids == '':
self.speaker_ids = self.create_speaker_lookup_table(
self.audiopaths_and_text)
else:
self.speaker_ids = speaker_ids
self.stft = TacotronSTFT(filter_length=filter_length,
hop_length=hop_length,
win_length=win_length,
sampling_rate=sampling_rate,
mel_fmin=mel_fmin, mel_fmax=mel_fmax)
self.sampling_rate = sampling_rate
self.text_cleaners = text_cleaners
self.p_arpabet = p_arpabet
self.cmudict = cmudict.CMUDict(
cmudict_path, keep_ambiguous=keep_ambiguous)
if speaker_ids is None:
self.speaker_ids = self.create_speaker_lookup_table(
self.audiopaths_and_text)
else:
self.speaker_ids = speaker_ids
        # caching makes sense only when p_arpabet == 1.0;
        # for other values the text lengths change every time
self.prior_cache_path = prior_cache_path
self.caching_enabled = False
if (self.prior_cache_path is not None and
self.prior_cache_path != "" and p_arpabet == 1.0):
self.caching_enabled = True
# make sure caching path exists
if (self.caching_enabled and
not os.path.exists(self.prior_cache_path)):
os.makedirs(self.prior_cache_path)
random.seed(seed)
if randomize:
random.shuffle(self.audiopaths_and_text)
def compute_attention_prior(self, audiopath, mel_length, text_length):
folder_path = audiopath.split('/')[-2]
filename = os.path.basename(audiopath).split('.')[0]
prior_path = os.path.join(
self.prior_cache_path,
folder_path + "_" + filename)
prior_path += "_prior.pth"
prior_loaded = False
if self.caching_enabled and os.path.exists(prior_path):
attn_prior = torch.load(prior_path)
if (attn_prior.shape[1] == text_length and
attn_prior.shape[0] == mel_length):
prior_loaded = True
else:
print("Prior size mismatch, recomputing")
if not prior_loaded:
attn_prior = beta_binomial_prior_distribution(
text_length,
mel_length,
self.betab_scaling_factor)
if self.caching_enabled:
torch.save(attn_prior, prior_path)
if self.attn_prior_threshold > 0:
attn_prior = attn_prior.masked_fill(
attn_prior < self.attn_prior_threshold, 0.0)
return attn_prior
def create_speaker_lookup_table(self, audiopaths_and_text):
speaker_ids = np.sort(np.unique([x[2] for x in audiopaths_and_text]))
d = {int(speaker_ids[i]): i for i in range(len(speaker_ids))}
print("Number of speakers :", len(d))
return d
def get_mel(self, audio):
audio_norm = audio / self.max_wav_value
audio_norm = audio_norm.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
return melspec
def get_speaker_id(self, speaker_id):
return torch.LongTensor([self.speaker_ids[int(speaker_id)]])
def get_text(self, text):
text = _clean_text(text, self.text_cleaners)
words = re.findall(r'\S*\{.*?\}\S*|\S+', text)
text = ' '.join([get_arpabet(word, self.cmudict)
if random.random() < self.p_arpabet else word
for word in words])
text_norm = torch.LongTensor(text_to_sequence(text))
return text_norm
def __getitem__(self, index):
# Read audio and text
audiopath, text, speaker_id = self.audiopaths_and_text[index]
audio, sampling_rate = load_wav_to_torch(audiopath)
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
mel = self.get_mel(audio)
text_encoded = self.get_text(text)
speaker_id = self.get_speaker_id(speaker_id)
attn_prior = None
if self.use_attn_prior:
attn_prior = self.compute_attention_prior(
audiopath, mel.shape[1], text_encoded.shape[0])
return (mel, speaker_id, text_encoded, attn_prior)
def __len__(self):
return len(self.audiopaths_and_text)
class DataCollate():
""" Zero-pads model inputs and targets based on number of frames per step """
def __init__(self, n_frames_per_step=1, use_attn_prior=False):
self.n_frames_per_step = n_frames_per_step
self.use_attn_prior = use_attn_prior
def __call__(self, batch):
"""Collate's training batch from normalized text and mel-spectrogram """
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[2]) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0].item()
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][2]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mel_channels = batch[0][0].size(0)
max_target_len = max([x[0].size(1) for x in batch])
if max_target_len % self.n_frames_per_step != 0:
max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step
assert max_target_len % self.n_frames_per_step == 0
# include mel padded, gate padded and speaker ids
mel_padded = torch.FloatTensor(
len(batch), num_mel_channels, max_target_len)
mel_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
attn_prior_padded = None
if self.use_attn_prior:
attn_prior_padded = torch.FloatTensor(
len(batch), max_target_len, max_input_len)
attn_prior_padded.zero_()
speaker_ids = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][0]
mel_padded[i, :, :mel.size(1)] = mel
gate_padded[i, mel.size(1)-1:] = 1
output_lengths[i] = mel.size(1)
speaker_ids[i] = batch[ids_sorted_decreasing[i]][1]
if self.use_attn_prior:
cur_attn_prior = batch[ids_sorted_decreasing[i]][3]
attn_prior_padded[
i,
:cur_attn_prior.size(0),
:cur_attn_prior.size(1)] = cur_attn_prior
return (mel_padded, speaker_ids, text_padded, input_lengths,
output_lengths, gate_padded, attn_prior_padded)
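# --- Illustrative usage sketch; not part of the original Flowtron repo. ---
# Data and DataCollate are meant to be wired into a torch DataLoader: the
# collate function right-pads text, mels, gates and (optionally) attention
# priors to the longest item in the batch. batch_size/shuffle below are
# placeholder assumptions, not values prescribed by the repo's config.
def _example_dataloader(data_config, batch_size=4):
    from torch.utils.data import DataLoader
    dataset = Data(**data_config)
    collate_fn = DataCollate(
        n_frames_per_step=1,
        use_attn_prior=data_config.get('use_attn_prior', False))
    return DataLoader(dataset, batch_size=batch_size, shuffle=True,
                      collate_fn=collate_fn, drop_last=True)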
# ===================================================================
# Takes directory of clean audio and makes directory of spectrograms
# Useful for making test sets
# ===================================================================
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str,
help='JSON file for configuration')
parser.add_argument('-f', '--filelist', type=str,
help='List of files to generate mels')
parser.add_argument('-o', '--output_dir', type=str,
help='Output directory')
args = parser.parse_args()
with open(args.config) as f:
data = f.read()
data_config = json.loads(data)["data_config"]
mel2samp = Data(**data_config)
# Make directory if it doesn't exist
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
os.chmod(args.output_dir, 0o775)
filepaths_and_text = load_filepaths_and_text(args.filelist)
for (filepath, text, speaker_id) in filepaths_and_text:
print("speaker id", speaker_id)
print("text", text)
print("text encoded", mel2samp.get_text(text))
audio, sr = load_wav_to_torch(filepath)
melspectrogram = mel2samp.get_mel(audio)
filename = os.path.basename(filepath)
new_filepath = args.output_dir + '/' + filename + '.pt'
print(new_filepath)
torch.save(melspectrogram, new_filepath)
| flowtron-master | data.py |
""" from https://github.com/keithito/tacotron """
import re
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
def __len__(self):
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
return self._entries.get(word.upper())
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
parts = line.split(' ')
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
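# --- Illustrative usage sketch; not part of the original repo. ---
# CMUDict.lookup returns a list of ARPAbet pronunciation strings, or None when
# the word is missing. The dictionary path below is a placeholder assumption.
def _example_lookup(dict_path='data/cmudict_dictionary'):
    cmu = CMUDict(dict_path, keep_ambiguous=True)
    # Unknown words simply return None, so callers can fall back to graphemes.
    return cmu.lookup('hello'), cmu.lookup('xyzzyqq')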
| flowtron-master | text/cmudict.py |
""" from https://github.com/keithito/tacotron """
import re
from text import cleaners
from text.symbols import symbols
from text.symbols import _punctuation as punctuation_symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
# for arpabet with apostrophe
_apostrophe = re.compile(r"(?=\S*['])([a-zA-Z'-]+)")
def text_to_sequence(text):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(text)
break
sequence += _symbols_to_sequence(m.group(1))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
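# --- Illustrative round-trip example; not part of the original repo. ---
# Plain characters map to one ID per symbol, while text wrapped in curly braces
# is treated as space-separated ARPAbet via the '@'-prefixed symbols.
def _example_round_trip():
    ids = text_to_sequence("Turn left on {HH AW1 S S T AH0 N} Street.")
    # sequence_to_text (defined below) reverses the mapping and re-wraps the
    # ARPAbet run in curly braces; symbols outside the table are dropped.
    return sequence_to_text(ids)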
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
    return s in _symbol_to_id and s != '_' and s != '~'
def get_arpabet(word, cmudict, index=0):
re_start_punc = r"\A\W+"
re_end_punc = r"\W+\Z"
start_symbols = re.findall(re_start_punc, word)
if len(start_symbols):
start_symbols = start_symbols[0]
word = word[len(start_symbols):]
else:
start_symbols = ''
end_symbols = re.findall(re_end_punc, word)
if len(end_symbols):
end_symbols = end_symbols[0]
word = word[:-len(end_symbols)]
else:
end_symbols = ''
arpabet_suffix = ''
if _apostrophe.match(word) is not None and word.lower() != "it's" and word.lower()[-1] == 's':
word = word[:-2]
arpabet_suffix = ' Z'
arpabet = None if word.lower() in HETERONYMS else cmudict.lookup(word)
if arpabet is not None:
return start_symbols + '{%s}' % (arpabet[index] + arpabet_suffix) + end_symbols
else:
return start_symbols + word + end_symbols
def files_to_list(filename):
"""
Takes a text file of filenames and makes a list of filenames
"""
with open(filename, encoding='utf-8') as f:
files = f.readlines()
files = [f.rstrip() for f in files]
return files
HETERONYMS = set(files_to_list('data/heteronyms'))
| flowtron-master | text/__init__.py |
""" from https://github.com/keithito/tacotron """
import inflect
import re
_large_numbers = '(trillion|billion|million|thousand|hundred)'
_measurements = '(f|c|k|d)'
_measurements_key = {'f': 'fahrenheit', 'c': 'celsius', 'k': 'thousand', 'd': 'd'}
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+[ ]?{}?)'.format(_large_numbers), re.IGNORECASE)
_measurement_re = re.compile(r'([0-9\.\,]*[0-9]+(\s)?{}\b)'.format(_measurements), re.IGNORECASE)
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r"[0-9]+'s|[0-9]+")
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
# check for million, billion, etc...
parts = match.split(' ')
if len(parts) == 2 and len(parts[1]) > 0 and parts[1] in _large_numbers:
return "{} {} {} ".format(parts[0], parts[1], 'dollars')
parts = parts[0].split('.')
if len(parts) > 2:
return match + " dollars" # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return "{} {}, {} {} ".format(
_inflect.number_to_words(dollars), dollar_unit,
_inflect.number_to_words(cents), cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return "{} {} ".format(_inflect.number_to_words(dollars), dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return "{} {} ".format(_inflect.number_to_words(cents), cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_measurement(m):
    _, number, measurement = re.split(r'(\d+(?:\.\d+)?)', m.group(0))
number = _inflect.number_to_words(number)
measurement = "".join(measurement.split())
measurement = _measurements_key[measurement.lower()]
return "{} {}".format(number, measurement)
def _expand_number(m):
_, number, suffix = re.split(r"(\d+(?:'\d+)?)", m.group(0))
num = int(number)
if num > 1000 and num < 3000:
if num == 2000:
text = 'two thousand'
elif num > 2000 and num < 2010:
text = 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
text = _inflect.number_to_words(num // 100) + ' hundred'
else:
num = _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
num = re.sub(r'-', ' ', num)
text = num
else:
num = _inflect.number_to_words(num, andword='')
num = re.sub(r'-', ' ', num)
num = re.sub(r',', '', num)
text = num
if suffix == "'s" and text[-1] == 'y':
text = text[:-1] + 'ies'
return text
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_measurement_re, _expand_measurement, text)
text = re.sub(_number_re, _expand_number, text)
return text
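# --- Illustrative example; not part of the original repo. ---
# normalize_numbers chains the substitutions above, e.g. "$20" becomes
# "twenty dollars", "2nd" becomes "second" and "1999" becomes
# "nineteen ninety nine".
def _example_normalize_numbers():
    return normalize_numbers("The 2nd ticket cost $20 in 1999.")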
| flowtron-master | text/numbers.py |
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify the character sets defined below. See TRAINING_DATA.md for details. '''
from text import cmudict
_punctuation = '!\'",.:;? '
_math = '#%&*+-/[]()'
_special = '_@©°½—₩€$'
_accented = 'áçéêëñöøćž'
_numbers = '0123456789'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = list(_punctuation + _math + _special + _accented + _numbers + _letters) + _arpabet
| flowtron-master | text/symbols.py |
""" adapted from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from unidecode import unidecode
from .numbers import normalize_numbers
from .acronyms import normalize_acronyms
from .datestime import normalize_datestime
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('ms', 'miss'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
_safe_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('no', 'number'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_safe_abbreviations(text):
for regex, replacement in _safe_abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def expand_acronyms(text):
return normalize_acronyms(text)
def expand_datestime(text):
return normalize_datestime(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def separate_acronyms(text):
text = re.sub(r"([0-9]+)([a-zA-Z]+)", r"\1 \2", text)
text = re.sub(r"([a-zA-Z]+)([0-9]+)", r"\1 \2", text)
return text
def remove_hyphens(text):
text = re.sub(r'(?<=\w)(-)(?=\w)', ' ', text)
return text
def convert_to_ascii(text):
return unidecode(text)
def basic_cleaners(text):
'''Basic pipeline that collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def flowtron_cleaners(text):
text = collapse_whitespace(text)
text = remove_hyphens(text)
text = expand_datestime(text)
text = expand_numbers(text)
text = expand_safe_abbreviations(text)
text = expand_acronyms(text)
return text
def english_cleaners(text):
'''Pipeline for English text, with number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
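# --- Illustrative example; not part of the original repo. ---
# Cleaner names are resolved by string via getattr in text/__init__.py
# (_clean_text), so a cleaner list such as ["flowtron_cleaners"] selects the
# pipeline above; a direct call looks like this.
def _example_clean():
    return english_cleaners("Dr. Smith paid $20 on the 3rd of May, 1999.")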
| flowtron-master | text/cleaners.py |
import re
_ampm_re = re.compile(r'([0-9]|0[0-9]|1[0-9]|2[0-3]):?([0-5][0-9])?\s*([AaPp][Mm]\b)')
def _expand_ampm(m):
matches = list(m.groups(0))
txt = matches[0]
if matches[1] == 0 or matches[1] == '0' or matches[1] == '00':
pass
else:
txt += ' ' + matches[1]
if matches[2][0] == 'a':
txt += ' AM'
elif matches[2][0] == 'p':
txt += ' PM'
return txt
def normalize_datestime(text):
text = re.sub(_ampm_re, _expand_ampm, text)
text = re.sub(r"([0-9]|0[0-9]|1[0-9]|2[0-3]):([0-5][0-9])?", r"\1 \2", text)
return text
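# --- Illustrative example; not part of the original repo. ---
# normalize_datestime rewrites clock times so later number expansion reads them
# naturally, e.g. "10:30am" -> "10 30 AM" and "7:00 PM" -> "7 PM".
def _example_normalize_datestime():
    return normalize_datestime("The train leaves at 10:30am, not 7:00 PM.")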
| flowtron-master | text/datestime.py |
import re
from .cmudict import CMUDict
_letter_to_arpabet = {
'A': 'EY1',
'B': 'B IY1',
'C': 'S IY1',
'D': 'D IY1',
'E': 'IY1',
'F': 'EH1 F',
'G': 'JH IY1',
'H': 'EY1 CH',
'I': 'AY1',
'J': 'JH EY1',
'K': 'K EY1',
'L': 'EH1 L',
'M': 'EH1 M',
'N': 'EH1 N',
'O': 'OW1',
'P': 'P IY1',
'Q': 'K Y UW1',
'R': 'AA1 R',
'S': 'EH1 S',
'T': 'T IY1',
'U': 'Y UW1',
'V': 'V IY1',
'X': 'EH1 K S',
'Y': 'W AY1',
'W': 'D AH1 B AH0 L Y UW0',
'Z': 'Z IY1',
's': 'Z'
}
# must ignore roman numerals
_acronym_re = re.compile(r'([A-Z][A-Z]+)s?|([A-Z]\.([A-Z]\.)+s?)')
cmudict = CMUDict('data/cmudict_dictionary', keep_ambiguous=False)
def _expand_acronyms(m, add_spaces=True):
acronym = m.group(0)
# remove dots if they exist
    acronym = re.sub(r'\.', '', acronym)
acronym = "".join(acronym.split())
arpabet = cmudict.lookup(acronym)
if arpabet is None:
acronym = list(acronym)
arpabet = ["{" + _letter_to_arpabet[letter] + "}" for letter in acronym]
# temporary fix
if arpabet[-1] == '{Z}' and len(arpabet) > 1:
arpabet[-2] = arpabet[-2][:-1] + ' ' + arpabet[-1][1:]
del arpabet[-1]
arpabet = ' '.join(arpabet)
else:
arpabet = "{" + arpabet[0] + "}"
return arpabet
def normalize_acronyms(text):
text = re.sub(_acronym_re, _expand_acronyms, text)
return text
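# --- Illustrative example; not part of the original repo. ---
# Acronyms matched by _acronym_re are looked up in the CMU dictionary; misses
# are spelled out letter by letter via _letter_to_arpabet (one {...} group per
# letter), while dictionary hits come back as a single {...} pronunciation.
def _example_normalize_acronyms():
    return normalize_acronyms("The CPU and the GPU finished training.")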
| flowtron-master | text/acronyms.py |
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Allow password to be changed at login for the notebook server.
#
# While logging in with a token, the notebook server UI will give the user the
# opportunity to enter a new password at the same time, which will replace the
# token login mechanism.
#
# This can be set to false to prevent changing password from the UI/API.
#c.NotebookApp.allow_password_change = True
## Allow requests where the Host header doesn't point to a local server
#
# By default, requests get a 403 forbidden response if the 'Host' header shows
# that the browser thinks it's on a non-local domain. Setting this option to
# True disables this check.
#
# This protects against 'DNS rebinding' attacks, where a remote web server
# serves you a page and then changes its DNS to send later requests to a local
# IP, bypassing same-origin checks.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local, along
# with hostnames configured in local_hostnames.
c.NotebookApp.allow_remote_access = True
## Whether to allow the user to run the notebook as root.
c.NotebookApp.allow_root = True
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## Override URL shown to users.
#
# Replace actual URL, including protocol, address, port and base URL, with the
# given value when displaying URL to the users. Do not change the actual
# connection URL. If authentication token is enabled, the token is added to the
# custom URL automatically.
#
# This option is intended to be used when the URL to display to the user cannot
# be determined reliably by the Jupyter notebook server (proxified or
# containerized setups for example).
#c.NotebookApp.custom_display_url = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## handlers that should be loaded at higher priority than the default services
#c.NotebookApp.extra_services = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Extra keyword arguments to pass to `get_secure_cookie`. See tornado's
# get_secure_cookie docs for details.
#c.NotebookApp.get_secure_cookie_kwargs = {}
## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which stream output can be sent on iopub before
# they are limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
c.NotebookApp.ip = '0.0.0.0'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## Hostnames to allow as local when allow_remote_access is False.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted as
# local as well.
#c.NotebookApp.local_hostnames = ['localhost']
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Sets the maximum allowed size of the client request body, specified in the
# Content-Length request header field. If the size in a request exceeds the
# configured value, a malformed HTTP message is returned to the client.
#
# Note: max_body_size is applied even in streaming mode.
#c.NotebookApp.max_body_size = 536870912
## Gets or sets the maximum amount of memory, in bytes, that is allocated for
# use by the buffer manager.
#c.NotebookApp.max_buffer_size = 536870912
## Dict of Python modules to load as notebook server extensions. Entry values can
# be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = '/work'
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = ''
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## If True, display a button in the dashboard to quit (shutdown the notebook
# server).
#c.NotebookApp.quit_button = True
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Shut down the server after N seconds with no kernels or terminals running and
# no activity. This can be used together with culling idle kernels
# (MappingKernelManager.cull_idle_timeout) to shutdown the notebook server when
# it's not in use. This is not precisely timed: it may shut down up to a minute
# later. 0 (the default) disables this automatic shutdown.
#c.NotebookApp.shutdown_no_activity_timeout = 0
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Set to False to disable terminals.
#
# This does *not* make the notebook server more secure by itself. Anything the
# user can do in a terminal, they can also do in a notebook.
#
# Terminals may also be automatically disabled if the terminado package is not
# available.
#c.NotebookApp.terminals_enabled = True
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether or not to trust the X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL.
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## Specify Where to open the notebook on startup. This is the `new` argument
# passed to the standard library method `webbrowser.open`. The behaviour is not
# guaranteed, but depends on browser support. Valid values are:
#
# - 2 opens a new tab,
# - 1 opens a new window,
# - 0 opens in an existing window.
#
# See the `webbrowser.open` documentation for details.
#c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
#c.NotebookApp.websocket_compression_options = None
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
#
# When True (default), messages are buffered and replayed on reconnect, avoiding
# lost messages due to interrupted connectivity.
#
# Disable if long-running kernels will produce too much output while no
# frontends are connected.
#c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout > 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout > 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. Very short timeouts may result
# in kernels being culled for users with poor network connections.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
## Timeout for giving up on a kernel (in seconds).
#
# On starting and restarting kernels, we check whether the kernel is running and
# responsive by sending kernel_info_requests. This sets the timeout in seconds
# for how long the kernel can take before being presumed dead. This affects the
# MappingKernelManager (which handles kernel restarts) and the
# ZMQChannelsHandler (which handles the startup).
#c.MappingKernelManager.kernel_info_timeout = 60
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
## Allow access to hidden files
#c.ContentsManager.allow_hidden = False
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API, which may be
# inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass, which
# will be much more efficient.
#
# Access to these files should be Authenticated.
#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a `path` argument specifying
# the root directory from which to serve files.
#c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved to a temporary file on disk and then, if
# successfully written, it replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota exceeded).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## If True (default), deleting files will send them to the platform's
# trash/recycle bin, where they can be recovered. If False, deleting files
# really deletes them.
#c.FileContentsManager.delete_to_trash = True
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
| tensorrt-laboratory-master | jupyter_notebook_config.py |
#!/usr/bin/env python3
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import subprocess
models = [
("ResNet-50-deploy.prototxt", "prob"),
# ("ResNet-152-deploy.prototxt", "prob"),
]
precisions = [
# ("fp32", ""),
("fp16", "--fp16"),
# ("int8", "--int8")
]
def main():
for model, o in models:
for name, p in precisions:
for b in [1, 8]: #, 2, 4, 8]:
n = "b{}-{}".format(b, name)
e = model.replace("prototxt", "engine")
e = e.replace("deploy", n)
m = os.path.join("/work/models", model)
if os.path.isfile(e):
continue
subprocess.call("trtexec --deploy={} --batch={} --output={} {} --engine={}".format(
m, b, o, p, e
), shell=True)
if __name__ == "__main__":
main()
| tensorrt-laboratory-master | models/setup.py |
import os
import argparse
import numpy as np
import pycuda.driver as cuda
import tensorrt as trt
try:
# Sometimes python2 does not understand FileNotFoundError
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def GiB(val):
return val * 1 << 30
def find_sample_data(description="Runs a TensorRT Python sample", subfolder="", find_files=[]):
'''
Parses sample arguments.
Args:
description (str): Description of the sample.
subfolder (str): The subfolder containing data relevant to this sample
        find_files (list): A list of filenames to find. Each filename will be replaced with an absolute path.
Returns:
str: Path of data directory.
Raises:
FileNotFoundError
'''
kDEFAULT_DATA_ROOT = os.path.abspath("/usr/src/tensorrt/data")
parser = argparse.ArgumentParser(description=description)
# Standard command-line arguments for all samples.
parser.add_argument("-d", "--datadir", help="Location of the TensorRT sample data directory.")
args, unknown_args = parser.parse_known_args()
# If data directory is not specified, use the default.
data_root = args.datadir if args.datadir else kDEFAULT_DATA_ROOT
data_path = os.path.join(data_root, subfolder) if subfolder else data_root
# Make sure data directory exists.
if not (os.path.exists(data_path)):
raise FileNotFoundError(data_path + " does not exist. Please provide the correct data path with the -d option.")
# Find all requested files.
for index, f in enumerate(find_files):
find_files[index] = os.path.abspath(os.path.join(data_path, f))
if not os.path.exists(find_files[index]):
raise FileNotFoundError(find_files[index] + " does not exist. Please provide the correct data path with the -d option.")
if find_files:
return data_path, find_files
else:
return data_path
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding))
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
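# --- Illustrative usage sketch; not part of the original repo. ---
# Typical flow with a deserialized engine: allocate buffers once, copy the
# request into the first input's host buffer, then run do_inference. Assumes a
# single input and a single output binding, and that a CUDA context has already
# been created (e.g. via pycuda.autoinit in the calling script).
def _example_infer(engine, input_array):
    inputs, outputs, bindings, stream = allocate_buffers(engine)
    np.copyto(inputs[0].host, input_array.ravel())
    with engine.create_execution_context() as context:
        [result] = do_inference(context, bindings, inputs, outputs, stream)
    return result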
| tensorrt-laboratory-master | models/onnx/common.py |
# This sample uses an ONNX ResNet50 Model to create a TensorRT Inference Engine
import random
from PIL import Image
import numpy as np
import ctypes
import pycuda.driver as cuda
# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
import tensorrt as trt
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], "."))
import common
def softmax(X, theta = 1.0, axis = None):
"""
Compute the softmax of each element along an axis of X.
Parameters
----------
X: ND-Array. Probably should be floats.
theta (optional): float parameter, used as a multiplier
prior to exponentiation. Default = 1.0
axis (optional): axis to compute values along. Default is the
first non-singleton axis.
Returns an array the same size as X. The result will sum to 1
along the specified axis.
"""
# make X at least 2d
y = np.atleast_2d(X)
# find axis
if axis is None:
axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)
# multiply y against the theta parameter,
y = y * float(theta)
# subtract the max for numerical stability
y = y - np.expand_dims(np.max(y, axis = axis), axis)
# exponentiate y
y = np.exp(y)
# take the sum along the specified axis
ax_sum = np.expand_dims(np.sum(y, axis = axis), axis)
# finally: divide elementwise
p = y / ax_sum
# flatten if X was 1D
if len(X.shape) == 1: p = p.flatten()
return p
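# --- Illustrative helper; not part of the original repo. ---
# Turns the raw 1-D logits returned by the engine into a (class_index,
# probability) pair, mirroring how main() reports its top-1 prediction.
def _example_top1(logits):
    probs = softmax(logits)
    pred = int(np.argmax(probs))
    return pred, float(probs[pred])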
class ModelData(object):
MODEL_PATH = "/work/models/flowers-152.onnx"
INPUT_SHAPE = (3, 224, 224)
# We can convert TensorRT data types to numpy types with trt.nptype()
DTYPE = trt.float32
# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# Allocate host and device buffers, and create a stream.
def allocate_buffers(engine):
# Determine dimensions and create page-locked memory buffers (i.e. won't be swapped to disk) to hold host inputs/outputs.
h_input = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(0)), dtype=trt.nptype(ModelData.DTYPE))
h_output = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(1)), dtype=trt.nptype(ModelData.DTYPE))
# Allocate device memory for inputs and outputs.
d_input = cuda.mem_alloc(h_input.nbytes)
d_output = cuda.mem_alloc(h_output.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
stream = cuda.Stream()
return h_input, d_input, h_output, d_output, stream
def do_inference(context, h_input, d_input, h_output, d_output, stream):
# Transfer input data to the GPU.
cuda.memcpy_htod_async(d_input, h_input, stream)
# Run inference.
context.execute_async(bindings=[int(d_input), int(d_output)], stream_handle=stream.handle)
# Transfer predictions back from the GPU.
cuda.memcpy_dtoh_async(h_output, d_output, stream)
# Synchronize the stream
stream.synchronize()
# The Onnx path is used for Onnx models.
def build_engine_onnx(model_file, calibrator=None):
with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
builder.max_workspace_size = common.GiB(1)
builder.max_batch_size = 8
precision = "fp32"
if calibrator:
builder.int8_mode = True
builder.int8_calibrator = calibrator
precision = "int8"
else:
builder.fp16_mode = True
precision = "fp16"
# Load the Onnx model and parse it in order to populate the TensorRT network.
with open(model_file, 'rb') as model:
parser.parse(model.read())
engine = builder.build_cuda_engine(network)
serialized = engine.serialize()
with open("/work/models/flowers-152-b{}-{}.engine".format(builder.max_batch_size, precision), "wb") as file:
file.write(serialized)
return engine
def normalize_image(image_name):
image = Image.open(image_name)
# Resize, antialias and transpose the image to CHW.
c, h, w = ModelData.INPUT_SHAPE
image_arr = np.asarray(image.resize((w, h), Image.ANTIALIAS)).transpose([2, 0, 1]).astype(trt.nptype(ModelData.DTYPE)).ravel()
# This particular ResNet50 model requires some preprocessing, specifically, mean normalization.
return ((image_arr / 255.0) - 0.5) * 2.0
def load_normalized_test_case(test_image, pagelocked_buffer):
# Normalize the image and copy to pagelocked memory.
np.copyto(pagelocked_buffer, normalize_image(test_image))
return test_image
def create_calibration_dataset():
jpegs = []
for dirpath, subdirs, files in os.walk("/work/models/flowers-data/flowers"):
for f in files:
if f.endswith("jpg"):
jpegs.append(os.path.join(dirpath, f))
random.shuffle(jpegs)
return jpegs[:200]
class ImageBatchStream:
def __init__(self, batch_size, calibration_files):
c, h, w = ModelData.INPUT_SHAPE
self.batch_size = batch_size
self.files = calibration_files
self.batch = 0
self.max_batches = (len(calibration_files) // batch_size) + \
(1 if (len(calibration_files) % batch_size) else 0)
self.calibration_data = np.zeros((batch_size, c, h, w), dtype=np.float32)
def reset(self):
self.batch = 0
def next_batch(self):
c, h, w = ModelData.INPUT_SHAPE
if self.batch < self.max_batches:
imgs = []
files_for_batch = self.files[self.batch_size * self.batch : \
self.batch_size * (self.batch + 1)]
for f in files_for_batch:
print("[ImageBatchStream] Processing ", f)
img = normalize_image(f)
imgs.append(img.reshape((c, h, w)))
for i in range(len(imgs)):
self.calibration_data[i] = imgs[i]
self.batch += 1
return np.ascontiguousarray(self.calibration_data, dtype=np.float32)
else:
return np.array([])
class MyEntropyCalibrator(trt.IInt8EntropyCalibrator):
def __init__(self, stream):
trt.IInt8EntropyCalibrator.__init__(self)
self.batchstream = stream
self.d_input = cuda.mem_alloc(self.batchstream.calibration_data.nbytes)
stream.reset()
def get_batch_size(self):
return self.batchstream.batch_size
def get_batch(self, bindings, names):
batch = self.batchstream.next_batch()
if not batch.size:
return None
cuda.memcpy_htod(self.d_input, batch)
bindings[0] = int(self.d_input)
return bindings
def read_calibration_cache(self, length):
return None
def write_calibration_cache(self, ptr, size):
# cache = ctypes.c_char_p(int(ptr))
# with open('calibration_cache.bin', 'wb') as f:
# f.write(cache.value)
return None
def main():
calibration_files = create_calibration_dataset()
batch_stream = ImageBatchStream(8, calibration_files)
int8_calibrator = None
int8_calibrator = MyEntropyCalibrator(batch_stream)
engine = build_engine_onnx("/work/models/flowers-152.onnx", calibrator=int8_calibrator)
# serialized = engine.serialize()
# with open("/work/models/flowers-152-b8-int8.engine", "wb") as file:
# file.write(serialized)
# h_input, d_input, h_output, d_output, stream = allocate_buffers(engine)
# with engine.create_execution_context() as context:
# for test_image in ["/work/models/flowers-data/test/image_07927.jpg",
# "/work/models/flowers-data/test/image_06969.jpg",]:
# #test_image = "/work/models/flowers-data/test/image_07927.jpg" # 13 - blanket flower
# #test_image = "/work/models/flowers-data/test/image_06969.jpg" # 0 - alpine sea holly
# test_case = load_normalized_test_case(test_image, h_input)
# do_inference(context, h_input, d_input, h_output, d_output, stream)
# # We use the highest probability as our prediction. Its index corresponds to the predicted label.
# pred = np.argmax(h_output)
# score = softmax(h_output)[pred]
# print("Recognized " + test_case + " as " + str(pred) + " score: " + str(score))
def old_main():
# Set the data path to the directory that contains the trained models and test images for inference.
data_path, data_files = common.find_sample_data(description="Runs a ResNet50 network with a TensorRT inference engine.", subfolder="resnet50", find_files=["binoculars.jpeg", "reflex_camera.jpeg", "tabby_tiger_cat.jpg", ModelData.MODEL_PATH, "class_labels.txt"])
# Get test images, models and labels.
test_images = data_files[0:3]
onnx_model_file, labels_file = data_files[3:]
labels = open(labels_file, 'r').read().split('\n')
# Build a TensorRT engine.
with build_engine_onnx(onnx_model_file) as engine:
# Inference is the same regardless of which parser is used to build the engine, since the model architecture is the same.
# Allocate buffers and create a CUDA stream.
h_input, d_input, h_output, d_output, stream = allocate_buffers(engine)
# Contexts are used to perform inference.
with engine.create_execution_context() as context:
# Load a normalized test case into the host input page-locked buffer.
test_image = random.choice(test_images)
test_case = load_normalized_test_case(test_image, h_input)
# Run the engine. The output will be a 1D tensor of length 1000, where each value represents the
# probability that the image corresponds to that label
do_inference(context, h_input, d_input, h_output, d_output, stream)
# We use the highest probability as our prediction. Its index corresponds to the predicted label.
pred = np.argmax(h_output)
print("Recognized " + test_case + " as " + pred)
if __name__ == '__main__':
main()
| tensorrt-laboratory-master | models/onnx/onnx_builder.py |
#!/usr/bin/env python3
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import subprocess
models = [
("ResNet-50-deploy.prototxt", "prob"),
# ("ResNet-152-deploy.prototxt", "prob"),
]
precisions = [
("fp32", ""),
("fp16", "--fp16"),
("int8", "--int8")
]
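# Build one engine per (model, precision, batch-size) combination with giexec,
# skipping any engine file that already exists.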
def main():
for model, o in models:
for name, p in precisions:
for b in [1]: #, 2, 4, 8]:
n = "b{}-{}".format(b, name)
e = model.replace("prototxt", "engine")
e = e.replace("deploy", n)
m = os.path.join("/work/models", model)
if os.path.isfile(e):
continue
subprocess.call("giexec --deploy={} --batch={} --output={} {} --engine={}".format(
m, b, o, p, e
), shell=True)
if __name__ == "__main__":
main()
| tensorrt-laboratory-master | examples/98_MultiProcessSingleStream/setup.py |
#!/usr/bin/env python3
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import subprocess
import click
import int8
precision_opts = {
"fp32": "",
"fp16": "--fp16",
"int8": "--fp16 --int8",
}
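# Build one TensorRT engine per (model, precision, batch) combination: INT8 engines
# go through the calibration path in int8.py, while FP32/FP16 engines are built by
# shelling out to trtexec.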
File = click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=True)
@click.command()
@click.option("--batch", type=click.IntRange(min=1, max=32), multiple=True)
@click.option("--precision", type=click.Choice(["fp32", "fp16", "int8"]), multiple=True)
@click.argument("models", type=File, nargs=-1)
def main(models, batch, precision):
for model in models:
#click.echo(model)
#click.echo(precision)
for p in precision:
#click.echo(p)
for b in batch:
#click.echo(b)
n = "b{}-{}".format(b, p)
m = os.path.basename(model)
m, ext = os.path.splitext(m)
e = "{}-{}.{}".format(m,n,"engine")
if os.path.isfile(e):
print("A TensorRT engine {} already exists! Skipping...".format(e))
continue
elif p == "int8":
assert os.path.isdir("./calibration_images"), "Need to download calibration images before creating INT8 engine!"
int8.build_int8_engine_onnx(model, "./calibration_images", b, 32, e)
else:
subprocess.call("trtexec --onnx={} --batch={} {} --saveEngine={}".format(model, b, precision_opts.get(p), e), shell=True)
if __name__ == "__main__":
main()
| tensorrt-laboratory-master | examples/ONNX/resnet50/build.py |
#!/usr/bin/env python3
import os
import time
import trtlab
import onnx_utils as utils
import numpy as np
import matplotlib.pyplot as plt
import mxnet as mx
from mxnet.gluon.data.vision import transforms
from imagenet_labels import labels
import click
tests = {}
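# Register each serialized TensorRT engine with a trtlab InferenceManager and
# return the runner objects used to submit inference requests.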
def tensorrt_init(engines):
manager = trtlab.InferenceManager(max_exec_concurrency=4)
runners = []
for engine in engines:
name, _ = os.path.splitext(os.path.basename(engine))
runners.append(manager.register_tensorrt_engine(name, engine))
manager.update_resources()
return runners
def infer_image(runner, image):
inputs = preprocess_image(runner, image)
future = runner.infer(**inputs)
result = future.get()
for name, tensor in result.items():
tensor = tensor.reshape(1000)
idx = np.argmax(tensor)
print("\n*** Results ***")
print(labels[idx], tensor[idx])
print("")
def preprocess_image(runner, image_path):
inputs = runner.input_bindings()
keys = list(inputs.keys())
input_name = keys[0]
img = np.array(plt.imread(image_path))
img = transform_image(img)
return { input_name: img }
def transform_image(img):
transform_fn = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
img = transform_fn(mx.nd.array(img)).asnumpy()
img = np.expand_dims(img, axis=0) # batchify
return img
def validate_results(computed, expected):
keys = list(computed.keys())
output_name = keys[0]
output_value = computed[output_name]
np.testing.assert_almost_equal(output_value, expected[0], decimal=3)
print("-- Test Passed: All outputs {} match within 3 decimals".format(output_value.shape))
File = click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=True)
Path = click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True)
@click.command()
@click.option("--image", type=File, multiple=True)
@click.argument("engine", type=File, nargs=1)
def main(engine, image):
runners = tensorrt_init([engine])
for runner in runners:
for img in image:
infer_image(runner, img)
if __name__ == "__main__":
main()
| tensorrt-laboratory-master | examples/ONNX/resnet50/run_jpeg_test.py |
#
# Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
#
# NOTICE TO LICENSEE:
#
# This source code and/or documentation ("Licensed Deliverables") are
# subject to NVIDIA intellectual property rights under U.S. and
# international Copyright laws.
#
# These Licensed Deliverables contained herein is PROPRIETARY and
# CONFIDENTIAL to NVIDIA and is being provided under the terms and
# conditions of a form of NVIDIA software license agreement by and
# between NVIDIA and Licensee ("License Agreement") or electronically
# accepted by Licensee. Notwithstanding any terms or conditions to
# the contrary in the License Agreement, reproduction or disclosure
# of the Licensed Deliverables to any third party without the express
# written consent of NVIDIA is prohibited.
#
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
# PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THESE LICENSED DELIVERABLES.
#
# U.S. Government End Users. These Licensed Deliverables are a
# "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
# 1995), consisting of "commercial computer software" and "commercial
# computer software documentation" as such terms are used in 48
# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
# only as a commercial end item. Consistent with 48 C.F.R.12.212 and
# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
# U.S. Government End Users acquire the Licensed Deliverables with
# only those rights set forth herein.
#
# Any use of the Licensed Deliverables in individual and commercial
# software must include, in the user documentation and internal
# comments to the code, the above Disclaimer and U.S. Government End
# Users Notice.
import tensorrt as trt
import os
import pycuda.driver as cuda
import pycuda.autoinit
import matplotlib.pyplot as plt
import mxnet as mx
from mxnet.gluon.data.vision import transforms
import numpy as np
from random import shuffle
class ONNXEntropyCalibrator(trt.IInt8EntropyCalibrator):
def __init__(self, image_dir, batch_size, calibration_batches, cache_file):
# Whenever you specify a custom constructor for a TensorRT class,
# you MUST call the constructor of the parent explicitly.
trt.IInt8EntropyCalibrator.__init__(self)
self.cache_file = cache_file
# Get a list of all the images in the image directory.
image_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir)]
shuffle(image_files)
if len(image_files) < calibration_batches * batch_size:
print("Only found enough images for {} batches instead of {}, continuing anyway...".format(len(image_files) // batch_size, calibration_batches))
self.image_files = image_files
else:
self.image_files = image_files[:calibration_batches * batch_size]
# Keeps track of current image in image list
self.current_image = 0
self.batch_size = batch_size
self.input_size = [3,224,224]
# Each element of the calibration data is a float32.
self.device_input = cuda.mem_alloc(self.batch_size * self.input_size[0] * self.input_size[1] * self.input_size[2] * trt.float32.itemsize)
# Create a generator that will give us batches. We can use next() to iterate over the result.
def load_batches():
while self.current_image < len(self.image_files):
data, images_read = self.read_image_batch()
self.current_image += images_read
yield data
self.batches = load_batches()
def transform_image(self, img):
transform_fn = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
img = transform_fn(mx.nd.array(img)).asnumpy()
return img
# This function is used to load calibration images into batches.
def read_image_batch(self):
# Depending on batch size and number of images, the final batch might only be partially full.
images_to_read = min(self.batch_size, len(self.image_files) - self.current_image)
host_buffer = np.zeros(shape=[self.batch_size]+self.input_size)
for i in range(images_to_read):
img = np.array(plt.imread(self.image_files[self.current_image]))
img = self.transform_image(img)
host_buffer[i,:,:,:] = img
return host_buffer, images_to_read
def get_batch_size(self):
return self.batch_size
# TensorRT passes along the names of the engine bindings to the get_batch function.
# You don't necessarily have to use them, but they can be useful to understand the order of
# the inputs. The bindings list is expected to have the same ordering as 'names'.
def get_batch(self, names):
try:
# Get a single batch.
data = np.ascontiguousarray(next(self.batches), np.float32)
# Copy to device, then return a list containing pointers to input device buffers.
cuda.memcpy_htod(self.device_input, data)
return [int(self.device_input)]
except StopIteration:
# When we're out of batches, we return either [] or None.
# This signals to TensorRT that there is no calibration data remaining.
return None
def read_calibration_cache(self):
# If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
if os.path.exists(self.cache_file):
with open(self.cache_file, "rb") as f:
return f.read()
def write_calibration_cache(self, cache):
with open(self.cache_file, "wb") as f:
f.write(cache)
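# Example usage (sketch; directory, batch size, and file names are illustrative and
# mirror the call in int8.py):
#   calib = ONNXEntropyCalibrator("./calibration_images", batch_size=8,
#                                 calibration_batches=32,
#                                 cache_file="INT8CalibrationTable")
#   builder.int8_calibrator = calib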
| tensorrt-laboratory-master | examples/ONNX/resnet50/calibrator.py |
#!/usr/bin/env python3
import os
import trtlab
import numpy as np
import click
import onnx_utils as utils
tests = {}
def tensorrt_init(engines):
manager = trtlab.InferenceManager(max_exec_concurrency=4)
runners = []
for engine in engines:
name, _ = os.path.splitext(os.path.basename(engine))
runners.append(manager.register_tensorrt_engine(name, engine))
manager.update_resources()
return runners
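# Walk the ONNX test-data directory for folders named "test_*" and yield the
# (inputs, reference outputs) tensors stored in each as protobuf files.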
def test_data(test_path):
for path, dirs, files in os.walk(test_path):
if os.path.basename(path).startswith("test_"):
tests[path] = files
for path, files in tests.items():
inputs = utils.load_inputs(path)
outputs = utils.load_outputs(path)
print("** Testing {} **".format(path))
yield inputs, outputs
def run_test(runner, inputs, outputs):
inputs = preprocess_inputs(runner, inputs)
future = runner.infer(**inputs)
result = future.get()
validate_results(result, outputs)
def preprocess_inputs(runner, inputs):
expected_input = runner.input_bindings()
if len(expected_input) != len(inputs):
raise RuntimeError("mismatched number of inputs")
keys = list(expected_input.keys())
input_name = keys[0]
info = expected_input[keys[0]]
shape = info['shape']
tensor = inputs[0]
batch_size = tensor.shape[0]
if list(shape) != list(tensor.shape[1:]):
raise RuntimeError("mismatched input dimensions")
return { input_name: tensor }
def validate_results(computed, expected):
keys = list(computed.keys())
output_name = keys[0]
output_value = computed[output_name]
np.testing.assert_almost_equal(output_value, expected[0], decimal=3)
print("-- Test Passed: All outputs {} match within 3 decimals".format(output_value.shape))
File = click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=True)
Path = click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True)
@click.command()
@click.option("--tests", type=Path, default="resnet50")
@click.argument("engine", type=File, nargs=1)
def main(engine, tests):
runners = tensorrt_init([engine])
for runner in runners:
for inputs, outputs in test_data(tests):
run_test(runner, inputs, outputs)
if __name__ == "__main__":
main()
| tensorrt-laboratory-master | examples/ONNX/resnet50/run_onnx_tests.py |
labels = {0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
10: 'brambling, Fringilla montifringilla',
11: 'goldfinch, Carduelis carduelis',
12: 'house finch, linnet, Carpodacus mexicanus',
13: 'junco, snowbird',
14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
15: 'robin, American robin, Turdus migratorius',
16: 'bulbul',
17: 'jay',
18: 'magpie',
19: 'chickadee',
20: 'water ouzel, dipper',
21: 'kite',
22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
23: 'vulture',
24: 'great grey owl, great gray owl, Strix nebulosa',
25: 'European fire salamander, Salamandra salamandra',
26: 'common newt, Triturus vulgaris',
27: 'eft',
28: 'spotted salamander, Ambystoma maculatum',
29: 'axolotl, mud puppy, Ambystoma mexicanum',
30: 'bullfrog, Rana catesbeiana',
31: 'tree frog, tree-frog',
32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
33: 'loggerhead, loggerhead turtle, Caretta caretta',
34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
35: 'mud turtle',
36: 'terrapin',
37: 'box turtle, box tortoise',
38: 'banded gecko',
39: 'common iguana, iguana, Iguana iguana',
40: 'American chameleon, anole, Anolis carolinensis',
41: 'whiptail, whiptail lizard',
42: 'agama',
43: 'frilled lizard, Chlamydosaurus kingi',
44: 'alligator lizard',
45: 'Gila monster, Heloderma suspectum',
46: 'green lizard, Lacerta viridis',
47: 'African chameleon, Chamaeleo chamaeleon',
48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
50: 'American alligator, Alligator mississipiensis',
51: 'triceratops',
52: 'thunder snake, worm snake, Carphophis amoenus',
53: 'ringneck snake, ring-necked snake, ring snake',
54: 'hognose snake, puff adder, sand viper',
55: 'green snake, grass snake',
56: 'king snake, kingsnake',
57: 'garter snake, grass snake',
58: 'water snake',
59: 'vine snake',
60: 'night snake, Hypsiglena torquata',
61: 'boa constrictor, Constrictor constrictor',
62: 'rock python, rock snake, Python sebae',
63: 'Indian cobra, Naja naja',
64: 'green mamba',
65: 'sea snake',
66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
69: 'trilobite',
70: 'harvestman, daddy longlegs, Phalangium opilio',
71: 'scorpion',
72: 'black and gold garden spider, Argiope aurantia',
73: 'barn spider, Araneus cavaticus',
74: 'garden spider, Aranea diademata',
75: 'black widow, Latrodectus mactans',
76: 'tarantula',
77: 'wolf spider, hunting spider',
78: 'tick',
79: 'centipede',
80: 'black grouse',
81: 'ptarmigan',
82: 'ruffed grouse, partridge, Bonasa umbellus',
83: 'prairie chicken, prairie grouse, prairie fowl',
84: 'peacock',
85: 'quail',
86: 'partridge',
87: 'African grey, African gray, Psittacus erithacus',
88: 'macaw',
89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
90: 'lorikeet',
91: 'coucal',
92: 'bee eater',
93: 'hornbill',
94: 'hummingbird',
95: 'jacamar',
96: 'toucan',
97: 'drake',
98: 'red-breasted merganser, Mergus serrator',
99: 'goose',
100: 'black swan, Cygnus atratus',
101: 'tusker',
102: 'echidna, spiny anteater, anteater',
103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
104: 'wallaby, brush kangaroo',
105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
106: 'wombat',
107: 'jellyfish',
108: 'sea anemone, anemone',
109: 'brain coral',
110: 'flatworm, platyhelminth',
111: 'nematode, nematode worm, roundworm',
112: 'conch',
113: 'snail',
114: 'slug',
115: 'sea slug, nudibranch',
116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
117: 'chambered nautilus, pearly nautilus, nautilus',
118: 'Dungeness crab, Cancer magister',
119: 'rock crab, Cancer irroratus',
120: 'fiddler crab',
121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
124: 'crayfish, crawfish, crawdad, crawdaddy',
125: 'hermit crab',
126: 'isopod',
127: 'white stork, Ciconia ciconia',
128: 'black stork, Ciconia nigra',
129: 'spoonbill',
130: 'flamingo',
131: 'little blue heron, Egretta caerulea',
132: 'American egret, great white heron, Egretta albus',
133: 'bittern',
134: 'crane',
135: 'limpkin, Aramus pictus',
136: 'European gallinule, Porphyrio porphyrio',
137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
138: 'bustard',
139: 'ruddy turnstone, Arenaria interpres',
140: 'red-backed sandpiper, dunlin, Erolia alpina',
141: 'redshank, Tringa totanus',
142: 'dowitcher',
143: 'oystercatcher, oyster catcher',
144: 'pelican',
145: 'king penguin, Aptenodytes patagonica',
146: 'albatross, mollymawk',
147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
149: 'dugong, Dugong dugon',
150: 'sea lion',
151: 'Chihuahua',
152: 'Japanese spaniel',
153: 'Maltese dog, Maltese terrier, Maltese',
154: 'Pekinese, Pekingese, Peke',
155: 'Shih-Tzu',
156: 'Blenheim spaniel',
157: 'papillon',
158: 'toy terrier',
159: 'Rhodesian ridgeback',
160: 'Afghan hound, Afghan',
161: 'basset, basset hound',
162: 'beagle',
163: 'bloodhound, sleuthhound',
164: 'bluetick',
165: 'black-and-tan coonhound',
166: 'Walker hound, Walker foxhound',
167: 'English foxhound',
168: 'redbone',
169: 'borzoi, Russian wolfhound',
170: 'Irish wolfhound',
171: 'Italian greyhound',
172: 'whippet',
173: 'Ibizan hound, Ibizan Podenco',
174: 'Norwegian elkhound, elkhound',
175: 'otterhound, otter hound',
176: 'Saluki, gazelle hound',
177: 'Scottish deerhound, deerhound',
178: 'Weimaraner',
179: 'Staffordshire bullterrier, Staffordshire bull terrier',
180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
181: 'Bedlington terrier',
182: 'Border terrier',
183: 'Kerry blue terrier',
184: 'Irish terrier',
185: 'Norfolk terrier',
186: 'Norwich terrier',
187: 'Yorkshire terrier',
188: 'wire-haired fox terrier',
189: 'Lakeland terrier',
190: 'Sealyham terrier, Sealyham',
191: 'Airedale, Airedale terrier',
192: 'cairn, cairn terrier',
193: 'Australian terrier',
194: 'Dandie Dinmont, Dandie Dinmont terrier',
195: 'Boston bull, Boston terrier',
196: 'miniature schnauzer',
197: 'giant schnauzer',
198: 'standard schnauzer',
199: 'Scotch terrier, Scottish terrier, Scottie',
200: 'Tibetan terrier, chrysanthemum dog',
201: 'silky terrier, Sydney silky',
202: 'soft-coated wheaten terrier',
203: 'West Highland white terrier',
204: 'Lhasa, Lhasa apso',
205: 'flat-coated retriever',
206: 'curly-coated retriever',
207: 'golden retriever',
208: 'Labrador retriever',
209: 'Chesapeake Bay retriever',
210: 'German short-haired pointer',
211: 'vizsla, Hungarian pointer',
212: 'English setter',
213: 'Irish setter, red setter',
214: 'Gordon setter',
215: 'Brittany spaniel',
216: 'clumber, clumber spaniel',
217: 'English springer, English springer spaniel',
218: 'Welsh springer spaniel',
219: 'cocker spaniel, English cocker spaniel, cocker',
220: 'Sussex spaniel',
221: 'Irish water spaniel',
222: 'kuvasz',
223: 'schipperke',
224: 'groenendael',
225: 'malinois',
226: 'briard',
227: 'kelpie',
228: 'komondor',
229: 'Old English sheepdog, bobtail',
230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
231: 'collie',
232: 'Border collie',
233: 'Bouvier des Flandres, Bouviers des Flandres',
234: 'Rottweiler',
235: 'German shepherd, German shepherd dog, German police dog, alsatian',
236: 'Doberman, Doberman pinscher',
237: 'miniature pinscher',
238: 'Greater Swiss Mountain dog',
239: 'Bernese mountain dog',
240: 'Appenzeller',
241: 'EntleBucher',
242: 'boxer',
243: 'bull mastiff',
244: 'Tibetan mastiff',
245: 'French bulldog',
246: 'Great Dane',
247: 'Saint Bernard, St Bernard',
248: 'Eskimo dog, husky',
249: 'malamute, malemute, Alaskan malamute',
250: 'Siberian husky',
251: 'dalmatian, coach dog, carriage dog',
252: 'affenpinscher, monkey pinscher, monkey dog',
253: 'basenji',
254: 'pug, pug-dog',
255: 'Leonberg',
256: 'Newfoundland, Newfoundland dog',
257: 'Great Pyrenees',
258: 'Samoyed, Samoyede',
259: 'Pomeranian',
260: 'chow, chow chow',
261: 'keeshond',
262: 'Brabancon griffon',
263: 'Pembroke, Pembroke Welsh corgi',
264: 'Cardigan, Cardigan Welsh corgi',
265: 'toy poodle',
266: 'miniature poodle',
267: 'standard poodle',
268: 'Mexican hairless',
269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
271: 'red wolf, maned wolf, Canis rufus, Canis niger',
272: 'coyote, prairie wolf, brush wolf, Canis latrans',
273: 'dingo, warrigal, warragal, Canis dingo',
274: 'dhole, Cuon alpinus',
275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
276: 'hyena, hyaena',
277: 'red fox, Vulpes vulpes',
278: 'kit fox, Vulpes macrotis',
279: 'Arctic fox, white fox, Alopex lagopus',
280: 'grey fox, gray fox, Urocyon cinereoargenteus',
281: 'tabby, tabby cat',
282: 'tiger cat',
283: 'Persian cat',
284: 'Siamese cat, Siamese',
285: 'Egyptian cat',
286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
287: 'lynx, catamount',
288: 'leopard, Panthera pardus',
289: 'snow leopard, ounce, Panthera uncia',
290: 'jaguar, panther, Panthera onca, Felis onca',
291: 'lion, king of beasts, Panthera leo',
292: 'tiger, Panthera tigris',
293: 'cheetah, chetah, Acinonyx jubatus',
294: 'brown bear, bruin, Ursus arctos',
295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
297: 'sloth bear, Melursus ursinus, Ursus ursinus',
298: 'mongoose',
299: 'meerkat, mierkat',
300: 'tiger beetle',
301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
302: 'ground beetle, carabid beetle',
303: 'long-horned beetle, longicorn, longicorn beetle',
304: 'leaf beetle, chrysomelid',
305: 'dung beetle',
306: 'rhinoceros beetle',
307: 'weevil',
308: 'fly',
309: 'bee',
310: 'ant, emmet, pismire',
311: 'grasshopper, hopper',
312: 'cricket',
313: 'walking stick, walkingstick, stick insect',
314: 'cockroach, roach',
315: 'mantis, mantid',
316: 'cicada, cicala',
317: 'leafhopper',
318: 'lacewing, lacewing fly',
319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
320: 'damselfly',
321: 'admiral',
322: 'ringlet, ringlet butterfly',
323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
324: 'cabbage butterfly',
325: 'sulphur butterfly, sulfur butterfly',
326: 'lycaenid, lycaenid butterfly',
327: 'starfish, sea star',
328: 'sea urchin',
329: 'sea cucumber, holothurian',
330: 'wood rabbit, cottontail, cottontail rabbit',
331: 'hare',
332: 'Angora, Angora rabbit',
333: 'hamster',
334: 'porcupine, hedgehog',
335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
336: 'marmot',
337: 'beaver',
338: 'guinea pig, Cavia cobaya',
339: 'sorrel',
340: 'zebra',
341: 'hog, pig, grunter, squealer, Sus scrofa',
342: 'wild boar, boar, Sus scrofa',
343: 'warthog',
344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
345: 'ox',
346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
347: 'bison',
348: 'ram, tup',
349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
350: 'ibex, Capra ibex',
351: 'hartebeest',
352: 'impala, Aepyceros melampus',
353: 'gazelle',
354: 'Arabian camel, dromedary, Camelus dromedarius',
355: 'llama',
356: 'weasel',
357: 'mink',
358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
359: 'black-footed ferret, ferret, Mustela nigripes',
360: 'otter',
361: 'skunk, polecat, wood pussy',
362: 'badger',
363: 'armadillo',
364: 'three-toed sloth, ai, Bradypus tridactylus',
365: 'orangutan, orang, orangutang, Pongo pygmaeus',
366: 'gorilla, Gorilla gorilla',
367: 'chimpanzee, chimp, Pan troglodytes',
368: 'gibbon, Hylobates lar',
369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
370: 'guenon, guenon monkey',
371: 'patas, hussar monkey, Erythrocebus patas',
372: 'baboon',
373: 'macaque',
374: 'langur',
375: 'colobus, colobus monkey',
376: 'proboscis monkey, Nasalis larvatus',
377: 'marmoset',
378: 'capuchin, ringtail, Cebus capucinus',
379: 'howler monkey, howler',
380: 'titi, titi monkey',
381: 'spider monkey, Ateles geoffroyi',
382: 'squirrel monkey, Saimiri sciureus',
383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
384: 'indri, indris, Indri indri, Indri brevicaudatus',
385: 'Indian elephant, Elephas maximus',
386: 'African elephant, Loxodonta africana',
387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
389: 'barracouta, snoek',
390: 'eel',
391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
392: 'rock beauty, Holocanthus tricolor',
393: 'anemone fish',
394: 'sturgeon',
395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
396: 'lionfish',
397: 'puffer, pufferfish, blowfish, globefish',
398: 'abacus',
399: 'abaya',
400: "academic gown, academic robe, judge's robe",
401: 'accordion, piano accordion, squeeze box',
402: 'acoustic guitar',
403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
404: 'airliner',
405: 'airship, dirigible',
406: 'altar',
407: 'ambulance',
408: 'amphibian, amphibious vehicle',
409: 'analog clock',
410: 'apiary, bee house',
411: 'apron',
412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
413: 'assault rifle, assault gun',
414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
415: 'bakery, bakeshop, bakehouse',
416: 'balance beam, beam',
417: 'balloon',
418: 'ballpoint, ballpoint pen, ballpen, Biro',
419: 'Band Aid',
420: 'banjo',
421: 'bannister, banister, balustrade, balusters, handrail',
422: 'barbell',
423: 'barber chair',
424: 'barbershop',
425: 'barn',
426: 'barometer',
427: 'barrel, cask',
428: 'barrow, garden cart, lawn cart, wheelbarrow',
429: 'baseball',
430: 'basketball',
431: 'bassinet',
432: 'bassoon',
433: 'bathing cap, swimming cap',
434: 'bath towel',
435: 'bathtub, bathing tub, bath, tub',
436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
437: 'beacon, lighthouse, beacon light, pharos',
438: 'beaker',
439: 'bearskin, busby, shako',
440: 'beer bottle',
441: 'beer glass',
442: 'bell cote, bell cot',
443: 'bib',
444: 'bicycle-built-for-two, tandem bicycle, tandem',
445: 'bikini, two-piece',
446: 'binder, ring-binder',
447: 'binoculars, field glasses, opera glasses',
448: 'birdhouse',
449: 'boathouse',
450: 'bobsled, bobsleigh, bob',
451: 'bolo tie, bolo, bola tie, bola',
452: 'bonnet, poke bonnet',
453: 'bookcase',
454: 'bookshop, bookstore, bookstall',
455: 'bottlecap',
456: 'bow',
457: 'bow tie, bow-tie, bowtie',
458: 'brass, memorial tablet, plaque',
459: 'brassiere, bra, bandeau',
460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
461: 'breastplate, aegis, egis',
462: 'broom',
463: 'bucket, pail',
464: 'buckle',
465: 'bulletproof vest',
466: 'bullet train, bullet',
467: 'butcher shop, meat market',
468: 'cab, hack, taxi, taxicab',
469: 'caldron, cauldron',
470: 'candle, taper, wax light',
471: 'cannon',
472: 'canoe',
473: 'can opener, tin opener',
474: 'cardigan',
475: 'car mirror',
476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
477: "carpenter's kit, tool kit",
478: 'carton',
479: 'car wheel',
480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
481: 'cassette',
482: 'cassette player',
483: 'castle',
484: 'catamaran',
485: 'CD player',
486: 'cello, violoncello',
487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
488: 'chain',
489: 'chainlink fence',
490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
491: 'chain saw, chainsaw',
492: 'chest',
493: 'chiffonier, commode',
494: 'chime, bell, gong',
495: 'china cabinet, china closet',
496: 'Christmas stocking',
497: 'church, church building',
498: 'cinema, movie theater, movie theatre, movie house, picture palace',
499: 'cleaver, meat cleaver, chopper',
500: 'cliff dwelling',
501: 'cloak',
502: 'clog, geta, patten, sabot',
503: 'cocktail shaker',
504: 'coffee mug',
505: 'coffeepot',
506: 'coil, spiral, volute, whorl, helix',
507: 'combination lock',
508: 'computer keyboard, keypad',
509: 'confectionery, confectionary, candy store',
510: 'container ship, containership, container vessel',
511: 'convertible',
512: 'corkscrew, bottle screw',
513: 'cornet, horn, trumpet, trump',
514: 'cowboy boot',
515: 'cowboy hat, ten-gallon hat',
516: 'cradle',
517: 'crane',
518: 'crash helmet',
519: 'crate',
520: 'crib, cot',
521: 'Crock Pot',
522: 'croquet ball',
523: 'crutch',
524: 'cuirass',
525: 'dam, dike, dyke',
526: 'desk',
527: 'desktop computer',
528: 'dial telephone, dial phone',
529: 'diaper, nappy, napkin',
530: 'digital clock',
531: 'digital watch',
532: 'dining table, board',
533: 'dishrag, dishcloth',
534: 'dishwasher, dish washer, dishwashing machine',
535: 'disk brake, disc brake',
536: 'dock, dockage, docking facility',
537: 'dogsled, dog sled, dog sleigh',
538: 'dome',
539: 'doormat, welcome mat',
540: 'drilling platform, offshore rig',
541: 'drum, membranophone, tympan',
542: 'drumstick',
543: 'dumbbell',
544: 'Dutch oven',
545: 'electric fan, blower',
546: 'electric guitar',
547: 'electric locomotive',
548: 'entertainment center',
549: 'envelope',
550: 'espresso maker',
551: 'face powder',
552: 'feather boa, boa',
553: 'file, file cabinet, filing cabinet',
554: 'fireboat',
555: 'fire engine, fire truck',
556: 'fire screen, fireguard',
557: 'flagpole, flagstaff',
558: 'flute, transverse flute',
559: 'folding chair',
560: 'football helmet',
561: 'forklift',
562: 'fountain',
563: 'fountain pen',
564: 'four-poster',
565: 'freight car',
566: 'French horn, horn',
567: 'frying pan, frypan, skillet',
568: 'fur coat',
569: 'garbage truck, dustcart',
570: 'gasmask, respirator, gas helmet',
571: 'gas pump, gasoline pump, petrol pump, island dispenser',
572: 'goblet',
573: 'go-kart',
574: 'golf ball',
575: 'golfcart, golf cart',
576: 'gondola',
577: 'gong, tam-tam',
578: 'gown',
579: 'grand piano, grand',
580: 'greenhouse, nursery, glasshouse',
581: 'grille, radiator grille',
582: 'grocery store, grocery, food market, market',
583: 'guillotine',
584: 'hair slide',
585: 'hair spray',
586: 'half track',
587: 'hammer',
588: 'hamper',
589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
590: 'hand-held computer, hand-held microcomputer',
591: 'handkerchief, hankie, hanky, hankey',
592: 'hard disc, hard disk, fixed disk',
593: 'harmonica, mouth organ, harp, mouth harp',
594: 'harp',
595: 'harvester, reaper',
596: 'hatchet',
597: 'holster',
598: 'home theater, home theatre',
599: 'honeycomb',
600: 'hook, claw',
601: 'hoopskirt, crinoline',
602: 'horizontal bar, high bar',
603: 'horse cart, horse-cart',
604: 'hourglass',
605: 'iPod',
606: 'iron, smoothing iron',
607: "jack-o'-lantern",
608: 'jean, blue jean, denim',
609: 'jeep, landrover',
610: 'jersey, T-shirt, tee shirt',
611: 'jigsaw puzzle',
612: 'jinrikisha, ricksha, rickshaw',
613: 'joystick',
614: 'kimono',
615: 'knee pad',
616: 'knot',
617: 'lab coat, laboratory coat',
618: 'ladle',
619: 'lampshade, lamp shade',
620: 'laptop, laptop computer',
621: 'lawn mower, mower',
622: 'lens cap, lens cover',
623: 'letter opener, paper knife, paperknife',
624: 'library',
625: 'lifeboat',
626: 'lighter, light, igniter, ignitor',
627: 'limousine, limo',
628: 'liner, ocean liner',
629: 'lipstick, lip rouge',
630: 'Loafer',
631: 'lotion',
632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
633: "loupe, jeweler's loupe",
634: 'lumbermill, sawmill',
635: 'magnetic compass',
636: 'mailbag, postbag',
637: 'mailbox, letter box',
638: 'maillot',
639: 'maillot, tank suit',
640: 'manhole cover',
641: 'maraca',
642: 'marimba, xylophone',
643: 'mask',
644: 'matchstick',
645: 'maypole',
646: 'maze, labyrinth',
647: 'measuring cup',
648: 'medicine chest, medicine cabinet',
649: 'megalith, megalithic structure',
650: 'microphone, mike',
651: 'microwave, microwave oven',
652: 'military uniform',
653: 'milk can',
654: 'minibus',
655: 'miniskirt, mini',
656: 'minivan',
657: 'missile',
658: 'mitten',
659: 'mixing bowl',
660: 'mobile home, manufactured home',
661: 'Model T',
662: 'modem',
663: 'monastery',
664: 'monitor',
665: 'moped',
666: 'mortar',
667: 'mortarboard',
668: 'mosque',
669: 'mosquito net',
670: 'motor scooter, scooter',
671: 'mountain bike, all-terrain bike, off-roader',
672: 'mountain tent',
673: 'mouse, computer mouse',
674: 'mousetrap',
675: 'moving van',
676: 'muzzle',
677: 'nail',
678: 'neck brace',
679: 'necklace',
680: 'nipple',
681: 'notebook, notebook computer',
682: 'obelisk',
683: 'oboe, hautboy, hautbois',
684: 'ocarina, sweet potato',
685: 'odometer, hodometer, mileometer, milometer',
686: 'oil filter',
687: 'organ, pipe organ',
688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
689: 'overskirt',
690: 'oxcart',
691: 'oxygen mask',
692: 'packet',
693: 'paddle, boat paddle',
694: 'paddlewheel, paddle wheel',
695: 'padlock',
696: 'paintbrush',
697: "pajama, pyjama, pj's, jammies",
698: 'palace',
699: 'panpipe, pandean pipe, syrinx',
700: 'paper towel',
701: 'parachute, chute',
702: 'parallel bars, bars',
703: 'park bench',
704: 'parking meter',
705: 'passenger car, coach, carriage',
706: 'patio, terrace',
707: 'pay-phone, pay-station',
708: 'pedestal, plinth, footstall',
709: 'pencil box, pencil case',
710: 'pencil sharpener',
711: 'perfume, essence',
712: 'Petri dish',
713: 'photocopier',
714: 'pick, plectrum, plectron',
715: 'pickelhaube',
716: 'picket fence, paling',
717: 'pickup, pickup truck',
718: 'pier',
719: 'piggy bank, penny bank',
720: 'pill bottle',
721: 'pillow',
722: 'ping-pong ball',
723: 'pinwheel',
724: 'pirate, pirate ship',
725: 'pitcher, ewer',
726: "plane, carpenter's plane, woodworking plane",
727: 'planetarium',
728: 'plastic bag',
729: 'plate rack',
730: 'plow, plough',
731: "plunger, plumber's helper",
732: 'Polaroid camera, Polaroid Land camera',
733: 'pole',
734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
735: 'poncho',
736: 'pool table, billiard table, snooker table',
737: 'pop bottle, soda bottle',
738: 'pot, flowerpot',
739: "potter's wheel",
740: 'power drill',
741: 'prayer rug, prayer mat',
742: 'printer',
743: 'prison, prison house',
744: 'projectile, missile',
745: 'projector',
746: 'puck, hockey puck',
747: 'punching bag, punch bag, punching ball, punchball',
748: 'purse',
749: 'quill, quill pen',
750: 'quilt, comforter, comfort, puff',
751: 'racer, race car, racing car',
752: 'racket, racquet',
753: 'radiator',
754: 'radio, wireless',
755: 'radio telescope, radio reflector',
756: 'rain barrel',
757: 'recreational vehicle, RV, R.V.',
758: 'reel',
759: 'reflex camera',
760: 'refrigerator, icebox',
761: 'remote control, remote',
762: 'restaurant, eating house, eating place, eatery',
763: 'revolver, six-gun, six-shooter',
764: 'rifle',
765: 'rocking chair, rocker',
766: 'rotisserie',
767: 'rubber eraser, rubber, pencil eraser',
768: 'rugby ball',
769: 'rule, ruler',
770: 'running shoe',
771: 'safe',
772: 'safety pin',
773: 'saltshaker, salt shaker',
774: 'sandal',
775: 'sarong',
776: 'sax, saxophone',
777: 'scabbard',
778: 'scale, weighing machine',
779: 'school bus',
780: 'schooner',
781: 'scoreboard',
782: 'screen, CRT screen',
783: 'screw',
784: 'screwdriver',
785: 'seat belt, seatbelt',
786: 'sewing machine',
787: 'shield, buckler',
788: 'shoe shop, shoe-shop, shoe store',
789: 'shoji',
790: 'shopping basket',
791: 'shopping cart',
792: 'shovel',
793: 'shower cap',
794: 'shower curtain',
795: 'ski',
796: 'ski mask',
797: 'sleeping bag',
798: 'slide rule, slipstick',
799: 'sliding door',
800: 'slot, one-armed bandit',
801: 'snorkel',
802: 'snowmobile',
803: 'snowplow, snowplough',
804: 'soap dispenser',
805: 'soccer ball',
806: 'sock',
807: 'solar dish, solar collector, solar furnace',
808: 'sombrero',
809: 'soup bowl',
810: 'space bar',
811: 'space heater',
812: 'space shuttle',
813: 'spatula',
814: 'speedboat',
815: "spider web, spider's web",
816: 'spindle',
817: 'sports car, sport car',
818: 'spotlight, spot',
819: 'stage',
820: 'steam locomotive',
821: 'steel arch bridge',
822: 'steel drum',
823: 'stethoscope',
824: 'stole',
825: 'stone wall',
826: 'stopwatch, stop watch',
827: 'stove',
828: 'strainer',
829: 'streetcar, tram, tramcar, trolley, trolley car',
830: 'stretcher',
831: 'studio couch, day bed',
832: 'stupa, tope',
833: 'submarine, pigboat, sub, U-boat',
834: 'suit, suit of clothes',
835: 'sundial',
836: 'sunglass',
837: 'sunglasses, dark glasses, shades',
838: 'sunscreen, sunblock, sun blocker',
839: 'suspension bridge',
840: 'swab, swob, mop',
841: 'sweatshirt',
842: 'swimming trunks, bathing trunks',
843: 'swing',
844: 'switch, electric switch, electrical switch',
845: 'syringe',
846: 'table lamp',
847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
848: 'tape player',
849: 'teapot',
850: 'teddy, teddy bear',
851: 'television, television system',
852: 'tennis ball',
853: 'thatch, thatched roof',
854: 'theater curtain, theatre curtain',
855: 'thimble',
856: 'thresher, thrasher, threshing machine',
857: 'throne',
858: 'tile roof',
859: 'toaster',
860: 'tobacco shop, tobacconist shop, tobacconist',
861: 'toilet seat',
862: 'torch',
863: 'totem pole',
864: 'tow truck, tow car, wrecker',
865: 'toyshop',
866: 'tractor',
867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
868: 'tray',
869: 'trench coat',
870: 'tricycle, trike, velocipede',
871: 'trimaran',
872: 'tripod',
873: 'triumphal arch',
874: 'trolleybus, trolley coach, trackless trolley',
875: 'trombone',
876: 'tub, vat',
877: 'turnstile',
878: 'typewriter keyboard',
879: 'umbrella',
880: 'unicycle, monocycle',
881: 'upright, upright piano',
882: 'vacuum, vacuum cleaner',
883: 'vase',
884: 'vault',
885: 'velvet',
886: 'vending machine',
887: 'vestment',
888: 'viaduct',
889: 'violin, fiddle',
890: 'volleyball',
891: 'waffle iron',
892: 'wall clock',
893: 'wallet, billfold, notecase, pocketbook',
894: 'wardrobe, closet, press',
895: 'warplane, military plane',
896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
897: 'washer, automatic washer, washing machine',
898: 'water bottle',
899: 'water jug',
900: 'water tower',
901: 'whiskey jug',
902: 'whistle',
903: 'wig',
904: 'window screen',
905: 'window shade',
906: 'Windsor tie',
907: 'wine bottle',
908: 'wing',
909: 'wok',
910: 'wooden spoon',
911: 'wool, woolen, woollen',
912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
913: 'wreck',
914: 'yawl',
915: 'yurt',
916: 'web site, website, internet site, site',
917: 'comic book',
918: 'crossword puzzle, crossword',
919: 'street sign',
920: 'traffic light, traffic signal, stoplight',
921: 'book jacket, dust cover, dust jacket, dust wrapper',
922: 'menu',
923: 'plate',
924: 'guacamole',
925: 'consomme',
926: 'hot pot, hotpot',
927: 'trifle',
928: 'ice cream, icecream',
929: 'ice lolly, lolly, lollipop, popsicle',
930: 'French loaf',
931: 'bagel, beigel',
932: 'pretzel',
933: 'cheeseburger',
934: 'hotdog, hot dog, red hot',
935: 'mashed potato',
936: 'head cabbage',
937: 'broccoli',
938: 'cauliflower',
939: 'zucchini, courgette',
940: 'spaghetti squash',
941: 'acorn squash',
942: 'butternut squash',
943: 'cucumber, cuke',
944: 'artichoke, globe artichoke',
945: 'bell pepper',
946: 'cardoon',
947: 'mushroom',
948: 'Granny Smith',
949: 'strawberry',
950: 'orange',
951: 'lemon',
952: 'fig',
953: 'pineapple, ananas',
954: 'banana',
955: 'jackfruit, jak, jack',
956: 'custard apple',
957: 'pomegranate',
958: 'hay',
959: 'carbonara',
960: 'chocolate sauce, chocolate syrup',
961: 'dough',
962: 'meat loaf, meatloaf',
963: 'pizza, pizza pie',
964: 'potpie',
965: 'burrito',
966: 'red wine',
967: 'espresso',
968: 'cup',
969: 'eggnog',
970: 'alp',
971: 'bubble',
972: 'cliff, drop, drop-off',
973: 'coral reef',
974: 'geyser',
975: 'lakeside, lakeshore',
976: 'promontory, headland, head, foreland',
977: 'sandbar, sand bar',
978: 'seashore, coast, seacoast, sea-coast',
979: 'valley, vale',
980: 'volcano',
981: 'ballplayer, baseball player',
982: 'groom, bridegroom',
983: 'scuba diver',
984: 'rapeseed',
985: 'daisy',
986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
987: 'corn',
988: 'acorn',
989: 'hip, rose hip, rosehip',
990: 'buckeye, horse chestnut, conker',
991: 'coral fungus',
992: 'agaric',
993: 'gyromitra',
994: 'stinkhorn, carrion fungus',
995: 'earthstar',
996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
997: 'bolete',
998: 'ear, spike, capitulum',
999: 'toilet tissue, toilet paper, bathroom tissue'}
| tensorrt-laboratory-master | examples/ONNX/resnet50/imagenet_labels.py |
#!/usr/bin/env python3
import glob
import os
import onnx
from onnx import numpy_helper
from matplotlib import pyplot as plt
import numpy as np
def load_inputs(test_data_dir):
# Load inputs
inputs = []
inputs_num = len(glob.glob(os.path.join(test_data_dir, 'input_*.pb')))
for i in range(inputs_num):
input_file = os.path.join(test_data_dir, 'input_{}.pb'.format(i))
tensor = onnx.TensorProto()
with open(input_file, 'rb') as f:
tensor.ParseFromString(f.read())
inputs.append(numpy_helper.to_array(tensor))
return inputs
def load_outputs(test_data_dir):
# Load reference outputs
ref_outputs = []
ref_outputs_num = len(glob.glob(os.path.join(test_data_dir, 'output_*.pb')))
for i in range(ref_outputs_num):
output_file = os.path.join(test_data_dir, 'output_{}.pb'.format(i))
tensor = onnx.TensorProto()
with open(output_file, 'rb') as f:
tensor.ParseFromString(f.read())
ref_outputs.append(numpy_helper.to_array(tensor))
return ref_outputs
def mnist_image(data):
two_d = (np.reshape(data, (28, 28))).astype(np.uint8)
plt.imshow(two_d, interpolation='nearest')
return plt
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
| tensorrt-laboratory-master | examples/ONNX/resnet50/onnx_utils.py |
import calibrator
import tensorrt as trt
# Use TensorRT ONNX parser to parse model file, and enable INT8 calibration during engine construction
def build_int8_engine_onnx(model_file, image_dir, batch_size, calibration_batches, engine_file, cache_file='INT8CalibrationTable'):
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
# Load the Onnx model and parse it in order to populate the TensorRT network.
with open(model_file, 'rb') as model:
parser.parse(model.read())
# Allow builder to use INT8 or FP16 kernels when building engine
builder.int8_mode = True
builder.fp16_mode = True
calib = calibrator.ONNXEntropyCalibrator(image_dir, batch_size, calibration_batches, cache_file)
builder.int8_calibrator = calib
builder.max_batch_size = batch_size
engine = builder.build_cuda_engine(network)
with open(engine_file, 'wb') as f:
f.write(engine.serialize())
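# Example usage (sketch; arguments mirror the call in build.py):
#   build_int8_engine_onnx("resnet50.onnx", "./calibration_images",
#                          batch_size=8, calibration_batches=32,
#                          engine_file="resnet50-b8-int8.engine")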
| tensorrt-laboratory-master | examples/ONNX/resnet50/int8.py |
#!/usr/bin/env python3
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import inspect
import shutil
import tempfile
import click
from jinja2 import Environment, FileSystemLoader, Template
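# Render a Jinja2 template from disk; `data` supplies the template variables and
# os.environ.get is exposed to templates as the `environ` global.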
def render(template_path, data=None, extensions=None, strict=False):
data = data or {}
extensions = extensions or []
env = Environment(
loader=FileSystemLoader(os.path.dirname(template_path)),
extensions=extensions,
keep_trailing_newline=True,
)
if strict:
from jinja2 import StrictUndefined
env.undefined = StrictUndefined
# Add environ global
env.globals['environ'] = os.environ.get
return env.get_template(os.path.basename(template_path)).render(data)
script_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
FileType = click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=True)
@click.command()
@click.option("-n", default=1)
@click.option("--template", type=FileType, default=os.path.join(script_path, "lb-envoy.j2"))
def main(n, template):
envoy = shutil.which("envoy")
if envoy is None or not os.path.isfile(envoy):
raise RuntimeError("envoy executable not found on PATH")
ports = [50051 + p for p in range(n)]
print("load balancing over ports: ", [str(p) for p in ports])
with open("/tmp/lb-envoy.yaml", "w") as file:
file.write(render(template, data={"ports": ports}))
# os.system("{} -c /tmp/lb-envoy.yaml".format(envoy))
if __name__ == "__main__":
main()
| tensorrt-laboratory-master | examples/99_LoadBalancer/run_loadbalancer.py |
import os
import deploy_image_client as cpp_client
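# Issue a few Classify/Detection RPCs through the router and assert that each
# request is answered by the expected backend pool, as reported in the response's
# uuid field.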
def main():
if not os.environ.get("TRTLAB_ROUTING_TEST"):
raise RuntimeError(
"Plese run this script in the environment setup by test_routing.sh")
router = cpp_client.ImageClient("localhost:50050")
print("Testing Classify RPC")
a = router.classify("model_a", "via_router_uuid1").get()
b = router.classify("model_b", "via_router_uuid2").get()
c = router.classify("model_c", "via_router_uuid3").get()
assert a.uuid == "model_a"
assert b.uuid == "model_b"
assert c.uuid == "general_pool"
print("Testing Detection RPC")
a = router.detection("model_a", "via_router_uuid1").get()
b = router.detection("model_b", "via_router_uuid2").get()
c = router.detection("model_c", "via_router_uuid3").get()
assert a.uuid == "general_pool"
assert b.uuid == "model_b"
assert c.uuid == "general_pool"
print("\n**** Test Passed ****\n")
if __name__ == "__main__":
try:
main()
except RuntimeError as e:
print("\n**** Error ****")
print(e)
print()
| tensorrt-laboratory-master | examples/Deployment/RouteRequests/test_client.py |
import os
import boto3
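# Ensure an "images" bucket exists on the S3-compatible object store whose
# endpoint is given by AWS_ENDPOINT_URL (for example, a local MinIO service).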
s3 = boto3.client("s3", use_ssl=False, verify=False,
endpoint_url=os.environ.get("AWS_ENDPOINT_URL"))
response = s3.list_buckets()
buckets = [b["Name"] for b in response["Buckets"]]
if "images" not in buckets:
s3.create_bucket(Bucket="images")
response = s3.list_buckets()
buckets = [b["Name"] for b in response["Buckets"]]
print(buckets)
| tensorrt-laboratory-master | examples/Deployment/ObjectStore/create_buckets.py |
## Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## * Neither the name of NVIDIA CORPORATION nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
## EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
## OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
import os
import uuid
import boto3
import deploy_image_client as client
class ImageClient:
def __init__(self, *, hostname="trt.lab"):
self._cpp_client = client.ImageClient(hostname)
self._s3_client = self._get_s3_client()
def classify(self, image_path, model):
key = self._upload_to_s3(image_path)
return self._cpp_client.classify(key, model)
def object_detection(self, image_path, model):
key = self._upload_to_s3(image_path)
return self._cpp_client.object_detection(key, model)
def _get_s3_client(self):
kwargs = {}
if os.environ.get("AWS_ENDPOINT_URL"):
kwargs = {
"endpoint_url": os.environ.get("AWS_ENDPOINT_URL"),
"use_ssl": False,
"verify": False,
}
return boto3.client("s3", **kwargs)
def _check_if_file(self, file_path):
if not os.path.isfile(file_path):
raise RuntimeError("{} is not a file".format(file_path))
def _upload_to_s3(self, image_path):
self._check_if_file(image_path)
key = str(uuid.uuid4())
with open(image_path, "rb") as data:
self._s3_client.upload_fileobj(data, 'images', key)
return key
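# Minimal usage sketch (hypothetical model name and image path; assumes a
# running deployment service and S3 endpoint, so it is left commented out):
#
#   c = ImageClient(hostname="trt.lab")
#   result = c.classify("/path/to/image.jpg", "model_a")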
| tensorrt-laboratory-master | examples/Deployment/ImageClient/client.py |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import simple_pb2 as simple__pb2
class InferenceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Compute = channel.unary_unary(
'/simple.Inference/Compute',
request_serializer=simple__pb2.Input.SerializeToString,
response_deserializer=simple__pb2.Output.FromString,
)
self.BatchedCompute = channel.stream_stream(
'/simple.Inference/BatchedCompute',
request_serializer=simple__pb2.Input.SerializeToString,
response_deserializer=simple__pb2.Output.FromString,
)
class InferenceServicer(object):
# missing associated documentation comment in .proto file
pass
def Compute(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BatchedCompute(self, request_iterator, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_InferenceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Compute': grpc.unary_unary_rpc_method_handler(
servicer.Compute,
request_deserializer=simple__pb2.Input.FromString,
response_serializer=simple__pb2.Output.SerializeToString,
),
'BatchedCompute': grpc.stream_stream_rpc_method_handler(
servicer.BatchedCompute,
request_deserializer=simple__pb2.Input.FromString,
response_serializer=simple__pb2.Output.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'simple.Inference', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| tensorrt-laboratory-master | examples/03_Batching/simple_pb2_grpc.py |
import grpc
import simple_pb2
import simple_pb2_grpc
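# Send a single Input message to the unary Compute RPC and print the batch_id
# echoed back by the server.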
def run():
with grpc.insecure_channel('localhost:50049') as channel:
stub = simple_pb2_grpc.InferenceStub(channel)
response = stub.Compute(simple_pb2.Input(batch_id=78))
print("Received msg with batch_id={}".format(response.batch_id))
if __name__ == "__main__":
run()
| tensorrt-laboratory-master | examples/03_Batching/unary_client.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: simple.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='simple.proto',
package='simple',
syntax='proto3',
serialized_pb=_b('\n\x0csimple.proto\x12\x06simple\"\x19\n\x05Input\x12\x10\n\x08\x62\x61tch_id\x18\x01 \x01(\x04\"\x1a\n\x06Output\x12\x10\n\x08\x62\x61tch_id\x18\x01 \x01(\x04\x32n\n\tInference\x12*\n\x07\x43ompute\x12\r.simple.Input\x1a\x0e.simple.Output\"\x00\x12\x35\n\x0e\x42\x61tchedCompute\x12\r.simple.Input\x1a\x0e.simple.Output\"\x00(\x01\x30\x01\x62\x06proto3')
)
_INPUT = _descriptor.Descriptor(
name='Input',
full_name='simple.Input',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch_id', full_name='simple.Input.batch_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=24,
serialized_end=49,
)
_OUTPUT = _descriptor.Descriptor(
name='Output',
full_name='simple.Output',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch_id', full_name='simple.Output.batch_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=77,
)
DESCRIPTOR.message_types_by_name['Input'] = _INPUT
DESCRIPTOR.message_types_by_name['Output'] = _OUTPUT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Input = _reflection.GeneratedProtocolMessageType('Input', (_message.Message,), dict(
DESCRIPTOR = _INPUT,
__module__ = 'simple_pb2'
# @@protoc_insertion_point(class_scope:simple.Input)
))
_sym_db.RegisterMessage(Input)
Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), dict(
DESCRIPTOR = _OUTPUT,
__module__ = 'simple_pb2'
# @@protoc_insertion_point(class_scope:simple.Output)
))
_sym_db.RegisterMessage(Output)
_INFERENCE = _descriptor.ServiceDescriptor(
name='Inference',
full_name='simple.Inference',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=79,
serialized_end=189,
methods=[
_descriptor.MethodDescriptor(
name='Compute',
full_name='simple.Inference.Compute',
index=0,
containing_service=None,
input_type=_INPUT,
output_type=_OUTPUT,
options=None,
),
_descriptor.MethodDescriptor(
name='BatchedCompute',
full_name='simple.Inference.BatchedCompute',
index=1,
containing_service=None,
input_type=_INPUT,
output_type=_OUTPUT,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_INFERENCE)
DESCRIPTOR.services_by_name['Inference'] = _INFERENCE
# @@protoc_insertion_point(module_scope)
| tensorrt-laboratory-master | examples/03_Batching/simple_pb2.py |
import grpc
import simple_pb2
import simple_pb2_grpc
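# Drive the bidirectional BatchedCompute stream: ten Input messages are
# generated lazily and each Output received on the stream is printed with its
# batch_id.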
def run():
with grpc.insecure_channel('localhost:50051') as channel:
stub = simple_pb2_grpc.InferenceStub(channel)
def requests():
messages = [simple_pb2.Input(batch_id=i) for i in range(10)]
for msg in messages:
print("Sending Stream batch_id={}".format(msg.batch_id))
yield msg
responses = stub.BatchedCompute(requests())
for resp in responses:
print("Received msg on stream with batch_id={}".format(resp.batch_id))
if __name__ == "__main__":
run()
| tensorrt-laboratory-master | examples/03_Batching/simple_batching_client.py |
#!/usr/bin/env python3
import os
import time
import numpy as np
import infer
import infer_test_utils as utils
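# Register the MNIST TensorRT engine, validate it against the bundled ONNX
# test vectors, and then start serving inference requests.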
def main():
models = infer.InferenceManager(max_exec_concurrency=2)
mnist = models.register_tensorrt_engine("mnist", "/work/models/onnx/mnist-v1.3/mnist-v1.3.engine")
models.update_resources()
print("Input Bindings: {}".format(mnist.input_bindings()))
print("Output Bindings: {}".format(mnist.output_bindings()))
inputs = utils.load_inputs("/work/models/onnx/mnist-v1.3/test_data_set_0")
expected = utils.load_outputs("/work/models/onnx/mnist-v1.3/test_data_set_0")
start = time.process_time()
results = [mnist.infer(Input3=input) for input in inputs]
results = [r.get() for r in results]
print("Compute Time: {}".format(time.process_time() - start))
for r, e in zip(results, expected):
for key, val in r.items():
print("Output Binding Name: {}; shape{}".format(key, val.shape))
r = val.reshape((1,10))
np.testing.assert_almost_equal(r, e, decimal=3)
models.serve()
#mnist_model = models.get_model("mnist")
#benchmark = infer.InferBench(models)
#benchmark.run(mnist_model, 1, 0.1)
#print(results)
if __name__ == "__main__":
main()
| tensorrt-laboratory-master | examples/30_PyTensorRT/server.py |
#!/usr/bin/env python3
import os
import time
import numpy as np
import infer
import infer_test_utils as utils
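# Connect to a remote InferenceManager, look up the "mnist" model, and time a
# batch of inference requests issued through the remote infer runner.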
def main():
manager = infer.RemoteInferenceManager(hostname="localhost:50052")
models = manager.get_models()
print(models)
mnist = manager.infer_runner("mnist")
print("Input Bindings: {}".format(mnist.input_bindings()))
print("Output Bindings: {}".format(mnist.output_bindings()))
inputs = utils.load_inputs("/work/models/onnx/mnist-v1.3/test_data_set_0")
expected = utils.load_outputs("/work/models/onnx/mnist-v1.3/test_data_set_0")
start = time.process_time()
results = [mnist.infer(Input3=input) for input in inputs]
results = [r.get() for r in results]
print("Compute Time: {}".format(time.process_time() - start))
print(results)
# for r, e in zip(results, expected):
# for key, val in r.items():
# print("Output Binding Name: {}; shape{}".format(key, val.shape))
# r = val.reshape((1,10))
# np.testing.assert_almost_equal(r, e, decimal=3)
# models.serve()
#mnist_model = models.get_model("mnist")
#benchmark = infer.InferBench(models)
#benchmark.run(mnist_model, 1, 0.1)
#print(results)
if __name__ == "__main__":
main()
| tensorrt-laboratory-master | examples/30_PyTensorRT/client.py |
#!/usr/bin/env python3
import glob
import os
import onnx
from onnx import numpy_helper
from matplotlib import pyplot as plt
import numpy as np
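# Helpers for the PyTensorRT examples: load ONNX test-set protobuf tensors as
# NumPy arrays, render MNIST digits, and compute a numerically stable softmax.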
def load_inputs(test_data_dir):
# Load inputs
inputs = []
inputs_num = len(glob.glob(os.path.join(test_data_dir, 'input_*.pb')))
for i in range(inputs_num):
input_file = os.path.join(test_data_dir, 'input_{}.pb'.format(i))
tensor = onnx.TensorProto()
with open(input_file, 'rb') as f:
tensor.ParseFromString(f.read())
inputs.append(numpy_helper.to_array(tensor))
return inputs
def load_outputs(test_data_dir):
# Load reference outputs
ref_outputs = []
ref_outputs_num = len(glob.glob(os.path.join(test_data_dir, 'output_*.pb')))
for i in range(ref_outputs_num):
output_file = os.path.join(test_data_dir, 'output_{}.pb'.format(i))
tensor = onnx.TensorProto()
with open(output_file, 'rb') as f:
tensor.ParseFromString(f.read())
ref_outputs.append(numpy_helper.to_array(tensor))
return ref_outputs
def mnist_image(data):
two_d = (np.reshape(data, (28, 28))).astype(np.uint8)
plt.imshow(two_d, interpolation='nearest')
return plt
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
| tensorrt-laboratory-master | examples/30_PyTensorRT/infer_test_utils.py |
#!/usr/bin/env python3
import itertools
import os
import time
import numpy as np
import trtlab
import infer_test_utils as utils
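# Repeatedly submit batches of 100 concurrent inference requests against the
# MNIST engine and report the accumulated CPU time.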
def main():
models = trtlab.InferenceManager(max_exec_concurrency=1)
mnist = models.register_tensorrt_engine("mnist", "/work/models/onnx/mnist-v1.3/mnist-v1.3.engine")
models.update_resources()
print("Input Bindings: {}".format(mnist.input_bindings()))
print("Output Bindings: {}".format(mnist.output_bindings()))
inputs = utils.load_inputs("/work/models/onnx/mnist-v1.3/test_data_set_0")
expected = utils.load_outputs("/work/models/onnx/mnist-v1.3/test_data_set_0")
start = time.process_time()
while True:
futures = [mnist.infer(Input3=inputs[0]) for _ in range(100)]
results = [f.get() for f in futures]
# while True:
# results = [mnist.infer(Input3=input) for input in itertools.repeat(inputs[0], 1000)]
# results = [r.get() for r in results]
# time.sleep(0.1)
print("Compute Time: {}".format(time.process_time() - start))
# for r, e in zip(results, expected):
# for key, val in r.items():
# print("Output Binding Name: {}; shape{}".format(key, val.shape))
# r = val.reshape((1,10))
# np.testing.assert_almost_equal(r, e, decimal=3)
#mnist_model = models.get_model("mnist")
#benchmark = infer.InferBench(models)
#benchmark.run(mnist_model, 1, 0.1)
#print(results)
if __name__ == "__main__":
main()
| tensorrt-laboratory-master | examples/30_PyTensorRT/compute.py |
/work/examples/30_PyTensorRT/infer_test_utils.py | tensorrt-laboratory-master | notebooks/infer_test_utils.py |
import os
import subprocess
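# Run every executable regular file in the current directory, one at a time,
# redirecting its stdout into APM_<name>.txt.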
for filename in os.listdir('./'):
if os.access(filename, os.X_OK) and os.path.isfile(filename):
f=open("APM_%s.txt" %filename, "w")
print("Running %s" %filename)
p1=subprocess.Popen(
[
"./%s" %filename
],
stdout=f,
)
print("Wait")
p1.wait()
print("Done")
f.close()
| cuda-samples-master | bin/x86_64/linux/release/go.py |
#!/usr/bin/env python
from string import *
import os, getopt, sys, platform
g_Header = '''/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// This file is auto-generated, do not edit
////////////////////////////////////////////////////////////////////////////////
'''
def Usage():
print("Usage: ptx2c.py in out")
print("Description: performs embedding in.cubin or in.ptx file into out.c and out.h files as character array")
sys.exit(0)
def FormatCharHex(d):
s = hex(ord(d))
if len(s) == 3:
s = "0x0" + s[2]
return s
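# args[0] is the input .ptx/.cubin file; args[1] is the base name used for the
# generated <base>_ptxdump.c / <base>_ptxdump.h pair.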
args = sys.argv[1:]
if not(len(sys.argv[1:]) == 2):
Usage()
out_h = args[1] + "_ptxdump.h"
out_c = args[1] + "_ptxdump.c"
h_in = open(args[0], 'r')
source_bytes = h_in.read()
source_bytes_len = len(source_bytes)
h_out_c = open(out_c, 'w')
h_out_c.writelines(g_Header)
h_out_c.writelines("#include \"" + out_h + "\"\n\n")
h_out_c.writelines("unsigned char " + args[1] + "_ptxdump[" + str(source_bytes_len+1) + "] = {\n")
h_out_h = open(out_h, 'w')
macro_h = "__" + args[1] + "_ptxdump_h__"
h_out_h.writelines(g_Header)
h_out_h.writelines("#ifndef " + macro_h + "\n")
h_out_h.writelines("#define " + macro_h + "\n\n")
h_out_h.writelines('#if defined __cplusplus\nextern "C" {\n#endif\n\n')
h_out_h.writelines("extern unsigned char " + args[1] + "_ptxdump[" + str(source_bytes_len+1) + "];\n\n")
h_out_h.writelines("#if defined __cplusplus\n}\n#endif\n\n")
h_out_h.writelines("#endif //" + macro_h + "\n")
newlinecnt = 0
for i in range(0, source_bytes_len):
h_out_c.write(FormatCharHex(source_bytes[i]) + ", ")
newlinecnt += 1
if newlinecnt == 16:
newlinecnt = 0
h_out_c.write("\n")
h_out_c.write("0x00\n};\n")
h_in.close()
h_out_c.close()
h_out_h.close()
print("ptx2c: CUmodule " + args[0] + " packed successfully")
| cuda-samples-master | Samples/0_Introduction/matrixMulDynlinkJIT/extras/ptx2c.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setup script for hpccm."""
import os
from setuptools import find_packages
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
# Get the package version from hpccm/version.py
version = {}
with open(os.path.join(here, 'hpccm', 'version.py')) as fp:
exec(fp.read(), version)
# Get the long description from the README file
with open(os.path.join(here, 'README.md')) as fp:
long_description = fp.read()
setup(
name='hpccm',
version=version['__version__'],
description='HPC Container Maker',
long_description=long_description,
long_description_content_type='text/markdown',
maintainer='Scott McMillan',
maintainer_email='smcmillan@nvidia.com',
license='Apache License Version 2.0',
url='https://github.com/NVIDIA/hpc-container-maker',
packages=find_packages(),
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6"
],
# Make hpccm.cli.main available from the command line as `hpccm`.
install_requires=['archspec', "enum34; python_version < '3.4'", 'six'],
entry_points={
'console_scripts': [
'hpccm=hpccm.cli:main']})
| hpc-container-maker-master | setup.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the comment module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import docker, invalid_ctype, singularity
from hpccm.primitives.comment import comment
class Test_comment(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@docker
def test_empty(self):
"""No comment string specified"""
c = comment()
self.assertEqual(str(c), '')
@docker
def test_empty_noreformat(self):
"""No comment string specified, reformatting disabled"""
c = comment(reformat=False)
self.assertEqual(str(c), '')
@invalid_ctype
def test_invalid_ctype(self):
"""Invalid container type specified
Assumes default comment format."""
c = comment('foo')
self.assertEqual(str(c), '# foo')
@docker
def test_comment_docker(self):
"""Comment string specified"""
c = comment('foo')
self.assertEqual(str(c), '# foo')
@singularity
def test_comment_singularity(self):
"""Comment string specified"""
c = comment('foo')
self.assertEqual(str(c), '# foo')
@docker
def test_noreformat(self):
"""Disable reformatting"""
c = comment('foo\nbar', reformat=False)
self.assertEqual(str(c), '# foo\n# bar')
@docker
def test_wrap(self):
"""Comment wrapping"""
c = comment('foo\nbar')
self.assertEqual(str(c), '# foo bar')
@docker
def test_merge_docker(self):
"""Comment merge"""
c = []
c.append(comment('a'))
c.append(comment('b'))
merged = c[0].merge(c)
self.assertEqual(str(merged), '# a\n# b')
@singularity
def test_merge_singularity(self):
"""Comment merge"""
c = []
c.append(comment('a'))
c.append(comment('b'))
merged = c[0].merge(c)
self.assertEqual(str(merged), '# a\n# b')
apphelp = c[0].merge(c, _app='foo')
self.assertEqual(str(apphelp), '%apphelp foo\na\nb')
| hpc-container-maker-master | test/test_comment.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the sed module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from hpccm.templates.sed import sed
class Test_sed(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
def test_basic(self):
"""Basic sed"""
s = sed()
self.assertEqual(s.sed_step(file='foo',
patterns=[r's/a/A/g',
r's/FOO = BAR/FOO = BAZ/g']),
r'''sed -i -e s/a/A/g \
-e 's/FOO = BAR/FOO = BAZ/g' foo''')
def test_nofile(self):
"""No file specified"""
s = sed()
self.assertEqual(s.sed_step(patterns=[r's/a/A/g']), '')
def test_nopatterns(self):
"""No patterns specified"""
s = sed()
self.assertEqual(s.sed_step(file='foo'), '')
| hpc-container-maker-master | test/test_sed.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the toolchain module"""
from __future__ import unicode_literals
from __future__ import print_function
from copy import copy, deepcopy
import logging # pylint: disable=unused-import
import unittest
from hpccm.toolchain import toolchain
class Test_toolchain(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
def test_creation(self):
"""Toolchain creation"""
t = toolchain(CC='gcc', CXX='g++', FC='gfortran')
self.assertEqual(t.CC, 'gcc')
self.assertEqual(t.CXX, 'g++')
self.assertEqual(t.FC, 'gfortran')
def test_modification(self):
"""Toolchain modification"""
t = toolchain(CC='gcc', CXX='g++', FC='gfortran')
t.CC = 'mygcc'
self.assertEqual(t.CC, 'mygcc')
self.assertEqual(t.CXX, 'g++')
self.assertEqual(t.FC, 'gfortran')
def test_copy(self):
"""Toolchain copies"""
t = toolchain(CC='gcc', CXX='g++', FC='gfortran')
r = t # ref
c = copy(t)
d = deepcopy(t)
t.CC = 'mygcc'
c.CC = 'cc'
self.assertEqual(t.CC, 'mygcc')
self.assertEqual(r.CC, 'mygcc')
self.assertEqual(c.CC, 'cc')
self.assertEqual(d.CC, 'gcc')
def test_vars(self):
"""Toolchain dictionaries"""
t = toolchain(CC='gcc', CXX='g++', FC='gfortran')
v = vars(t)
self.assertDictEqual(v, {'CC': 'gcc', 'CXX': 'g++', 'FC': 'gfortran'})
def test_unknown_keys(self):
"""Toolchain unknown keys"""
t = toolchain(FOO='bar')
with self.assertRaises(AttributeError):
f = t.FOO
| hpc-container-maker-master | test/test_toolchain.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the tar module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from hpccm.templates.tar import tar
class Test_tar(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
def test_missing_tarball(self):
"""Missing tarball option"""
t = tar()
self.assertEqual(t.untar_step(), '')
def test_filetypes(self):
t = tar()
self.assertEqual(t.untar_step(tarball='foo.tar.bz2'),
'tar -x -f foo.tar.bz2 -j')
self.assertEqual(t.untar_step(tarball='foo.tar.gz'),
'tar -x -f foo.tar.gz -z')
self.assertEqual(t.untar_step(tarball='foo.tar.xz'),
'tar -x -f foo.tar.xz -J')
self.assertEqual(t.untar_step(tarball='foo.txz'),
'tar -x -f foo.txz -J')
self.assertEqual(t.untar_step(tarball='foo.tgz'),
'tar -x -f foo.tgz -z')
self.assertEqual(t.untar_step(tarball='foo.tar'),
'tar -x -f foo.tar')
self.assertEqual(t.untar_step(tarball='foo.unknown'),
'tar -x -f foo.unknown')
def test_directory(self):
"""Directory specified"""
t = tar()
self.assertEqual(t.untar_step(tarball='foo.tgz', directory='bar'),
'mkdir -p bar && tar -x -f foo.tgz -C bar -z')
def test_args(self):
"""Argument given"""
t = tar()
self.assertEqual(t.untar_step(tarball="foo.tar.gz",
args=["--strip-components=1"]),
'tar -x -f foo.tar.gz -z --strip-components=1')
| hpc-container-maker-master | test/test_tar.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the mkl module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ubuntu
from hpccm.building_blocks.mkl import mkl
class Test_mkl(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@ubuntu
@docker
def test_defaults(self):
"""Default mkl building block, no eula agreement"""
with self.assertRaises(RuntimeError):
mkl()
@ubuntu
@docker
def test_basic_ubuntu(self):
"""Default mkl building block"""
m = mkl(eula=True)
self.assertEqual(str(m),
r'''# MKL version 2020.0-088
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
gnupg \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB | apt-key add - && \
echo "deb https://apt.repos.intel.com/mkl all main" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
intel-mkl-64bit-2020.0-088 && \
rm -rf /var/lib/apt/lists/*
RUN echo "source /opt/intel/mkl/bin/mklvars.sh intel64" >> /etc/bash.bashrc''')
@centos
@docker
def test_basic_centos(self):
"""Default mkl building block"""
m = mkl(eula=True)
self.assertEqual(str(m),
r'''# MKL version 2020.0-088
RUN rpm --import https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB && \
yum install -y yum-utils && \
yum-config-manager --add-repo https://yum.repos.intel.com/mkl/setup/intel-mkl.repo && \
yum install -y \
intel-mkl-64bit-2020.0-088 && \
rm -rf /var/cache/yum/*
RUN echo "source /opt/intel/mkl/bin/mklvars.sh intel64" >> /etc/bashrc''')
@ubuntu
@docker
def test_version(self):
"""Version option"""
m = mkl(eula=True, version='2018.2-046')
self.assertEqual(str(m),
r'''# MKL version 2018.2-046
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
gnupg \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB | apt-key add - && \
echo "deb https://apt.repos.intel.com/mkl all main" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
intel-mkl-64bit-2018.2-046 && \
rm -rf /var/lib/apt/lists/*
RUN echo "source /opt/intel/mkl/bin/mklvars.sh intel64" >> /etc/bash.bashrc''')
@ubuntu
@docker
def test_mklvars(self):
"""mklvars is False"""
m = mkl(eula=True, mklvars=False, version='2019.4-070')
self.assertEqual(str(m),
r'''# MKL version 2019.4-070
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
gnupg \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB | apt-key add - && \
echo "deb https://apt.repos.intel.com/mkl all main" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
intel-mkl-64bit-2019.4-070 && \
rm -rf /var/lib/apt/lists/*
ENV CPATH=/opt/intel/mkl/include:$CPATH \
LD_LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/opt/intel/lib/intel64:$LD_LIBRARY_PATH \
LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/opt/intel/lib/intel64:$LIBRARY_PATH \
MKLROOT=/opt/intel/mkl''')
@ubuntu
@docker
def test_runtime(self):
"""Runtime"""
m = mkl(eula=True)
r = m.runtime()
self.assertEqual(r,
r'''# MKL version 2020.0-088
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
gnupg \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB | apt-key add - && \
echo "deb https://apt.repos.intel.com/mkl all main" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
intel-mkl-64bit-2020.0-088 && \
rm -rf /var/lib/apt/lists/*
RUN echo "source /opt/intel/mkl/bin/mklvars.sh intel64" >> /etc/bash.bashrc''')
| hpc-container-maker-master | test/test_mkl.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the intel_psxe_runtime module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ubuntu, x86_64
from hpccm.building_blocks.intel_psxe_runtime import intel_psxe_runtime
class Test_intel_psxe_runtime(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@ubuntu
@docker
def test_defaults(self):
"""Default intel_psxe_runtime building block, no eula agreement"""
with self.assertRaises(RuntimeError):
psxe_rt = intel_psxe_runtime()
str(psxe_rt)
@x86_64
@ubuntu
@docker
def test_defaults_eula(self):
"""eula"""
psxe_rt = intel_psxe_runtime(eula=True)
self.assertEqual(str(psxe_rt),
r'''# Intel Parallel Studio XE runtime version 2020.2-14
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
gcc \
gnupg \
man-db \
openssh-client \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://apt.repos.intel.com/2020/GPG-PUB-KEY-INTEL-PSXE-RUNTIME-2020 | apt-key add - && \
echo "deb https://apt.repos.intel.com/2020 intel-psxe-runtime main" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends aptitude && \
aptitude install -y --without-recommends -o Aptitude::ProblemResolver::SolutionCost='100*canceled-actions,200*removals' \
intel-psxe-runtime=2020.2-14 && \
rm -rf /var/lib/apt/lists/*
RUN echo "source /opt/intel/psxe_runtime/linux/bin/psxevars.sh intel64" >> /etc/bash.bashrc''')
@x86_64
@ubuntu
@docker
def test_psxevars_false(self):
"""psxevars is false"""
psxe_rt = intel_psxe_runtime(eula=True, psxevars=False,
version='2019.5-281')
self.assertEqual(str(psxe_rt),
r'''# Intel Parallel Studio XE runtime version 2019.5-281
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
gcc \
gnupg \
man-db \
openssh-client \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://apt.repos.intel.com/2019/GPG-PUB-KEY-INTEL-PSXE-RUNTIME-2019 | apt-key add - && \
echo "deb https://apt.repos.intel.com/2019 intel-psxe-runtime main" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends aptitude && \
aptitude install -y --without-recommends -o Aptitude::ProblemResolver::SolutionCost='100*canceled-actions,200*removals' \
intel-psxe-runtime=2019.5-281 && \
rm -rf /var/lib/apt/lists/*
ENV DAALROOT=/opt/intel/psxe_runtime/linux/daal \
FI_PROVIDER_PATH=/opt/intel/psxe_runtime/linux/mpi/intel64/libfabric/lib/prov \
IPPROOT=/opt/intel/psxe_runtime/linux/ipp \
I_MPI_ROOT=/opt/intel/psxe_runtime/linux/mpi \
LD_LIBRARY_PATH=/opt/intel/psxe_runtime/linux/daal/lib/intel64:/opt/intel/psxe_runtime/linux/compiler/lib/intel64_lin:/opt/intel/psxe_runtime/linux/compiler/lib/intel64_lin:/opt/intel/psxe_runtime/linux/ipp/lib/intel64:/opt/intel/psxe_runtime/linux/mkl/lib/intel64:/opt/intel/psxe_runtime/linux/mpi/intel64/lib:/opt/intel/psxe_runtime/linux/mpi/intel64/libfabric/lib:/opt/intel/psxe_runtime/linux/tbb/lib/intel64/gcc4.7:$LD_LIBRARY_PATH \
MKLROOT=/opt/intel/psxe_runtime/linux/mkl \
PATH=/opt/intel/psxe_runtime/linux/mpi/intel64/bin:/opt/intel/psxe_runtime/linux/mpi/intel64/libfabric/bin:$PATH''')
@x86_64
@centos
@docker
def test_component_off(self):
"""disable one of the runtimes"""
psxe_rt = intel_psxe_runtime(daal=False, eula=True,
version='2019.5-281')
self.assertEqual(str(psxe_rt),
r'''# Intel Parallel Studio XE runtime version 2019.5-281
RUN yum install -y \
man-db \
openssh-clients \
which && \
rm -rf /var/cache/yum/*
RUN yum install -y nextgen-yum4 && \
rpm --import https://yum.repos.intel.com/2019/setup/RPM-GPG-KEY-intel-psxe-runtime-2019 && \
yum install -y yum-utils && \
yum-config-manager --add-repo https://yum.repos.intel.com/2019/setup/intel-psxe-runtime-2019.repo && \
yum4 install -y \
intel-icc-runtime-64bit-2019.5-281 \
intel-ifort-runtime-64bit-2019.5-281 \
intel-ipp-runtime-64bit-2019.5-281 \
intel-mkl-runtime-64bit-2019.5-281 \
intel-mpi-runtime-64bit-2019.5-281 \
intel-tbb-runtime-64bit-2019.5-281 && \
rm -rf /var/cache/yum/*
RUN echo "source /opt/intel/psxe_runtime/linux/bin/psxevars.sh intel64" >> /etc/bashrc''')
| hpc-container-maker-master | test/test_intel_psxe_runtime.py |
include('include2.py')
Stage0 += compiler
| hpc-container-maker-master | test/include3.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the workdir module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import bash, docker, invalid_ctype, singularity
from hpccm.primitives.workdir import workdir
class Test_workdir(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@docker
def test_empty(self):
"""No workdir specified"""
w = workdir()
self.assertEqual(str(w), '')
@invalid_ctype
def test_invalid_ctype(self):
"""Invalid container type specified"""
w = workdir(directory='foo')
with self.assertRaises(RuntimeError):
str(w)
@docker
def test_dir_docker(self):
"""Working directory specified"""
w = workdir(directory='foo')
self.assertEqual(str(w), 'WORKDIR foo')
@singularity
def test_dir_singularity(self):
"""Working directory specified"""
w = workdir(directory='foo')
self.assertEqual(str(w), '%post\n cd /\n mkdir -p foo\n cd foo')
@bash
def test_dir_bash(self):
"""Working directory specified"""
w = workdir(directory='foo')
self.assertEqual(str(w), '')
| hpc-container-maker-master | test/test_workdir.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the intel_mpi module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ubuntu
from hpccm.building_blocks.intel_mpi import intel_mpi
class Test_intel_mpi(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@ubuntu
@docker
def test_defaults(self):
"""Default intel_mpi building block, no eula agreement"""
with self.assertRaises(RuntimeError):
intel_mpi()
@ubuntu
@docker
def test_basic_ubuntu(self):
"""Default intel_mpi building block"""
impi = intel_mpi(eula=True)
self.assertEqual(str(impi),
r'''# Intel MPI version 2019.6-088
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
gnupg \
man-db \
openssh-client \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB | apt-key add - && \
echo "deb https://apt.repos.intel.com/mpi all main" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
intel-mpi-2019.6-088 && \
rm -rf /var/lib/apt/lists/*
RUN echo "source /opt/intel/compilers_and_libraries/linux/mpi/intel64/bin/mpivars.sh intel64" >> /etc/bash.bashrc''')
@centos
@docker
def test_basic_centos(self):
"""Default intel_mpi building block"""
impi = intel_mpi(eula=True)
self.assertEqual(str(impi),
r'''# Intel MPI version 2019.6-088
RUN yum install -y \
man-db \
openssh-clients && \
rm -rf /var/cache/yum/*
RUN rpm --import https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB && \
yum install -y yum-utils && \
yum-config-manager --add-repo https://yum.repos.intel.com/mpi/setup/intel-mpi.repo && \
yum install -y \
intel-mpi-2019.6-088 && \
rm -rf /var/cache/yum/*
RUN echo "source /opt/intel/compilers_and_libraries/linux/mpi/intel64/bin/mpivars.sh intel64" >> /etc/bashrc''')
@ubuntu
@docker
def test_version(self):
"""Version option"""
impi = intel_mpi(eula=True, version='2018.2-046')
self.assertEqual(str(impi),
r'''# Intel MPI version 2018.2-046
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
gnupg \
man-db \
openssh-client \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB | apt-key add - && \
echo "deb https://apt.repos.intel.com/mpi all main" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
intel-mpi-2018.2-046 && \
rm -rf /var/lib/apt/lists/*
RUN echo "source /opt/intel/compilers_and_libraries/linux/mpi/intel64/bin/mpivars.sh intel64" >> /etc/bash.bashrc''')
@ubuntu
@docker
def test_mpivars(self):
"""mpivars is False"""
impi = intel_mpi(eula=True, mpivars=False)
self.assertEqual(str(impi),
r'''# Intel MPI version 2019.6-088
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
gnupg \
man-db \
openssh-client \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB | apt-key add - && \
echo "deb https://apt.repos.intel.com/mpi all main" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
intel-mpi-2019.6-088 && \
rm -rf /var/lib/apt/lists/*
ENV FI_PROVIDER_PATH=/opt/intel/compilers_and_libraries/linux/mpi/intel64/libfabric/lib/prov \
I_MPI_ROOT=/opt/intel/compilers_and_libraries/linux/mpi \
LD_LIBRARY_PATH=/opt/intel/compilers_and_libraries/linux/mpi/intel64/lib:/opt/intel/compilers_and_libraries/linux/mpi/intel64/libfabric/lib:$LD_LIBRARY_PATH \
PATH=/opt/intel/compilers_and_libraries/linux/mpi/intel64/bin:/opt/intel/compilers_and_libraries/linux/mpi/intel64/libfabric/bin:$PATH''')
@ubuntu
@docker
def test_runtime(self):
"""Runtime"""
impi = intel_mpi(eula=True)
r = impi.runtime()
self.assertEqual(r,
r'''# Intel MPI version 2019.6-088
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
gnupg \
man-db \
openssh-client \
wget && \
rm -rf /var/lib/apt/lists/*
RUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB | apt-key add - && \
echo "deb https://apt.repos.intel.com/mpi all main" >> /etc/apt/sources.list.d/hpccm.list && \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
intel-mpi-2019.6-088 && \
rm -rf /var/lib/apt/lists/*
RUN echo "source /opt/intel/compilers_and_libraries/linux/mpi/intel64/bin/mpivars.sh intel64" >> /etc/bash.bashrc''')
| hpc-container-maker-master | test/test_intel_mpi.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the netcdf module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ubuntu
from hpccm.building_blocks.netcdf import netcdf
class Test_netcdf(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@ubuntu
@docker
def test_defaults_ubuntu(self):
"""Default netcdf building block"""
n = netcdf()
self.assertEqual(str(n),
r'''# NetCDF version 4.7.4, NetCDF C++ version 4.3.1, NetCDF Fortran
# version 4.5.3
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
file \
libcurl4-openssl-dev \
m4 \
make \
wget \
zlib1g-dev && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-c/archive/v4.7.4.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/v4.7.4.tar.gz -C /var/tmp -z && \
cd /var/tmp/netcdf-c-4.7.4 && ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/netcdf-c-4.7.4 /var/tmp/v4.7.4.tar.gz
ENV CPATH=/usr/local/netcdf/include:$CPATH \
LD_LIBRARY_PATH=/usr/local/netcdf/lib:$LD_LIBRARY_PATH \
LIBRARY_PATH=/usr/local/netcdf/lib:$LIBRARY_PATH \
PATH=/usr/local/netcdf/bin:$PATH
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-cxx4/archive/v4.3.1.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/v4.3.1.tar.gz -C /var/tmp -z && \
cd /var/tmp/netcdf-cxx4-4.3.1 && ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/netcdf-cxx4-4.3.1 /var/tmp/v4.3.1.tar.gz
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-fortran/archive/v4.5.3.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/v4.5.3.tar.gz -C /var/tmp -z && \
cd /var/tmp/netcdf-fortran-4.5.3 && ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/netcdf-fortran-4.5.3 /var/tmp/v4.5.3.tar.gz''')
@centos
@docker
def test_defaults_centos(self):
"""Default netcdf building block"""
n = netcdf()
self.assertEqual(str(n),
r'''# NetCDF version 4.7.4, NetCDF C++ version 4.3.1, NetCDF Fortran
# version 4.5.3
RUN yum install -y \
ca-certificates \
file \
libcurl-devel \
m4 \
make \
wget \
zlib-devel && \
rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-c/archive/v4.7.4.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/v4.7.4.tar.gz -C /var/tmp -z && \
cd /var/tmp/netcdf-c-4.7.4 && ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/netcdf-c-4.7.4 /var/tmp/v4.7.4.tar.gz
ENV CPATH=/usr/local/netcdf/include:$CPATH \
LD_LIBRARY_PATH=/usr/local/netcdf/lib:$LD_LIBRARY_PATH \
LIBRARY_PATH=/usr/local/netcdf/lib:$LIBRARY_PATH \
PATH=/usr/local/netcdf/bin:$PATH
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-cxx4/archive/v4.3.1.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/v4.3.1.tar.gz -C /var/tmp -z && \
cd /var/tmp/netcdf-cxx4-4.3.1 && ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/netcdf-cxx4-4.3.1 /var/tmp/v4.3.1.tar.gz
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-fortran/archive/v4.5.3.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/v4.5.3.tar.gz -C /var/tmp -z && \
cd /var/tmp/netcdf-fortran-4.5.3 && ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/netcdf-fortran-4.5.3 /var/tmp/v4.5.3.tar.gz''')
@ubuntu
@docker
def test_ldconfig(self):
"""ldconfig option"""
n = netcdf(ldconfig=True, version='4.6.1', version_cxx='4.3.0',
version_fortran='4.4.4')
self.assertEqual(str(n),
r'''# NetCDF version 4.6.1, NetCDF C++ version 4.3.0, NetCDF Fortran
# version 4.4.4
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates \
file \
libcurl4-openssl-dev \
m4 \
make \
wget \
zlib1g-dev && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-c/archive/v4.6.1.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/v4.6.1.tar.gz -C /var/tmp -z && \
cd /var/tmp/netcdf-c-4.6.1 && ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
echo "/usr/local/netcdf/lib" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig && \
rm -rf /var/tmp/netcdf-c-4.6.1 /var/tmp/v4.6.1.tar.gz
ENV CPATH=/usr/local/netcdf/include:$CPATH \
LIBRARY_PATH=/usr/local/netcdf/lib:$LIBRARY_PATH \
PATH=/usr/local/netcdf/bin:$PATH
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-cxx4/archive/v4.3.0.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/v4.3.0.tar.gz -C /var/tmp -z && \
cd /var/tmp/netcdf-cxx4-4.3.0 && ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
echo "/usr/local/netcdf/lib" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig && \
rm -rf /var/tmp/netcdf-cxx4-4.3.0 /var/tmp/v4.3.0.tar.gz
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/Unidata/netcdf-fortran/archive/v4.4.4.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/v4.4.4.tar.gz -C /var/tmp -z && \
cd /var/tmp/netcdf-fortran-4.4.4 && ./configure --prefix=/usr/local/netcdf && \
make -j$(nproc) && \
make -j$(nproc) install && \
echo "/usr/local/netcdf/lib" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig && \
rm -rf /var/tmp/netcdf-fortran-4.4.4 /var/tmp/v4.4.4.tar.gz''')
@ubuntu
@docker
def test_runtime(self):
"""Runtime"""
n = netcdf()
r = n.runtime()
self.assertEqual(r,
r'''# NetCDF
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
zlib1g && \
rm -rf /var/lib/apt/lists/*
COPY --from=0 /usr/local/netcdf /usr/local/netcdf
ENV CPATH=/usr/local/netcdf/include:$CPATH \
LD_LIBRARY_PATH=/usr/local/netcdf/lib:$LD_LIBRARY_PATH \
LIBRARY_PATH=/usr/local/netcdf/lib:$LIBRARY_PATH \
PATH=/usr/local/netcdf/bin:$PATH''')
| hpc-container-maker-master | test/test_netcdf.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the generic_autotools module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ubuntu
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.toolchain import toolchain
class Test_generic_autotools(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@ubuntu
@docker
def test_defaults_ubuntu(self):
"""Default generic_autotools building block"""
g = generic_autotools(
directory='tcl8.6.9/unix',
prefix='/usr/local/tcl',
url='https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz')
self.assertEqual(str(g),
r'''# https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/tcl8.6.9-src.tar.gz -C /var/tmp -z && \
cd /var/tmp/tcl8.6.9/unix && ./configure --prefix=/usr/local/tcl && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/tcl8.6.9/unix /var/tmp/tcl8.6.9-src.tar.gz''')
@ubuntu
@docker
def test_no_url(self):
"""missing url"""
with self.assertRaises(RuntimeError):
g = generic_autotools()
@ubuntu
@docker
def test_both_repository_and_url(self):
"""both repository and url"""
with self.assertRaises(RuntimeError):
g = generic_autotools(repository='foo', url='bar')
@ubuntu
@docker
def test_invalid_package(self):
"""invalid package url"""
with self.assertRaises(RuntimeError):
g = generic_autotools(url='https://foo/bar.sh')
@ubuntu
@docker
def test_package(self):
"""local package"""
g = generic_autotools(
package='packages/openmpi-4.0.1.tar.bz2',
prefix='/usr/local/openmpi')
self.assertEqual(str(g),
r'''# packages/openmpi-4.0.1.tar.bz2
COPY packages/openmpi-4.0.1.tar.bz2 /var/tmp/openmpi-4.0.1.tar.bz2
RUN mkdir -p /var/tmp && tar -x -f /var/tmp/openmpi-4.0.1.tar.bz2 -C /var/tmp -j && \
cd /var/tmp/openmpi-4.0.1 && ./configure --prefix=/usr/local/openmpi && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/openmpi-4.0.1 /var/tmp/openmpi-4.0.1.tar.bz2''')
@ubuntu
@docker
def test_pre_and_post(self):
"""Preconfigure and postinstall options"""
g = generic_autotools(
directory='tcl8.6.9/unix',
postinstall=['echo "post"'],
preconfigure=['echo "pre"'],
prefix='/usr/local/tcl',
url='https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz')
self.assertEqual(str(g),
r'''# https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/tcl8.6.9-src.tar.gz -C /var/tmp -z && \
cd /var/tmp/tcl8.6.9/unix && \
echo "pre" && \
cd /var/tmp/tcl8.6.9/unix && ./configure --prefix=/usr/local/tcl && \
make -j$(nproc) && \
make -j$(nproc) install && \
cd /usr/local/tcl && \
echo "post" && \
rm -rf /var/tmp/tcl8.6.9/unix /var/tmp/tcl8.6.9-src.tar.gz''')
@ubuntu
@docker
def test_configure_opts_check(self):
"""Configure options and check enabled"""
g = generic_autotools(
check=True,
configure_opts=['--disable-getpwuid',
'--enable-orterun-prefix-by-default'],
prefix='/usr/local/openmpi',
url='https://www.open-mpi.org/software/ompi/v4.0/downloads/openmpi-4.0.1.tar.bz2')
self.assertEqual(str(g),
r'''# https://www.open-mpi.org/software/ompi/v4.0/downloads/openmpi-4.0.1.tar.bz2
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://www.open-mpi.org/software/ompi/v4.0/downloads/openmpi-4.0.1.tar.bz2 && \
mkdir -p /var/tmp && tar -x -f /var/tmp/openmpi-4.0.1.tar.bz2 -C /var/tmp -j && \
cd /var/tmp/openmpi-4.0.1 && ./configure --prefix=/usr/local/openmpi --disable-getpwuid --enable-orterun-prefix-by-default && \
make -j$(nproc) && \
make -j$(nproc) check && \
make -j$(nproc) install && \
rm -rf /var/tmp/openmpi-4.0.1 /var/tmp/openmpi-4.0.1.tar.bz2''')
@ubuntu
@docker
def test_build_environment_and_toolchain(self):
"""build environment and toolchain"""
tc = toolchain(CC='gcc', CXX='g++', FC='gfortran')
g = generic_autotools(
build_directory='/tmp/build',
build_environment={'FOO': 'BAR'},
directory='/var/tmp/tcl8.6.9/unix',
prefix='/usr/local/tcl',
toolchain=tc,
url='https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz')
self.assertEqual(str(g),
r'''# https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/tcl8.6.9-src.tar.gz -C /var/tmp -z && \
mkdir -p /tmp/build && cd /tmp/build && FOO=BAR CC=gcc CXX=g++ FC=gfortran /var/tmp/tcl8.6.9/unix/configure --prefix=/usr/local/tcl && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/tcl8.6.9/unix /var/tmp/tcl8.6.9-src.tar.gz /tmp/build''')
@ubuntu
@docker
def test_repository_recursive(self):
"""test repository and recusive option"""
g = generic_autotools(preconfigure=['./autogen.sh'],
prefix='/usr/local/zeromq',
recursive=True,
repository='https://github.com/zeromq/libzmq.git')
self.assertEqual(str(g),
r'''# https://github.com/zeromq/libzmq.git
RUN mkdir -p /var/tmp && cd /var/tmp && git clone --depth=1 --recursive https://github.com/zeromq/libzmq.git libzmq && cd - && \
cd /var/tmp/libzmq && \
./autogen.sh && \
cd /var/tmp/libzmq && ./configure --prefix=/usr/local/zeromq && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/libzmq''')
@ubuntu
@docker
def test_ldconfig_and_environment(self):
"""ldconfig and environment"""
g = generic_autotools(
devel_environment={'CPATH': '/usr/local/zeromq/include:$CPATH',
'PATH': '/usr/local/zeromq/bin:$PATH'},
ldconfig=True,
preconfigure=['./autogen.sh'],
prefix='/usr/local/zeromq',
repository='https://github.com/zeromq/libzmq.git',
runtime_environment={'PATH': '/usr/local/zeromq/bin:$PATH'})
self.assertEqual(str(g),
r'''# https://github.com/zeromq/libzmq.git
RUN mkdir -p /var/tmp && cd /var/tmp && git clone --depth=1 https://github.com/zeromq/libzmq.git libzmq && cd - && \
cd /var/tmp/libzmq && \
./autogen.sh && \
cd /var/tmp/libzmq && ./configure --prefix=/usr/local/zeromq && \
make -j$(nproc) && \
make -j$(nproc) install && \
echo "/usr/local/zeromq/lib" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig && \
rm -rf /var/tmp/libzmq
ENV CPATH=/usr/local/zeromq/include:$CPATH \
PATH=/usr/local/zeromq/bin:$PATH''')
r = g.runtime()
self.assertEqual(r,
r'''# https://github.com/zeromq/libzmq.git
COPY --from=0 /usr/local/zeromq /usr/local/zeromq
RUN echo "/usr/local/zeromq/lib" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig
ENV PATH=/usr/local/zeromq/bin:$PATH''')
@ubuntu
@docker
def test_runtime(self):
"""Runtime"""
g = generic_autotools(
directory='tcl8.6.9/unix',
prefix='/usr/local/tcl',
url='https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz')
r = g.runtime()
self.assertEqual(r,
r'''# https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz
COPY --from=0 /usr/local/tcl /usr/local/tcl''')
@ubuntu
@docker
def test_runtime_manual(self):
"""Runtime"""
g = generic_autotools(
directory='tcl8.6.9/unix',
prefix='/usr/local/tcl',
runtime=['/usr/local/tcl/bin/tclsh*', '/usr/local/tcl/lib'],
url='https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz')
r = g.runtime()
self.assertEqual(r,
r'''# https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz
COPY --from=0 /usr/local/tcl/bin/tclsh* /usr/local/tcl/bin/
COPY --from=0 /usr/local/tcl/lib /usr/local/tcl/lib''')
@ubuntu
@docker
def test_runtime_annotate(self):
"""Runtime"""
g = generic_autotools(
annotate=True,
base_annotation='tcl',
directory='tcl8.6.9/unix',
prefix='/usr/local/tcl',
url='https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz')
r = g.runtime()
self.assertEqual(r,
r'''# https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz
COPY --from=0 /usr/local/tcl /usr/local/tcl
LABEL hpccm.tcl.configure='./configure --prefix=/usr/local/tcl' \
hpccm.tcl.url=https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz''')
| hpc-container-maker-master | test/test_generic_autotools.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the catalyst module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ubuntu
from hpccm.building_blocks.catalyst import catalyst
from hpccm.toolchain import toolchain
class Test_catalyst(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@ubuntu
@docker
def test_defaults_ubuntu(self):
"""Default catalyst building block"""
c = catalyst()
self.assertEqual(str(c),
r'''# ParaView Catalyst version 5.6.1
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
git \
gzip \
libgl1-mesa-dev \
libice-dev \
libsm-dev \
libx11-dev \
libxau-dev \
libxext-dev \
libxt-dev \
make \
tar \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -O /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz -P /var/tmp https://www.paraview.org/paraview-downloads/download.php?submit=Download\&version=v5.6\&type=catalyst\&os=Sources\&downloadFile=Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz -C /var/tmp -z && \
mkdir -p /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build && cd /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build && /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/cmake.sh -DCMAKE_INSTALL_PREFIX=/usr/local/catalyst /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base && \
cmake --build /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build --target all -- -j$(nproc) && \
cmake --build /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build --target install -- -j$(nproc) && \
rm -rf /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base
ENV LD_LIBRARY_PATH=/usr/local/catalyst/lib:$LD_LIBRARY_PATH \
PATH=/usr/local/catalyst/bin:$PATH''')
@centos
@docker
def test_defaults_centos(self):
"""Default catalyst building block"""
c = catalyst()
self.assertEqual(str(c),
r'''# ParaView Catalyst version 5.6.1
RUN yum install -y \
git \
gzip \
libICE-devel \
libSM-devel \
libX11-devel \
libXau-devel \
libXext-devel \
libXt-devel \
libglvnd-devel \
make \
mesa-libGL-devel \
tar \
wget \
which && \
rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -O /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz -P /var/tmp https://www.paraview.org/paraview-downloads/download.php?submit=Download\&version=v5.6\&type=catalyst\&os=Sources\&downloadFile=Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz -C /var/tmp -z && \
mkdir -p /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build && cd /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build && /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/cmake.sh -DCMAKE_INSTALL_PREFIX=/usr/local/catalyst /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base && \
cmake --build /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build --target all -- -j$(nproc) && \
cmake --build /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base/build --target install -- -j$(nproc) && \
rm -rf /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz /var/tmp/Catalyst-v5.6.1-Base-Enable-Python-Essentials-Extras-Rendering-Base
ENV LD_LIBRARY_PATH=/usr/local/catalyst/lib:$LD_LIBRARY_PATH \
PATH=/usr/local/catalyst/bin:$PATH''')
@ubuntu
@docker
def test_edition(self):
"""edition option"""
c = catalyst(edition='Base-Essentials')
self.assertEqual(str(c),
r'''# ParaView Catalyst version 5.6.1
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
git \
gzip \
make \
tar \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -O /var/tmp/Catalyst-v5.6.1-Base-Essentials.tar.gz -P /var/tmp https://www.paraview.org/paraview-downloads/download.php?submit=Download\&version=v5.6\&type=catalyst\&os=Sources\&downloadFile=Catalyst-v5.6.1-Base-Essentials.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/Catalyst-v5.6.1-Base-Essentials.tar.gz -C /var/tmp -z && \
mkdir -p /var/tmp/Catalyst-v5.6.1-Base-Essentials/build && cd /var/tmp/Catalyst-v5.6.1-Base-Essentials/build && /var/tmp/Catalyst-v5.6.1-Base-Essentials/cmake.sh -DCMAKE_INSTALL_PREFIX=/usr/local/catalyst /var/tmp/Catalyst-v5.6.1-Base-Essentials && \
cmake --build /var/tmp/Catalyst-v5.6.1-Base-Essentials/build --target all -- -j$(nproc) && \
cmake --build /var/tmp/Catalyst-v5.6.1-Base-Essentials/build --target install -- -j$(nproc) && \
rm -rf /var/tmp/Catalyst-v5.6.1-Base-Essentials.tar.gz /var/tmp/Catalyst-v5.6.1-Base-Essentials
ENV LD_LIBRARY_PATH=/usr/local/catalyst/lib:$LD_LIBRARY_PATH \
PATH=/usr/local/catalyst/bin:$PATH''')
@ubuntu
@docker
def test_ldconfig(self):
"""ldconfig option"""
c = catalyst(ldconfig=True, version='5.6.0')
self.assertEqual(str(c),
r'''# ParaView Catalyst version 5.6.0
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
git \
gzip \
libgl1-mesa-dev \
libice-dev \
libsm-dev \
libx11-dev \
libxau-dev \
libxext-dev \
libxt-dev \
make \
tar \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -O /var/tmp/Catalyst-v5.6.0-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz -P /var/tmp https://www.paraview.org/paraview-downloads/download.php?submit=Download\&version=v5.6\&type=catalyst\&os=Sources\&downloadFile=Catalyst-v5.6.0-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz && \
mkdir -p /var/tmp && tar -x -f /var/tmp/Catalyst-v5.6.0-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz -C /var/tmp -z && \
mkdir -p /var/tmp/Catalyst-v5.6.0-Base-Enable-Python-Essentials-Extras-Rendering-Base/build && cd /var/tmp/Catalyst-v5.6.0-Base-Enable-Python-Essentials-Extras-Rendering-Base/build && /var/tmp/Catalyst-v5.6.0-Base-Enable-Python-Essentials-Extras-Rendering-Base/cmake.sh -DCMAKE_INSTALL_PREFIX=/usr/local/catalyst /var/tmp/Catalyst-v5.6.0-Base-Enable-Python-Essentials-Extras-Rendering-Base && \
cmake --build /var/tmp/Catalyst-v5.6.0-Base-Enable-Python-Essentials-Extras-Rendering-Base/build --target all -- -j$(nproc) && \
cmake --build /var/tmp/Catalyst-v5.6.0-Base-Enable-Python-Essentials-Extras-Rendering-Base/build --target install -- -j$(nproc) && \
echo "/usr/local/catalyst/lib" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig && \
rm -rf /var/tmp/Catalyst-v5.6.0-Base-Enable-Python-Essentials-Extras-Rendering-Base.tar.gz /var/tmp/Catalyst-v5.6.0-Base-Enable-Python-Essentials-Extras-Rendering-Base
ENV PATH=/usr/local/catalyst/bin:$PATH''')
@ubuntu
@docker
def test_runtime(self):
"""Runtime"""
c = catalyst()
r = c.runtime()
self.assertEqual(r,
r'''# ParaView Catalyst
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libgl1-mesa-glx \
libice6 \
libsm6 \
libx11-6 \
libxau6 \
libxext6 \
libxt6 && \
rm -rf /var/lib/apt/lists/*
COPY --from=0 /usr/local/catalyst /usr/local/catalyst
ENV LD_LIBRARY_PATH=/usr/local/catalyst/lib:$LD_LIBRARY_PATH \
PATH=/usr/local/catalyst/bin:$PATH''')
| hpc-container-maker-master | test/test_catalyst.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the packages module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, invalid_distro, ubuntu
from hpccm.building_blocks.packages import packages
class Test_packages(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@ubuntu
@docker
def test_basic_ubuntu(self):
"""Basic packages"""
p = packages(ospackages=['gcc', 'g++', 'gfortran'])
self.assertEqual(str(p),
r'''RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
g++ \
gcc \
gfortran && \
rm -rf /var/lib/apt/lists/*''')
@centos
@docker
def test_basic_centos(self):
"""Basic packages"""
p = packages(ospackages=['gcc', 'gcc-c++', 'gcc-fortran'])
self.assertEqual(str(p),
r'''RUN yum install -y \
gcc \
gcc-c++ \
gcc-fortran && \
rm -rf /var/cache/yum/*''')
@invalid_distro
def test_invalid_distro(self):
"""Invalid package type specified"""
with self.assertRaises(RuntimeError):
packages(ospackages=['gcc', 'g++', 'gfortran'])
| hpc-container-maker-master | test/test_packages.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the label module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import bash, docker, invalid_ctype, singularity
from hpccm.primitives.label import label
class Test_label(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@docker
def test_empty(self):
"""No label specified"""
l = label()
self.assertEqual(str(l), '')
@invalid_ctype
def test_invalid_ctype(self):
"""Invalid container type specified"""
l = label(metadata={'A': 'B'})
with self.assertRaises(RuntimeError):
str(l)
@docker
def test_single_docker(self):
"""Single label specified"""
l = label(metadata={'A': 'B'})
self.assertEqual(str(l), 'LABEL A=B')
@singularity
def test_single_singularity(self):
"""Single label specified"""
l = label(metadata={'A': 'B'})
self.assertEqual(str(l), '%labels\n A B')
@bash
def test_single_bash(self):
"""Single label specified"""
l = label(metadata={'A': 'B'})
self.assertEqual(str(l), '')
@docker
def test_multiple_docker(self):
"""Multiple labels specified"""
l = label(metadata={'ONE': 1, 'TWO': 2, 'THREE': 3})
self.assertEqual(str(l),
'''LABEL ONE=1 \\
THREE=3 \\
TWO=2''')
@singularity
def test_multiple_singularity(self):
"""Multiple labels specified"""
l = label(metadata={'ONE': 1, 'TWO': 2, 'THREE': 3})
self.assertEqual(str(l),
'''%labels
ONE 1
THREE 3
TWO 2''')
@singularity
def test_applabel_multiple_singularity(self):
"""Multiple app-specific labels specified"""
l = label(metadata={'ONE': 1, 'TWO': 2, 'THREE': 3}, _app='foo')
self.assertEqual(str(l),
'''%applabels foo
ONE 1
THREE 3
TWO 2''')
@docker
def test_applabels_docker(self):
"""applabels not implemented in Docker"""
l = label(metadata={'ONE': 1, 'TWO': 2, 'THREE': 3}, _app='foo')
self.assertEqual(str(l),
'''LABEL ONE=1 \\
THREE=3 \\
TWO=2''')
@docker
def test_merge_docker(self):
"""merge primitives"""
l = []
l.append(label(metadata={'ONE': 1, 'TWO': 2}))
l.append(label(metadata={'THREE': 3}))
merged = l[0].merge(l)
self.assertEqual(str(merged),
'''LABEL ONE=1 \\
THREE=3 \\
TWO=2''')
l.append(label(metadata={'ONE': 'uno'}))
key_overwrite = l[0].merge(l)
self.assertEqual(str(key_overwrite),
'''LABEL ONE=uno \\
THREE=3 \\
TWO=2''')
@singularity
def test_merge_singularity(self):
"""merge primitives"""
l = []
l.append(label(metadata={'ONE': 1, 'TWO': 2}))
l.append(label(metadata={'THREE': 3}))
merged = l[0].merge(l)
self.assertEqual(str(merged),
'''%labels
ONE 1
THREE 3
TWO 2''')
l.append(label(metadata={'ONE': 'uno'}))
key_overwrite = l[0].merge(l)
self.assertEqual(str(key_overwrite),
'''%labels
ONE uno
THREE 3
TWO 2''')
| hpc-container-maker-master | test/test_label.py |
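# HPCCM recipe fragment: pulls in another recipe via include() and instantiates the GNU compiler building block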
include('../test/include1.py')
compiler = gnu()
| hpc-container-maker-master | test/include2.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the xpmem module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ubuntu
from hpccm.building_blocks.xpmem import xpmem
class Test_xpmem(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@ubuntu
@docker
def test_defaults_ubuntu(self):
"""Default xpmem building block"""
x = xpmem()
self.assertEqual(str(x),
r'''# XPMEM branch master
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
autoconf \
automake \
ca-certificates \
file \
git \
libtool \
make && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && cd /var/tmp && git clone --depth=1 --branch master https://github.com/hjelmn/xpmem.git xpmem && cd - && \
cd /var/tmp/xpmem && \
autoreconf --install && \
cd /var/tmp/xpmem && ./configure --prefix=/usr/local/xpmem --disable-kernel-module && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/xpmem
ENV CPATH=/usr/local/xpmem/include:$CPATH \
LD_LIBRARY_PATH=/usr/local/xpmem/lib:$LD_LIBRARY_PATH \
LIBRARY_PATH=/usr/local/xpmem/lib:$LIBRARY_PATH''')
@centos
@docker
def test_defaults_centos(self):
"""Default xpmem building block"""
x = xpmem()
self.assertEqual(str(x),
r'''# XPMEM branch master
RUN yum install -y \
autoconf \
automake \
ca-certificates \
file \
git \
libtool \
make && \
rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && cd /var/tmp && git clone --depth=1 --branch master https://github.com/hjelmn/xpmem.git xpmem && cd - && \
cd /var/tmp/xpmem && \
autoreconf --install && \
cd /var/tmp/xpmem && ./configure --prefix=/usr/local/xpmem --disable-kernel-module && \
make -j$(nproc) && \
make -j$(nproc) install && \
rm -rf /var/tmp/xpmem
ENV CPATH=/usr/local/xpmem/include:$CPATH \
LD_LIBRARY_PATH=/usr/local/xpmem/lib:$LD_LIBRARY_PATH \
LIBRARY_PATH=/usr/local/xpmem/lib:$LIBRARY_PATH''')
@ubuntu
@docker
def test_ldconfig(self):
"""ldconfig option"""
x = xpmem(ldconfig=True, branch='master')
self.assertEqual(str(x),
r'''# XPMEM branch master
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
autoconf \
automake \
ca-certificates \
file \
git \
libtool \
make && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && cd /var/tmp && git clone --depth=1 --branch master https://github.com/hjelmn/xpmem.git xpmem && cd - && \
cd /var/tmp/xpmem && \
autoreconf --install && \
cd /var/tmp/xpmem && ./configure --prefix=/usr/local/xpmem --disable-kernel-module && \
make -j$(nproc) && \
make -j$(nproc) install && \
echo "/usr/local/xpmem/lib" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig && \
rm -rf /var/tmp/xpmem
ENV CPATH=/usr/local/xpmem/include:$CPATH \
LIBRARY_PATH=/usr/local/xpmem/lib:$LIBRARY_PATH''')
@ubuntu
@docker
def test_runtime(self):
"""Runtime"""
x = xpmem()
r = x.runtime()
self.assertEqual(r,
r'''# XPMEM
COPY --from=0 /usr/local/xpmem /usr/local/xpmem
ENV CPATH=/usr/local/xpmem/include:$CPATH \
LD_LIBRARY_PATH=/usr/local/xpmem/lib:$LD_LIBRARY_PATH \
LIBRARY_PATH=/usr/local/xpmem/lib:$LIBRARY_PATH''')
| hpc-container-maker-master | test/test_xpmem.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the ofed module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import aarch64, centos, centos8, docker, ubuntu, ubuntu18, ubuntu20, ppc64le, x86_64
from hpccm.building_blocks.ofed import ofed
class Test_ofed(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@x86_64
@ubuntu
@docker
def test_defaults_ubuntu(self):
"""Default ofed building block"""
o = ofed()
self.assertEqual(str(o),
r'''# OFED
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends -t xenial \
dapl2-utils \
ibutils \
ibverbs-utils \
infiniband-diags \
libdapl-dev \
libdapl2 \
libibcm-dev \
libibcm1 \
libibmad-dev \
libibmad5 \
libibverbs-dev \
libibverbs1 \
libmlx4-1 \
libmlx4-dev \
libmlx5-1 \
libmlx5-dev \
librdmacm-dev \
librdmacm1 \
rdmacm-utils && \
rm -rf /var/lib/apt/lists/*''')
@x86_64
@ubuntu18
@docker
def test_defaults_ubuntu18(self):
"""Default ofed building block"""
o = ofed()
self.assertEqual(str(o),
r'''# OFED
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends -t bionic \
dapl2-utils \
ibutils \
ibverbs-providers \
ibverbs-utils \
infiniband-diags \
libdapl-dev \
libdapl2 \
libibmad-dev \
libibmad5 \
libibverbs-dev \
libibverbs1 \
librdmacm-dev \
librdmacm1 \
rdmacm-utils && \
rm -rf /var/lib/apt/lists/*''')
@x86_64
@ubuntu20
@docker
def test_defaults_ubuntu20(self):
"""Default ofed building block"""
o = ofed()
self.assertEqual(str(o),
r'''# OFED
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends -t focal \
dapl2-utils \
ibutils \
ibverbs-providers \
ibverbs-utils \
infiniband-diags \
libdapl-dev \
libdapl2 \
libibmad-dev \
libibmad5 \
libibverbs-dev \
libibverbs1 \
librdmacm-dev \
librdmacm1 \
rdmacm-utils && \
rm -rf /var/lib/apt/lists/*''')
@x86_64
@centos
@docker
def test_defaults_centos(self):
"""Default ofed building block"""
o = ofed()
self.assertEqual(str(o),
r'''# OFED
RUN yum install -y --disablerepo=mlnx\* \
dapl \
dapl-devel \
ibutils \
libibcm \
libibmad \
libibmad-devel \
libibumad \
libibverbs \
libibverbs-utils \
libmlx5 \
librdmacm \
rdma-core \
rdma-core-devel && \
rm -rf /var/cache/yum/*''')
@x86_64
@centos8
@docker
def test_defaults_centos8(self):
"""Default ofed building block"""
o = ofed()
self.assertEqual(str(o),
r'''# OFED
RUN yum install -y dnf-utils && \
yum-config-manager --set-enabled powertools && \
yum install -y --disablerepo=mlnx\* \
libibmad \
libibmad-devel \
libibumad \
libibverbs \
libibverbs-utils \
libmlx5 \
librdmacm \
rdma-core \
rdma-core-devel && \
rm -rf /var/cache/yum/*''')
@x86_64
@ubuntu
@docker
def test_prefix_ubuntu16(self):
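"""prefix option"""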
o = ofed(prefix='/usr/local/ofed')
self.assertEqual(str(o),
r'''# OFED
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libnl-3-200 \
libnl-route-3-200 \
libnuma1 && \
rm -rf /var/lib/apt/lists/*
RUN apt-get update -y && \
mkdir -m 777 -p /var/tmp/packages_download && cd /var/tmp/packages_download && \
DEBIAN_FRONTEND=noninteractive apt-get download -y --no-install-recommends -t xenial \
dapl2-utils \
ibutils \
ibverbs-utils \
infiniband-diags \
libdapl-dev \
libdapl2 \
libibcm-dev \
libibcm1 \
libibmad-dev \
libibmad5 \
libibverbs-dev \
libibverbs1 \
libmlx4-1 \
libmlx4-dev \
libmlx5-1 \
libmlx5-dev \
librdmacm-dev \
librdmacm1 \
rdmacm-utils && \
mkdir -p /usr/local/ofed && \
find /var/tmp/packages_download -regextype posix-extended -type f -regex "/var/tmp/packages_download/(dapl2-utils|ibutils|ibverbs-utils|infiniband-diags|libdapl-dev|libdapl2|libibcm-dev|libibcm1|libibmad-dev|libibmad5|libibverbs-dev|libibverbs1|libmlx4-1|libmlx4-dev|libmlx5-1|libmlx5-dev|librdmacm-dev|librdmacm1|rdmacm-utils).*deb" -exec dpkg --extract {} /usr/local/ofed \; && \
rm -rf /var/tmp/packages_download && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /etc/libibverbs.d''')
@aarch64
@ubuntu
@docker
def test_aarch64_ubuntu16(self):
"""aarch64"""
o = ofed()
self.assertEqual(str(o),
r'''# OFED
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends -t xenial \
ibutils \
ibverbs-utils \
infiniband-diags \
libibmad-dev \
libibmad5 \
libibverbs-dev \
libibverbs1 \
libmlx4-1 \
libmlx4-dev \
libmlx5-1 \
libmlx5-dev \
librdmacm-dev \
librdmacm1 \
rdmacm-utils && \
rm -rf /var/lib/apt/lists/*''')
@ppc64le
@ubuntu
@docker
def test_ppc64le_ubuntu16(self):
"""ppc64le"""
o = ofed()
self.assertEqual(str(o),
r'''# OFED
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends -t xenial \
dapl2-utils \
ibutils \
ibverbs-utils \
infiniband-diags \
libdapl-dev \
libdapl2 \
libibmad-dev \
libibmad5 \
libibverbs-dev \
libibverbs1 \
libmlx4-1 \
libmlx4-dev \
libmlx5-1 \
libmlx5-dev \
librdmacm-dev \
librdmacm1 \
rdmacm-utils && \
rm -rf /var/lib/apt/lists/*''')
@x86_64
@ubuntu
@docker
def test_runtime(self):
"""Runtime"""
o = ofed()
r = o.runtime()
self.assertEqual(r,
r'''# OFED
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends -t xenial \
dapl2-utils \
ibutils \
ibverbs-utils \
infiniband-diags \
libdapl-dev \
libdapl2 \
libibcm-dev \
libibcm1 \
libibmad-dev \
libibmad5 \
libibverbs-dev \
libibverbs1 \
libmlx4-1 \
libmlx4-dev \
libmlx5-1 \
libmlx5-dev \
librdmacm-dev \
librdmacm1 \
rdmacm-utils && \
rm -rf /var/lib/apt/lists/*''')
@ubuntu
@docker
def test_runtime_prefix(self):
"""Prefix + runtime"""
o = ofed(prefix='/usr/local/ofed')
r = o.runtime()
self.assertEqual(r,
r'''# OFED
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libnl-3-200 \
libnl-route-3-200 \
libnuma1 && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /etc/libibverbs.d
COPY --from=0 /usr/local/ofed /usr/local/ofed''')
| hpc-container-maker-master | test/test_ofed.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the user module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import bash, docker, invalid_ctype, singularity
from hpccm.primitives.user import user
class Test_user(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@docker
def test_empty(self):
"""No user specified"""
u = user()
self.assertEqual(str(u), '')
@invalid_ctype
def test_invalid_ctype(self):
"""Invalid container type specified"""
u = user(user='root')
with self.assertRaises(RuntimeError):
str(u)
@docker
def test_docker(self):
"""User specified"""
u = user(user='root')
self.assertEqual(str(u), 'USER root')
@singularity
def test_singularity(self):
"""User specified"""
u = user(user='root')
self.assertEqual(str(u), '')
@bash
def test_bash(self):
"""User specified"""
u = user(user='root')
self.assertEqual(str(u), '')
| hpc-container-maker-master | test/test_user.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the zip module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from hpccm.templates.zipfile import zipfile
class Test_zipfile(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
def test_missing_zipfile(self):
"""Missing zipfile option"""
z = zipfile()
self.assertEqual(z.unzip_step(), '')
def test_filetypes(self):
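"""Zip filename specified"""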
z = zipfile()
self.assertEqual(z.unzip_step('foo.zip'), 'unzip foo.zip')
def test_directory(self):
"""Directory specified"""
z = zipfile()
self.assertEqual(z.unzip_step('foo.zip', 'bar'),
'mkdir -p bar && unzip -d bar foo.zip')
| hpc-container-maker-master | test/test_zipfile.py |