class MixtureODELayer(nn.Module):
'Produces a mixture of experts where output = sigma(t) * f(t, x).\n Time-dependent weights sigma(t) help learn to blend the experts without resorting to a highly stiff f.\n Supports both regular and diffeq experts.\n '
def __init__(self, experts):
super(MixtureODELayer, self).__init__()
assert (len(experts) > 1)
wrapped_experts = [diffeq_wrapper(ex) for ex in experts]
self.experts = nn.ModuleList(wrapped_experts)
self.mixture_weights = nn.Linear(1, len(self.experts))
def forward(self, t, y):
dys = []
for f in self.experts:
dys.append(f(t, y))
dys = torch.stack(dys, 0)
weights = self.mixture_weights(t).view((- 1), *([1] * (dys.ndimension() - 1)))
dy = torch.sum((dys * weights), dim=0, keepdim=False)
return dy
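# Usage sketch (not part of the original source): blends two plain nn.Linear
# experts with a time-dependent weighting. Assumes torch / torch.nn (as nn) are
# imported and that the diffeq_wrapper helper shown further below is available
# in this module.
def _example_mixture_ode_layer():
    experts = [nn.Linear(3, 3), nn.Linear(3, 3)]   # state-only experts f(y)
    func = MixtureODELayer(experts)
    t = torch.tensor([0.5])                        # time as a 1-element tensor
    y = torch.randn(7, 3)
    dy = func(t, y)
    assert dy.shape == y.shape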
|
class ResNet(container.SequentialDiffEq):
def __init__(self, dim, intermediate_dim, n_resblocks, conv_block=None):
super(ResNet, self).__init__()
if (conv_block is None):
conv_block = basic.ConcatCoordConv2d
self.dim = dim
self.intermediate_dim = intermediate_dim
self.n_resblocks = n_resblocks
layers = []
layers.append(conv_block(dim, intermediate_dim, ksize=3, stride=1, padding=1, bias=False))
for _ in range(n_resblocks):
layers.append(BasicBlock(intermediate_dim, conv_block))
layers.append(nn.GroupNorm(NGROUPS, intermediate_dim, eps=0.0001))
layers.append(nn.ReLU(inplace=True))
layers.append(conv_block(intermediate_dim, dim, ksize=1, bias=False))
super(ResNet, self).__init__(*layers)
def __repr__(self):
return '{name}({dim}, intermediate_dim={intermediate_dim}, n_resblocks={n_resblocks})'.format(name=self.__class__.__name__, **self.__dict__)
|
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, dim, conv_block=None):
super(BasicBlock, self).__init__()
if (conv_block is None):
conv_block = basic.ConcatCoordConv2d
self.norm1 = nn.GroupNorm(NGROUPS, dim, eps=0.0001)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = conv_block(dim, dim, ksize=3, stride=1, padding=1, bias=False)
self.norm2 = nn.GroupNorm(NGROUPS, dim, eps=0.0001)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = conv_block(dim, dim, ksize=3, stride=1, padding=1, bias=False)
def forward(self, t, x):
residual = x
out = self.norm1(x)
out = self.relu1(out)
out = self.conv1(t, out)
out = self.norm2(out)
out = self.relu2(out)
out = self.conv2(t, out)
out += residual
return out
|
class DiffEqWrapper(nn.Module):
def __init__(self, module):
super(DiffEqWrapper, self).__init__()
self.module = module
if (len(signature(self.module.forward).parameters) == 1):
self.diffeq = (lambda t, y: self.module(y))
elif (len(signature(self.module.forward).parameters) == 2):
self.diffeq = self.module
else:
raise ValueError('Differential equation needs to either take (t, y) or (y,) as input.')
def forward(self, t, y):
return self.diffeq(t, y)
def __repr__(self):
return self.diffeq.__repr__()
|
def diffeq_wrapper(layer):
return DiffEqWrapper(layer)
|
class ReshapeDiffEq(nn.Module):
def __init__(self, input_shape, net):
super(ReshapeDiffEq, self).__init__()
assert (len(signature(net.forward).parameters) == 2), 'use diffeq_wrapper before reshape_wrapper.'
self.input_shape = input_shape
self.net = net
def forward(self, t, x):
batchsize = x.shape[0]
x = x.view(batchsize, *self.input_shape)
return self.net(t, x).view(batchsize, (- 1))
def __repr__(self):
        return self.net.__repr__()
|
def reshape_wrapper(input_shape, layer):
return ReshapeDiffEq(input_shape, layer)
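# Usage sketch (not part of the original source): lets a convolutional drift
# network act on flattened ODE states. Assumes torch / torch.nn (as nn) are
# imported and that diffeq_wrapper (defined above) is available here.
def _example_reshape_wrapper():
    conv = diffeq_wrapper(nn.Conv2d(3, 3, kernel_size=3, padding=1))
    func = reshape_wrapper((3, 8, 8), conv)
    t = torch.tensor(0.0)
    x = torch.randn(5, 3 * 8 * 8)        # flattened (3, 8, 8) states
    assert func(t, x).shape == (5, 3 * 8 * 8)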
|
class ZeroMeanTransform(nn.Module):
def __init__(self):
nn.Module.__init__(self)
def forward(self, x, logpx=None, reverse=False):
if reverse:
x = (x + 0.5)
if (logpx is None):
return x
return (x, logpx)
else:
x = (x - 0.5)
if (logpx is None):
return x
return (x, logpx)
|
class LogitTransform(nn.Module):
    '\n The preprocessing step used in Real NVP:\n y = logit(a + (1 - 2a) * x)\n x = (sigmoid(y) - a) / (1 - 2a)\n '
def __init__(self, alpha=_DEFAULT_ALPHA):
nn.Module.__init__(self)
self.alpha = alpha
def forward(self, x, logpx=None, reverse=False):
if reverse:
return _sigmoid(x, logpx, self.alpha)
else:
return _logit(x, logpx, self.alpha)
|
class SigmoidTransform(nn.Module):
'Reverse of LogitTransform.'
def __init__(self, alpha=_DEFAULT_ALPHA):
nn.Module.__init__(self)
self.alpha = alpha
def forward(self, x, logpx=None, reverse=False):
if reverse:
return _logit(x, logpx, self.alpha)
else:
return _sigmoid(x, logpx, self.alpha)
|
def _logit(x, logpx=None, alpha=_DEFAULT_ALPHA):
s = (alpha + ((1 - (2 * alpha)) * x))
y = (torch.log(s) - torch.log((1 - s)))
if (logpx is None):
return y
return (y, (logpx - _logdetgrad(x, alpha).view(x.size(0), (- 1)).sum(1, keepdim=True)))
|
def _sigmoid(y, logpy=None, alpha=_DEFAULT_ALPHA):
x = ((torch.sigmoid(y) - alpha) / (1 - (2 * alpha)))
if (logpy is None):
return x
return (x, (logpy + _logdetgrad(x, alpha).view(x.size(0), (- 1)).sum(1, keepdim=True)))
|
def _logdetgrad(x, alpha):
s = (alpha + ((1 - (2 * alpha)) * x))
logdetgrad = ((- torch.log((s - (s * s)))) + math.log((1 - (2 * alpha))))
return logdetgrad
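# Usage sketch (not part of the original source): _logit and _sigmoid are mutual
# inverses and their log-det corrections cancel. Assumes torch is imported,
# _DEFAULT_ALPHA is the module-level constant referenced above, and both helpers
# live in the same module.
def _example_logit_roundtrip():
    x = torch.rand(4, 3) * 0.9 + 0.05    # keep values away from 0 and 1
    logpx = torch.zeros(4, 1)
    y, logpy = _logit(x, logpx)
    x_rec, logpx_rec = _sigmoid(y, logpy)
    assert torch.allclose(x, x_rec, atol=1e-4)
    assert torch.allclose(logpx, logpx_rec, atol=1e-4)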
|
class BruteForceLayer(nn.Module):
def __init__(self, dim):
super(BruteForceLayer, self).__init__()
self.weight = nn.Parameter(torch.eye(dim))
def forward(self, x, logpx=None, reverse=False):
if (not reverse):
y = F.linear(x, self.weight)
if (logpx is None):
return y
else:
return (y, (logpx - self._logdetgrad.expand_as(logpx)))
else:
y = F.linear(x, self.weight.double().inverse().float())
if (logpx is None):
return y
else:
return (y, (logpx + self._logdetgrad.expand_as(logpx)))
@property
def _logdetgrad(self):
return torch.log(torch.abs(torch.det(self.weight.double()))).float()
|
class PlanarFlow(nn.Module):
def __init__(self, nd=1):
super(PlanarFlow, self).__init__()
self.nd = nd
self.activation = torch.tanh
self.register_parameter('u', nn.Parameter(torch.randn(self.nd)))
self.register_parameter('w', nn.Parameter(torch.randn(self.nd)))
self.register_parameter('b', nn.Parameter(torch.randn(1)))
self.reset_parameters()
def reset_parameters(self):
stdv = (1.0 / math.sqrt(self.nd))
self.u.data.uniform_((- stdv), stdv)
self.w.data.uniform_((- stdv), stdv)
self.b.data.fill_(0)
self.make_invertible()
def make_invertible(self):
u = self.u.data
w = self.w.data
dot = torch.dot(u, w)
m = ((- 1) + math.log((1 + math.exp(dot))))
du = (((m - dot) / torch.norm(w)) * w)
u = (u + du)
self.u.data = u
def forward(self, z, logp=None, reverse=False):
'Computes f(z) and log q(f(z))'
assert (not reverse), 'Planar normalizing flow cannot be reversed.'
h = self.activation((torch.mm(z, self.w.view(self.nd, 1)) + self.b))
z = (z + (self.u.expand_as(z) * h))
f = self.sample(z)
if (logp is not None):
qf = self.log_density(z, logp)
return (f, qf)
else:
return f
def sample(self, z):
'Computes f(z)'
h = self.activation((torch.mm(z, self.w.view(self.nd, 1)) + self.b))
output = (z + (self.u.expand_as(z) * h))
return output
def _detgrad(self, z):
'Computes |det df/dz|'
with torch.enable_grad():
z = z.requires_grad_(True)
h = self.activation((torch.mm(z, self.w.view(self.nd, 1)) + self.b))
psi = grad(h, z, grad_outputs=torch.ones_like(h), create_graph=True, only_inputs=True)[0]
u_dot_psi = torch.mm(psi, self.u.view(self.nd, 1))
detgrad = (1 + u_dot_psi)
return detgrad
def log_density(self, z, logqz):
'Computes log density of the flow given the log density of z'
return (logqz - torch.log((self._detgrad(z) + 1e-08)))
|
class MovingBatchNormNd(nn.Module):
def __init__(self, num_features, eps=0.0001, decay=0.1, bn_lag=0.0, affine=True):
super(MovingBatchNormNd, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
self.decay = decay
self.bn_lag = bn_lag
self.register_buffer('step', torch.zeros(1))
if self.affine:
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
@property
def shape(self):
raise NotImplementedError
def reset_parameters(self):
self.running_mean.zero_()
self.running_var.fill_(1)
if self.affine:
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, x, logpx=None, reverse=False):
if reverse:
return self._reverse(x, logpx)
else:
return self._forward(x, logpx)
def _forward(self, x, logpx=None):
c = x.size(1)
used_mean = self.running_mean.clone().detach()
used_var = self.running_var.clone().detach()
if self.training:
x_t = x.transpose(0, 1).contiguous().view(c, (- 1))
batch_mean = torch.mean(x_t, dim=1)
batch_var = torch.var(x_t, dim=1)
if (self.bn_lag > 0):
used_mean = (batch_mean - ((1 - self.bn_lag) * (batch_mean - used_mean.detach())))
used_mean /= (1.0 - (self.bn_lag ** (self.step[0] + 1)))
used_var = (batch_var - ((1 - self.bn_lag) * (batch_var - used_var.detach())))
used_var /= (1.0 - (self.bn_lag ** (self.step[0] + 1)))
self.running_mean -= (self.decay * (self.running_mean - batch_mean.data))
self.running_var -= (self.decay * (self.running_var - batch_var.data))
self.step += 1
used_mean = used_mean.view(*self.shape).expand_as(x)
used_var = used_var.view(*self.shape).expand_as(x)
y = ((x - used_mean) * torch.exp(((- 0.5) * torch.log((used_var + self.eps)))))
if self.affine:
weight = self.weight.view(*self.shape).expand_as(x)
bias = self.bias.view(*self.shape).expand_as(x)
y = ((y * torch.exp(weight)) + bias)
if (logpx is None):
return y
else:
return (y, (logpx - self._logdetgrad(x, used_var).view(x.size(0), (- 1)).sum(1, keepdim=True)))
def _reverse(self, y, logpy=None):
used_mean = self.running_mean
used_var = self.running_var
if self.affine:
weight = self.weight.view(*self.shape).expand_as(y)
bias = self.bias.view(*self.shape).expand_as(y)
y = ((y - bias) * torch.exp((- weight)))
used_mean = used_mean.view(*self.shape).expand_as(y)
used_var = used_var.view(*self.shape).expand_as(y)
x = ((y * torch.exp((0.5 * torch.log((used_var + self.eps))))) + used_mean)
if (logpy is None):
return x
else:
return (x, (logpy + self._logdetgrad(x, used_var).view(x.size(0), (- 1)).sum(1, keepdim=True)))
def _logdetgrad(self, x, used_var):
logdetgrad = ((- 0.5) * torch.log((used_var + self.eps)))
if self.affine:
weight = self.weight.view(*self.shape).expand(*x.size())
logdetgrad += weight
return logdetgrad
def __repr__(self):
return '{name}({num_features}, eps={eps}, decay={decay}, bn_lag={bn_lag}, affine={affine})'.format(name=self.__class__.__name__, **self.__dict__)
|
def stable_var(x, mean=None, dim=1):
if (mean is None):
mean = x.mean(dim, keepdim=True)
mean = mean.view((- 1), 1)
res = torch.pow((x - mean), 2)
max_sqr = torch.max(res, dim, keepdim=True)[0]
var = (torch.mean((res / max_sqr), 1, keepdim=True) * max_sqr)
var = var.view((- 1))
var[(var != var)] = 0
return var
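# Usage sketch (not part of the original source): stable_var matches the biased
# per-row variance computed directly; assumes torch is imported.
def _example_stable_var():
    x = torch.randn(3, 10)
    assert torch.allclose(stable_var(x), x.var(dim=1, unbiased=False), atol=1e-5)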
|
class MovingBatchNorm1d(MovingBatchNormNd):
@property
def shape(self):
return [1, (- 1)]
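# Usage sketch (not part of the original source): in eval mode the forward and
# reverse passes are exact inverses and the log-det terms cancel. Assumes torch
# is imported and MovingBatchNormNd (defined above) is available here.
def _example_moving_batch_norm():
    bn = MovingBatchNorm1d(5)
    bn.eval()                             # use the running statistics
    x = torch.randn(6, 5)
    logpx = torch.zeros(6, 1)
    y, logpy = bn(x, logpx)
    x_rec, logpx_rec = bn(y, logpy, reverse=True)
    assert torch.allclose(x, x_rec, atol=1e-5)
    assert torch.allclose(logpx, logpx_rec, atol=1e-5)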
|
class MovingBatchNorm2d(MovingBatchNormNd):
@property
def shape(self):
return [1, (- 1), 1, 1]
|
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, dim):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.GroupNorm(2, dim, eps=0.0001)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.GroupNorm(2, dim, eps=0.0001)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
|
class ResNeXtBottleneck(nn.Module):
    '\n ResNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n '
def __init__(self, dim, cardinality=4, base_depth=32):
        ' Constructor\n Args:\n dim: input and output channel dimensionality\n cardinality: num of convolution groups.\n base_depth: base number of channels in each group.\n '
super(ResNeXtBottleneck, self).__init__()
D = (cardinality * base_depth)
self.conv_reduce = nn.Conv2d(dim, D, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_grp = nn.Conv2d(D, D, kernel_size=3, stride=1, padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, dim, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(dim)
def forward(self, x):
bottleneck = self.conv_reduce.forward(x)
bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
bottleneck = self.conv_grp.forward(bottleneck)
bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
bottleneck = self.conv_expand.forward(bottleneck)
bottleneck = self.bn_expand.forward(bottleneck)
return F.relu((x + bottleneck), inplace=True)
|
class SqueezeLayer(nn.Module):
def __init__(self, downscale_factor):
super(SqueezeLayer, self).__init__()
self.downscale_factor = downscale_factor
def forward(self, x, logpx=None, reverse=False):
if reverse:
return self._upsample(x, logpx)
else:
return self._downsample(x, logpx)
def _downsample(self, x, logpx=None):
squeeze_x = squeeze(x, self.downscale_factor)
if (logpx is None):
return squeeze_x
else:
return (squeeze_x, logpx)
def _upsample(self, y, logpy=None):
unsqueeze_y = unsqueeze(y, self.downscale_factor)
if (logpy is None):
return unsqueeze_y
else:
return (unsqueeze_y, logpy)
|
def unsqueeze(input, upscale_factor=2):
'\n [:, C*r^2, H, W] -> [:, C, H*r, W*r]\n '
(batch_size, in_channels, in_height, in_width) = input.size()
out_channels = (in_channels // (upscale_factor ** 2))
out_height = (in_height * upscale_factor)
out_width = (in_width * upscale_factor)
input_view = input.contiguous().view(batch_size, out_channels, upscale_factor, upscale_factor, in_height, in_width)
output = input_view.permute(0, 1, 4, 2, 5, 3).contiguous()
return output.view(batch_size, out_channels, out_height, out_width)
|
def squeeze(input, downscale_factor=2):
'\n [:, C, H*r, W*r] -> [:, C*r^2, H, W]\n '
(batch_size, in_channels, in_height, in_width) = input.size()
out_channels = (in_channels * (downscale_factor ** 2))
out_height = (in_height // downscale_factor)
out_width = (in_width // downscale_factor)
input_view = input.contiguous().view(batch_size, in_channels, out_height, downscale_factor, out_width, downscale_factor)
output = input_view.permute(0, 1, 3, 5, 2, 4).contiguous()
return output.view(batch_size, out_channels, out_height, out_width)
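# Usage sketch (not part of the original source): squeeze and unsqueeze are
# exact inverses, trading spatial resolution for channels; assumes torch is
# imported and both helpers live in the same module.
def _example_squeeze_roundtrip():
    x = torch.randn(2, 3, 8, 8)
    y = squeeze(x, 2)                     # -> (2, 12, 4, 4)
    assert y.shape == (2, 12, 4, 4)
    assert torch.equal(unsqueeze(y, 2), x)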
|
class MultiscaleParallelCNF(nn.Module):
'\n CNF model for image data.\n\n Squeezes the input into multiple scales, applies different conv-nets at each scale\n and adds the resulting gradients\n\n Will downsample the input until one of the\n dimensions is less than or equal to 4.\n\n Args:\n input_size (tuple): 4D tuple of the input size.\n n_scale (int): Number of scales for the representation z.\n n_resblocks (int): Length of the resnet for each coupling layer.\n '
def __init__(self, input_size, n_scale=float('inf'), n_blocks=1, intermediate_dims=(32,), alpha=(- 1), time_length=1.0):
super(MultiscaleParallelCNF, self).__init__()
print(input_size)
self.n_scale = min(n_scale, self._calc_n_scale(input_size))
self.n_blocks = n_blocks
self.intermediate_dims = intermediate_dims
self.alpha = alpha
self.time_length = time_length
if (not (self.n_scale > 0)):
            raise ValueError(('Could not compute number of scales for input of size (%d,%d,%d,%d)' % input_size))
self.transforms = self._build_net(input_size)
def _build_net(self, input_size):
(_, c, h, w) = input_size
transforms = []
transforms.append(ParallelCNFLayers(initial_size=(c, h, w), idims=self.intermediate_dims, init_layer=(layers.LogitTransform(self.alpha) if (self.alpha > 0) else layers.ZeroMeanTransform()), n_blocks=self.n_blocks, time_length=self.time_length))
return nn.ModuleList(transforms)
def get_regularization(self):
if (len(self.regularization_fns) == 0):
return None
acc_reg_states = tuple(([0.0] * len(self.regularization_fns)))
for module in self.modules():
if isinstance(module, layers.CNF):
acc_reg_states = tuple(((acc + reg) for (acc, reg) in zip(acc_reg_states, module.get_regularization_states())))
return sum(((state * coeff) for (state, coeff) in zip(acc_reg_states, self.regularization_coeffs)))
def _calc_n_scale(self, input_size):
(_, _, h, w) = input_size
n_scale = 0
while ((h >= 4) and (w >= 4)):
n_scale += 1
h = (h // 2)
w = (w // 2)
return n_scale
def calc_output_size(self, input_size):
(n, c, h, w) = input_size
output_sizes = []
for i in range(self.n_scale):
if (i < (self.n_scale - 1)):
c *= 2
h //= 2
w //= 2
output_sizes.append((n, c, h, w))
else:
output_sizes.append((n, c, h, w))
return tuple(output_sizes)
def forward(self, x, logpx=None, reverse=False):
if reverse:
return self._generate(x, logpx)
else:
return self._logdensity(x, logpx)
def _logdensity(self, x, logpx=None):
_logpx = (torch.zeros(x.shape[0], 1).to(x) if (logpx is None) else logpx)
for idx in range(len(self.transforms)):
(x, _logpx) = self.transforms[idx].forward(x, _logpx)
return (x if (logpx is None) else (x, _logpx))
def _generate(self, z, logpz=None):
_logpz = (torch.zeros(z.shape[0], 1).to(z) if (logpz is None) else logpz)
for idx in reversed(range(len(self.transforms))):
(z, _logpz) = self.transforms[idx](z, _logpz, reverse=True)
return (z if (logpz is None) else (z, _logpz))
|
class ParallelSumModules(nn.Module):
def __init__(self, models):
super(ParallelSumModules, self).__init__()
self.models = nn.ModuleList(models)
self.cpu = (not torch.cuda.is_available())
def forward(self, t, y):
out = sum((model(t, y) for model in self.models))
return out
|
class ParallelCNFLayers(layers.SequentialFlow):
def __init__(self, initial_size, idims=(32,), scales=4, init_layer=None, n_blocks=1, time_length=1.0):
strides = tuple(([1] + [1 for _ in idims]))
chain = []
if (init_layer is not None):
chain.append(init_layer)
get_size = (lambda s: ((initial_size[0] * (4 ** s)), (initial_size[1] // (2 ** s)), (initial_size[2] // (2 ** s))))
def _make_odefunc():
nets = [ODEnet(idims, get_size(scale), strides, True, layer_type='concat', num_squeeze=scale) for scale in range(scales)]
net = ParallelSumModules(nets)
f = layers.ODEfunc(net)
return f
chain += [layers.CNF(_make_odefunc(), T=time_length) for _ in range(n_blocks)]
super(ParallelCNFLayers, self).__init__(chain)
|
class Uniform(nn.Module):
def __init__(self, a=0, b=1):
        super(Uniform, self).__init__()
self.a = Variable(torch.Tensor([a]))
self.b = Variable(torch.Tensor([b]))
def _check_inputs(self, size, params):
if ((size is None) and (params is None)):
raise ValueError('Either one of size or params should be provided.')
elif ((size is not None) and (params is not None)):
a = params.select((- 1), 0).expand(size)
b = params.select((- 1), 1).expand(size)
return (a, b)
elif (size is not None):
a = self.a.expand(size)
b = self.b.expand(size)
return (a, b)
elif (params is not None):
a = params.select((- 1), 0)
b = params.select((- 1), 1)
return (a, b)
else:
raise ValueError('Given invalid inputs: size={}, params={})'.format(size, params))
    def sample(self, size=None, params=None):
        (a, b) = self._check_inputs(size, params)
        # reparameterized sample: u ~ U(0, 1), rescaled into [a, b]
        u = Variable(torch.rand(a.size()).type_as(a.data))
        sample = (a + ((b - a) * u))
        return sample
    def log_density(self, sample, params=None):
        if (params is not None):
            (a, b) = self._check_inputs(None, params)
        else:
            (a, b) = self._check_inputs(sample.size(), None)
        a = a.type_as(sample)
        b = b.type_as(sample)
        # log density is -log(b - a) inside the support and very small outside
        lpd = (- torch.log((b - a)))
        inside = ((sample >= a) & (sample <= b)).type_as(lpd)
        return ((lpd * inside) + ((1 - inside) * (- 1e10)))
def get_params(self):
        return torch.cat([self.a, self.b])
@property
def nparams(self):
return 2
@property
def ndim(self):
return 1
@property
def is_reparameterizable(self):
return True
def __repr__(self):
        tmpstr = (self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(self.a.data[0], self.b.data[0]))
return tmpstr
|
class Normal(nn.Module):
'Samples from a Normal distribution using the reparameterization trick.\n '
def __init__(self, mu=0, sigma=1):
super(Normal, self).__init__()
self.normalization = Variable(torch.Tensor([np.log((2 * np.pi))]))
self.mu = Variable(torch.Tensor([mu]))
self.logsigma = Variable(torch.Tensor([math.log(sigma)]))
def _check_inputs(self, size, mu_logsigma):
if ((size is None) and (mu_logsigma is None)):
raise ValueError('Either one of size or params should be provided.')
elif ((size is not None) and (mu_logsigma is not None)):
mu = mu_logsigma.select((- 1), 0).expand(size)
logsigma = mu_logsigma.select((- 1), 1).expand(size)
return (mu, logsigma)
elif (size is not None):
mu = self.mu.expand(size)
logsigma = self.logsigma.expand(size)
return (mu, logsigma)
elif (mu_logsigma is not None):
mu = mu_logsigma.select((- 1), 0)
logsigma = mu_logsigma.select((- 1), 1)
return (mu, logsigma)
else:
raise ValueError('Given invalid inputs: size={}, mu_logsigma={})'.format(size, mu_logsigma))
def sample(self, size=None, params=None):
(mu, logsigma) = self._check_inputs(size, params)
std_z = Variable(torch.randn(mu.size()).type_as(mu.data))
sample = ((std_z * torch.exp(logsigma)) + mu)
return sample
def log_density(self, sample, params=None):
if (params is not None):
(mu, logsigma) = self._check_inputs(None, params)
else:
(mu, logsigma) = self._check_inputs(sample.size(), None)
mu = mu.type_as(sample)
logsigma = logsigma.type_as(sample)
c = self.normalization.type_as(sample.data)
inv_sigma = torch.exp((- logsigma))
tmp = ((sample - mu) * inv_sigma)
return ((- 0.5) * (((tmp * tmp) + (2 * logsigma)) + c))
def NLL(self, params, sample_params=None):
'Analytically computes\n E_N(mu_2,sigma_2^2) [ - log N(mu_1, sigma_1^2) ]\n If mu_2, and sigma_2^2 are not provided, defaults to entropy.\n '
(mu, logsigma) = self._check_inputs(None, params)
if (sample_params is not None):
(sample_mu, sample_logsigma) = self._check_inputs(None, sample_params)
else:
(sample_mu, sample_logsigma) = (mu, logsigma)
c = self.normalization.type_as(sample_mu.data)
nll = ((((logsigma.mul((- 2)).exp() * (sample_mu - mu).pow(2)) + torch.exp((sample_logsigma.mul(2) - logsigma.mul(2)))) + (2 * logsigma)) + c)
return nll.mul(0.5)
def kld(self, params):
'Computes KL(q||p) where q is the given distribution and p\n is the standard Normal distribution.\n '
(mu, logsigma) = self._check_inputs(None, params)
kld = ((logsigma.mul(2).add(1) - mu.pow(2)) - logsigma.exp().pow(2))
kld.mul_((- 0.5))
return kld
def get_params(self):
return torch.cat([self.mu, self.logsigma])
@property
def nparams(self):
return 2
@property
def ndim(self):
return 1
@property
def is_reparameterizable(self):
return True
def __repr__(self):
tmpstr = (self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(self.mu.data[0], self.logsigma.exp().data[0]))
return tmpstr
|
class Laplace(nn.Module):
'Samples from a Laplace distribution using the reparameterization trick.\n '
def __init__(self, mu=0, scale=1):
super(Laplace, self).__init__()
self.normalization = Variable(torch.Tensor([(- math.log(2))]))
self.mu = Variable(torch.Tensor([mu]))
self.logscale = Variable(torch.Tensor([math.log(scale)]))
def _check_inputs(self, size, mu_logscale):
if ((size is None) and (mu_logscale is None)):
raise ValueError('Either one of size or params should be provided.')
elif ((size is not None) and (mu_logscale is not None)):
mu = mu_logscale.select((- 1), 0).expand(size)
logscale = mu_logscale.select((- 1), 1).expand(size)
return (mu, logscale)
elif (size is not None):
mu = self.mu.expand(size)
logscale = self.logscale.expand(size)
return (mu, logscale)
elif (mu_logscale is not None):
mu = mu_logscale.select((- 1), 0)
logscale = mu_logscale.select((- 1), 1)
return (mu, logscale)
else:
raise ValueError('Given invalid inputs: size={}, mu_logscale={})'.format(size, mu_logscale))
def sample(self, size=None, params=None):
(mu, logscale) = self._check_inputs(size, params)
scale = torch.exp(logscale)
u = (Variable(torch.rand(mu.size()).type_as(mu.data)) - 0.5)
sample = (mu - ((scale * torch.sign(u)) * torch.log(((1 - (2 * torch.abs(u))) + eps))))
return sample
def log_density(self, sample, params=None):
if (params is not None):
(mu, logscale) = self._check_inputs(None, params)
else:
(mu, logscale) = self._check_inputs(sample.size(), None)
mu = mu.type_as(sample)
logscale = logscale.type_as(sample)
c = self.normalization.type_as(sample.data)
inv_scale = torch.exp((- logscale))
ins_exp = ((- torch.abs((sample - mu))) * inv_scale)
return ((ins_exp + c) - logscale)
def get_params(self):
return torch.cat([self.mu, self.logscale])
@property
def nparams(self):
return 2
@property
def ndim(self):
return 1
@property
def is_reparameterizable(self):
return True
def __repr__(self):
tmpstr = (self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(self.mu.data[0], self.logscale.exp().data[0]))
return tmpstr
|
class SpectralNorm(object):
def __init__(self, name='weight', dim=0, eps=1e-12):
self.name = name
self.dim = dim
self.eps = eps
def compute_weight(self, module, n_power_iterations):
if (n_power_iterations < 0):
raise ValueError('Expected n_power_iterations to be non-negative, but got n_power_iterations={}'.format(n_power_iterations))
weight = getattr(module, (self.name + '_orig'))
u = getattr(module, (self.name + '_u'))
v = getattr(module, (self.name + '_v'))
weight_mat = weight
if (self.dim != 0):
weight_mat = weight_mat.permute(self.dim, *[d for d in range(weight_mat.dim()) if (d != self.dim)])
height = weight_mat.size(0)
weight_mat = weight_mat.reshape(height, (- 1))
with torch.no_grad():
for _ in range(n_power_iterations):
v = normalize(torch.matmul(weight_mat.t(), u), dim=0, eps=self.eps)
u = normalize(torch.matmul(weight_mat, v), dim=0, eps=self.eps)
setattr(module, (self.name + '_u'), u)
setattr(module, (self.name + '_v'), v)
sigma = torch.dot(u, torch.matmul(weight_mat, v))
weight = (weight / sigma)
setattr(module, self.name, weight)
def remove(self, module):
weight = getattr(module, self.name)
delattr(module, self.name)
delattr(module, (self.name + '_u'))
delattr(module, (self.name + '_orig'))
module.register_parameter(self.name, torch.nn.Parameter(weight))
def get_update_method(self, module):
def update_fn(module, n_power_iterations):
self.compute_weight(module, n_power_iterations)
return update_fn
def __call__(self, module, unused_inputs):
del unused_inputs
self.compute_weight(module, n_power_iterations=0)
if (not module.training):
r_g = getattr(module, (self.name + '_orig')).requires_grad
setattr(module, self.name, getattr(module, self.name).detach().requires_grad_(r_g))
@staticmethod
def apply(module, name, dim, eps):
fn = SpectralNorm(name, dim, eps)
weight = module._parameters[name]
height = weight.size(dim)
u = normalize(weight.new_empty(height).normal_(0, 1), dim=0, eps=fn.eps)
v = normalize(weight.new_empty(int((weight.numel() / height))).normal_(0, 1), dim=0, eps=fn.eps)
delattr(module, fn.name)
module.register_parameter((fn.name + '_orig'), weight)
module.register_buffer(fn.name, weight.data)
module.register_buffer((fn.name + '_u'), u)
module.register_buffer((fn.name + '_v'), v)
setattr(module, POWER_ITERATION_FN, types.MethodType(fn.get_update_method(module), module))
module.register_forward_pre_hook(fn)
return fn
|
def inplace_spectral_norm(module, name='weight', dim=None, eps=1e-12):
    'Applies spectral normalization to a parameter in the given module.\n\n .. math::\n \\mathbf{W} = \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\\n \\sigma(\\mathbf{W}) = \\max_{\\mathbf{h}: \\mathbf{h} \\ne 0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2}\n\n Spectral normalization stabilizes the training of discriminators (critics)\n in Generative Adversarial Networks (GANs) by rescaling the weight tensor\n with spectral norm :math:`\\sigma` of the weight matrix calculated using\n power iteration method. If the dimension of the weight tensor is greater\n than 2, it is reshaped to 2D in power iteration method to get spectral\n norm. This is implemented via a hook that calculates spectral norm and\n rescales weight before every :meth:`~Module.forward` call.\n\n See `Spectral Normalization for Generative Adversarial Networks`_ .\n\n .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957\n\n Args:\n module (nn.Module): containing module\n name (str, optional): name of weight parameter\n n_power_iterations (int, optional): number of power iterations to\n calculate spectral norm\n dim (int, optional): dimension corresponding to number of outputs,\n the default is 0, except for modules that are instances of\n ConvTranspose1/2/3d, when it is 1\n eps (float, optional): epsilon for numerical stability in\n calculating norms\n\n Returns:\n The original module with the spectral norm hook\n\n Example::\n\n >>> m = spectral_norm(nn.Linear(20, 40))\n Linear (20 -> 40)\n >>> m.weight_u.size()\n torch.Size([40])\n\n '
if (dim is None):
if isinstance(module, (torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d)):
dim = 1
else:
dim = 0
SpectralNorm.apply(module, name, dim=dim, eps=eps)
return module
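# Usage sketch (not part of the original source): register the hook on a linear
# layer and refine the u, v estimates through the update method stored under
# POWER_ITERATION_FN (a module-level constant assumed above). Assumes torch and
# torch.nn (as nn) are imported.
def _example_spectral_norm():
    m = inplace_spectral_norm(nn.Linear(20, 40))
    getattr(m, POWER_ITERATION_FN)(5)     # run 5 power iterations
    x = torch.randn(8, 20)
    y = m(x)                              # pre-hook rescales the weight by 1/sigma
    assert y.shape == (8, 40)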
|
def remove_spectral_norm(module, name='weight'):
'Removes the spectral normalization reparameterization from a module.\n\n Args:\n module (nn.Module): containing module\n name (str, optional): name of weight parameter\n\n Example:\n >>> m = spectral_norm(nn.Linear(40, 10))\n >>> remove_spectral_norm(m)\n '
for (k, hook) in module._forward_pre_hooks.items():
if (isinstance(hook, SpectralNorm) and (hook.name == name)):
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError("spectral_norm of '{}' not found in {}".format(name, module))
|
def makedirs(dirname):
if (not os.path.exists(dirname)):
os.makedirs(dirname)
|
def get_logger(logpath, filepath, package_files=[], displaying=True, saving=True, debug=False):
logger = logging.getLogger()
if debug:
level = logging.DEBUG
else:
level = logging.INFO
logger.setLevel(level)
if saving:
info_file_handler = logging.FileHandler(logpath, mode='a')
info_file_handler.setLevel(level)
logger.addHandler(info_file_handler)
if displaying:
console_handler = logging.StreamHandler()
console_handler.setLevel(level)
logger.addHandler(console_handler)
logger.info(filepath)
with open(filepath, 'r') as f:
logger.info(f.read())
for f in package_files:
logger.info(f)
with open(f, 'r') as package_f:
logger.info(package_f.read())
return logger
|
class AverageMeter(object):
'Computes and stores the average and current value'
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += (val * n)
self.count += n
self.avg = (self.sum / self.count)
|
class RunningAverageMeter(object):
'Computes and stores the average and current value'
def __init__(self, momentum=0.99):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if (self.val is None):
self.avg = val
else:
self.avg = ((self.avg * self.momentum) + (val * (1 - self.momentum)))
self.val = val
|
def inf_generator(iterable):
'Allows training with DataLoaders in a single infinite loop:\n for i, (x, y) in enumerate(inf_generator(train_loader)):\n '
iterator = iterable.__iter__()
while True:
try:
(yield iterator.__next__())
except StopIteration:
iterator = iterable.__iter__()
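# Usage sketch (not part of the original source): iterate a (hypothetical)
# DataLoader for a fixed number of steps, restarting it transparently whenever
# it is exhausted.
def _example_inf_generator(train_loader, num_iterations):
    data_gen = inf_generator(train_loader)
    for itr in range(num_iterations):
        x, y = data_gen.__next__()
        # ... one optimization step on (x, y) would go here ...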
|
def save_checkpoint(state, save, epoch):
if (not os.path.exists(save)):
os.makedirs(save)
filename = os.path.join(save, ('checkpt-%04d.pth' % epoch))
torch.save(state, filename)
|
def isnan(tensor):
return (tensor != tensor)
|
def logsumexp(value, dim=None, keepdim=False):
'Numerically stable implementation of the operation\n value.exp().sum(dim, keepdim).log()\n '
if (dim is not None):
(m, _) = torch.max(value, dim=dim, keepdim=True)
value0 = (value - m)
if (keepdim is False):
m = m.squeeze(dim)
return (m + torch.log(torch.sum(torch.exp(value0), dim=dim, keepdim=keepdim)))
else:
m = torch.max(value)
sum_exp = torch.sum(torch.exp((value - m)))
if isinstance(sum_exp, Number):
return (m + math.log(sum_exp))
else:
return (m + torch.log(sum_exp))
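# Usage sketch (not part of the original source): agrees with the naive
# exp-sum-log on well-scaled inputs and stays finite on large ones; assumes
# torch is imported.
def _example_logsumexp():
    value = torch.randn(5, 7)
    naive = value.exp().sum(dim=1).log()
    assert torch.allclose(logsumexp(value, dim=1), naive, atol=1e-5)
    assert torch.isfinite(logsumexp(torch.full((3,), 1000.0), dim=0)).all()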
|
def add_noise(x):
'\n [0, 1] -> [0, 255] -> add noise -> [0, 1]\n '
if args.add_noise:
noise = x.new().resize_as_(x).uniform_()
x = ((x * 255) + noise)
x = (x / 256)
return x
|
def update_lr(optimizer, itr):
iter_frac = min((float((itr + 1)) / max(args.warmup_iters, 1)), 1.0)
lr = (args.lr * iter_frac)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
|
def get_train_loader(train_set, epoch):
if (args.batch_size_schedule != ''):
epochs = ([0] + list(map(int, args.batch_size_schedule.split('-'))))
n_passed = sum((np.array(epochs) <= epoch))
current_batch_size = int((args.batch_size * n_passed))
else:
current_batch_size = args.batch_size
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=current_batch_size, shuffle=True, drop_last=True, pin_memory=True)
logger.info('===> Using batch size {}. Total {} iterations/epoch.'.format(current_batch_size, len(train_loader)))
return train_loader
|
def get_dataset(args):
trans = (lambda im_size: tforms.Compose([tforms.Resize(im_size), tforms.ToTensor(), add_noise]))
if (args.data == 'mnist'):
im_dim = 1
im_size = (28 if (args.imagesize is None) else args.imagesize)
train_set = dset.MNIST(root='./data', train=True, transform=trans(im_size), download=True)
test_set = dset.MNIST(root='./data', train=False, transform=trans(im_size), download=True)
elif (args.data == 'svhn'):
im_dim = 3
im_size = (32 if (args.imagesize is None) else args.imagesize)
train_set = dset.SVHN(root='./data', split='train', transform=trans(im_size), download=True)
test_set = dset.SVHN(root='./data', split='test', transform=trans(im_size), download=True)
elif (args.data == 'cifar10'):
im_dim = 3
im_size = (32 if (args.imagesize is None) else args.imagesize)
train_set = dset.CIFAR10(root='./data', train=True, transform=tforms.Compose([tforms.Resize(im_size), tforms.RandomHorizontalFlip(), tforms.ToTensor(), add_noise]), download=True)
test_set = dset.CIFAR10(root='./data', train=False, transform=trans(im_size), download=True)
elif (args.data == 'celeba'):
im_dim = 3
im_size = (64 if (args.imagesize is None) else args.imagesize)
train_set = dset.CelebA(train=True, transform=tforms.Compose([tforms.ToPILImage(), tforms.Resize(im_size), tforms.RandomHorizontalFlip(), tforms.ToTensor(), add_noise]))
test_set = dset.CelebA(train=False, transform=tforms.Compose([tforms.ToPILImage(), tforms.Resize(im_size), tforms.ToTensor(), add_noise]))
elif (args.data == 'lsun_church'):
im_dim = 3
im_size = (64 if (args.imagesize is None) else args.imagesize)
train_set = dset.LSUN('data', ['church_outdoor_train'], transform=tforms.Compose([tforms.Resize(96), tforms.RandomCrop(64), tforms.Resize(im_size), tforms.ToTensor(), add_noise]))
test_set = dset.LSUN('data', ['church_outdoor_val'], transform=tforms.Compose([tforms.Resize(96), tforms.RandomCrop(64), tforms.Resize(im_size), tforms.ToTensor(), add_noise]))
data_shape = (im_dim, im_size, im_size)
if (not args.conv):
data_shape = (((im_dim * im_size) * im_size),)
test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=args.test_batch_size, shuffle=False, drop_last=True)
return (train_set, test_loader, data_shape)
|
def compute_bits_per_dim(x, model):
zero = torch.zeros(x.shape[0], 1).to(x)
(z, delta_logp) = model(x, zero)
logpz = standard_normal_logprob(z).view(z.shape[0], (- 1)).sum(1, keepdim=True)
logpx = (logpz - delta_logp)
logpx_per_dim = (torch.sum(logpx) / x.nelement())
bits_per_dim = ((- (logpx_per_dim - np.log(256))) / np.log(2))
return bits_per_dim
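# Note (not part of the original source): with inputs rescaled to [0, 1]
# (1/256 of the 8-bit pixel scale, as in add_noise above), the conversion is
#     bits_per_dim = (log 256 - logpx_per_dim) / log 2,
# i.e. a change of variables back to the 0..255 scale followed by converting
# nats to bits.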
|
def create_model(args, data_shape, regularization_fns):
hidden_dims = tuple(map(int, args.dims.split(',')))
strides = tuple(map(int, args.strides.split(',')))
if args.multiscale:
model = odenvp.ODENVP((args.batch_size, *data_shape), n_blocks=args.num_blocks, intermediate_dims=hidden_dims, nonlinearity=args.nonlinearity, alpha=args.alpha, cnf_kwargs={'T': args.time_length, 'train_T': args.train_T, 'regularization_fns': regularization_fns})
elif args.parallel:
model = multiscale_parallel.MultiscaleParallelCNF((args.batch_size, *data_shape), n_blocks=args.num_blocks, intermediate_dims=hidden_dims, alpha=args.alpha, time_length=args.time_length)
else:
if args.autoencode:
def build_cnf():
autoencoder_diffeq = layers.AutoencoderDiffEqNet(hidden_dims=hidden_dims, input_shape=data_shape, strides=strides, conv=args.conv, layer_type=args.layer_type, nonlinearity=args.nonlinearity)
odefunc = layers.AutoencoderODEfunc(autoencoder_diffeq=autoencoder_diffeq, divergence_fn=args.divergence_fn, residual=args.residual, rademacher=args.rademacher)
cnf = layers.CNF(odefunc=odefunc, T=args.time_length, regularization_fns=regularization_fns, solver=args.solver)
return cnf
else:
def build_cnf():
diffeq = layers.ODEnet(hidden_dims=hidden_dims, input_shape=data_shape, strides=strides, conv=args.conv, layer_type=args.layer_type, nonlinearity=args.nonlinearity)
odefunc = layers.ODEfunc(diffeq=diffeq, divergence_fn=args.divergence_fn, residual=args.residual, rademacher=args.rademacher)
cnf = layers.CNF(odefunc=odefunc, T=args.time_length, train_T=args.train_T, regularization_fns=regularization_fns, solver=args.solver)
return cnf
chain = ([layers.LogitTransform(alpha=args.alpha)] if (args.alpha > 0) else [layers.ZeroMeanTransform()])
chain = (chain + [build_cnf() for _ in range(args.num_blocks)])
if args.batch_norm:
chain.append(layers.MovingBatchNorm2d(data_shape[0]))
model = layers.SequentialFlow(chain)
return model
|
def batch_iter(X, batch_size=args.batch_size, shuffle=False):
'\n X: feature tensor (shape: num_instances x num_features)\n '
if shuffle:
idxs = torch.randperm(X.shape[0])
else:
idxs = torch.arange(X.shape[0])
if X.is_cuda:
idxs = idxs.cuda()
for batch_idxs in idxs.split(batch_size):
(yield X[batch_idxs])
|
def update_lr(optimizer, n_vals_without_improvement):
global ndecs
if ((ndecs == 0) and (n_vals_without_improvement > (args.early_stopping // 3))):
for param_group in optimizer.param_groups:
param_group['lr'] = (args.lr / 10)
ndecs = 1
elif ((ndecs == 1) and (n_vals_without_improvement > ((args.early_stopping // 3) * 2))):
for param_group in optimizer.param_groups:
param_group['lr'] = (args.lr / 100)
ndecs = 2
else:
for param_group in optimizer.param_groups:
param_group['lr'] = (args.lr / (10 ** ndecs))
|
def load_data(name):
if (name == 'bsds300'):
return datasets.BSDS300()
elif (name == 'power'):
return datasets.POWER()
elif (name == 'gas'):
return datasets.GAS()
elif (name == 'hepmass'):
return datasets.HEPMASS()
elif (name == 'miniboone'):
return datasets.MINIBOONE()
else:
raise ValueError('Unknown dataset')
|
def build_model(input_dim):
hidden_dims = tuple(map(int, args.dims.split('-')))
chain = []
for i in range(args.depth):
if args.glow:
chain.append(layers.BruteForceLayer(input_dim))
chain.append(layers.MaskedCouplingLayer(input_dim, hidden_dims, 'alternate', swap=((i % 2) == 0)))
if args.batch_norm:
chain.append(layers.MovingBatchNorm1d(input_dim, bn_lag=args.bn_lag))
return layers.SequentialFlow(chain)
|
def compute_loss(x, model):
zero = torch.zeros(x.shape[0], 1).to(x)
(z, delta_logp) = model(x, zero)
logpz = standard_normal_logprob(z).view(z.shape[0], (- 1)).sum(1, keepdim=True)
logpx = (logpz - delta_logp)
loss = (- torch.mean(logpx))
return loss
|
def restore_model(model, filename):
checkpt = torch.load(filename, map_location=(lambda storage, loc: storage))
model.load_state_dict(checkpt['state_dict'])
return model
|
def construct_model():
if args.nf:
chain = []
for i in range(args.depth):
chain.append(layers.PlanarFlow(2))
return layers.SequentialFlow(chain)
else:
chain = []
for i in range(args.depth):
if args.glow:
chain.append(layers.BruteForceLayer(2))
chain.append(layers.CouplingLayer(2, swap=((i % 2) == 0)))
return layers.SequentialFlow(chain)
|
def get_transforms(model):
if args.nf:
sample_fn = None
else:
def sample_fn(z, logpz=None):
if (logpz is not None):
return model(z, logpz, reverse=True)
else:
return model(z, reverse=True)
def density_fn(x, logpx=None):
if (logpx is not None):
return model(x, logpx, reverse=False)
else:
return model(x, reverse=False)
return (sample_fn, density_fn)
|
def compute_loss(args, model, batch_size=None):
if (batch_size is None):
batch_size = args.batch_size
x = toy_data.inf_train_gen(args.data, batch_size=batch_size)
x = torch.from_numpy(x).type(torch.float32).to(device)
zero = torch.zeros(x.shape[0], 1).to(x)
(z, delta_logp) = model(x, zero)
logpz = standard_normal_logprob(z).sum(1, keepdim=True)
logpx = (logpz - delta_logp)
loss = (- torch.mean(logpx))
return loss
|
def sample_data(data=None, rng=None, batch_size=200):
'data and rng are ignored.'
inds = np.random.choice(int(probs.shape[0]), int(batch_size), p=probs)
m = means[inds]
samples = ((np.random.randn(*m.shape) * std) + m)
return samples
|
def get_transforms(model):
def sample_fn(z, logpz=None):
if (logpz is not None):
return model(z, logpz, reverse=True)
else:
return model(z, reverse=True)
def density_fn(x, logpx=None):
if (logpx is not None):
return model(x, logpx, reverse=False)
else:
return model(x, reverse=False)
return (sample_fn, density_fn)
|
def compute_loss(args, model, batch_size=None):
if (batch_size is None):
batch_size = args.batch_size
x = sample_data(args.data, batch_size=batch_size)
x = torch.from_numpy(x).type(torch.float32).to(device)
zero = torch.zeros(x.shape[0], 1).to(x)
(z, delta_logp) = model(x, zero)
logpz = standard_normal_logprob(z).sum(1, keepdim=True)
logpx = (logpz - delta_logp)
loss = (- torch.mean(logpx))
return loss
|
def standard_normal_logprob(z):
logZ = ((- 0.5) * math.log((2 * math.pi)))
return (logZ - (z.pow(2) / 2))
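# Usage sketch (not part of the original source): summing the per-dimension
# terms gives the log-density of an isotropic standard normal, which is how the
# compute_loss helpers in this section use it; assumes torch and math are
# imported.
def _example_standard_normal_logprob():
    z = torch.zeros(4, 2)
    logpz = standard_normal_logprob(z).sum(1, keepdim=True)
    # at the origin the log-density is -0.5 * d * log(2 * pi) with d = 2
    assert torch.allclose(logpz, torch.full((4, 1), -math.log(2 * math.pi)))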
|
def set_cnf_options(args, model):
def _set(module):
if isinstance(module, layers.CNF):
module.solver = args.solver
module.atol = args.atol
module.rtol = args.rtol
if (args.step_size is not None):
module.solver_options['step_size'] = args.step_size
if (args.solver in ['fixed_adams', 'explicit_adams']):
module.solver_options['max_order'] = 4
module.test_solver = (args.test_solver if args.test_solver else args.solver)
module.test_atol = (args.test_atol if args.test_atol else args.atol)
module.test_rtol = (args.test_rtol if args.test_rtol else args.rtol)
if isinstance(module, layers.ODEfunc):
module.rademacher = args.rademacher
module.residual = args.residual
model.apply(_set)
|
def override_divergence_fn(model, divergence_fn):
def _set(module):
if isinstance(module, layers.ODEfunc):
if (divergence_fn == 'brute_force'):
module.divergence_fn = divergence_bf
elif (divergence_fn == 'approximate'):
module.divergence_fn = divergence_approx
model.apply(_set)
|
def count_nfe(model):
class AccNumEvals(object):
def __init__(self):
self.num_evals = 0
def __call__(self, module):
if isinstance(module, layers.ODEfunc):
self.num_evals += module.num_evals()
accumulator = AccNumEvals()
model.apply(accumulator)
return accumulator.num_evals
|
def count_parameters(model):
return sum((p.numel() for p in model.parameters() if p.requires_grad))
|
def count_total_time(model):
class Accumulator(object):
def __init__(self):
self.total_time = 0
def __call__(self, module):
if isinstance(module, layers.CNF):
self.total_time = (self.total_time + (module.sqrt_end_time * module.sqrt_end_time))
accumulator = Accumulator()
model.apply(accumulator)
return accumulator.total_time
|
def add_spectral_norm(model, logger=None):
'Applies spectral norm to all modules within the scope of a CNF.'
def apply_spectral_norm(module):
if ('weight' in module._parameters):
if logger:
logger.info('Adding spectral norm to {}'.format(module))
spectral_norm.inplace_spectral_norm(module, 'weight')
def find_cnf(module):
if isinstance(module, layers.CNF):
module.apply(apply_spectral_norm)
else:
for child in module.children():
find_cnf(child)
find_cnf(model)
|
def spectral_norm_power_iteration(model, n_power_iterations=1):
def recursive_power_iteration(module):
if hasattr(module, spectral_norm.POWER_ITERATION_FN):
getattr(module, spectral_norm.POWER_ITERATION_FN)(n_power_iterations)
model.apply(recursive_power_iteration)
|
def append_regularization_to_log(log_message, regularization_fns, reg_states):
for (i, reg_fn) in enumerate(regularization_fns):
log_message = (((log_message + ' | ') + INV_REGULARIZATION_FNS[reg_fn]) + ': {:.8f}'.format(reg_states[i].item()))
return log_message
|
def create_regularization_fns(args):
regularization_fns = []
regularization_coeffs = []
for (arg_key, reg_fn) in six.iteritems(REGULARIZATION_FNS):
if (getattr(args, arg_key) is not None):
regularization_fns.append(reg_fn)
            regularization_coeffs.append(getattr(args, arg_key))
regularization_fns = tuple(regularization_fns)
regularization_coeffs = tuple(regularization_coeffs)
return (regularization_fns, regularization_coeffs)
|
def get_regularization(model, regularization_coeffs):
if (len(regularization_coeffs) == 0):
return None
acc_reg_states = tuple(([0.0] * len(regularization_coeffs)))
for module in model.modules():
if isinstance(module, layers.CNF):
acc_reg_states = tuple(((acc + reg) for (acc, reg) in zip(acc_reg_states, module.get_regularization_states())))
return acc_reg_states
|
def build_model_tabular(args, dims, regularization_fns=None):
hidden_dims = tuple(map(int, args.dims.split('-')))
def build_cnf():
diffeq = layers.ODEnet(hidden_dims=hidden_dims, input_shape=(dims,), strides=None, conv=False, layer_type=args.layer_type, nonlinearity=args.nonlinearity)
odefunc = layers.ODEfunc(diffeq=diffeq, divergence_fn=args.divergence_fn, residual=args.residual, rademacher=args.rademacher)
cnf = layers.CNF(odefunc=odefunc, T=args.time_length, train_T=args.train_T, regularization_fns=regularization_fns, solver=args.solver)
return cnf
chain = [build_cnf() for _ in range(args.num_blocks)]
if args.batch_norm:
bn_layers = [layers.MovingBatchNorm1d(dims, bn_lag=args.bn_lag) for _ in range(args.num_blocks)]
bn_chain = [layers.MovingBatchNorm1d(dims, bn_lag=args.bn_lag)]
for (a, b) in zip(chain, bn_layers):
bn_chain.append(a)
bn_chain.append(b)
chain = bn_chain
model = layers.SequentialFlow(chain)
set_cnf_options(args, model)
return model
|
def batch_iter(X, batch_size=args.batch_size, shuffle=False):
'\n X: feature tensor (shape: num_instances x num_features)\n '
if shuffle:
idxs = torch.randperm(X.shape[0])
else:
idxs = torch.arange(X.shape[0])
if X.is_cuda:
idxs = idxs.cuda()
for batch_idxs in idxs.split(batch_size):
(yield X[batch_idxs])
|
def update_lr(optimizer, n_vals_without_improvement):
global ndecs
if ((ndecs == 0) and (n_vals_without_improvement > (args.early_stopping // 3))):
for param_group in optimizer.param_groups:
param_group['lr'] = (args.lr / 10)
ndecs = 1
elif ((ndecs == 1) and (n_vals_without_improvement > ((args.early_stopping // 3) * 2))):
for param_group in optimizer.param_groups:
param_group['lr'] = (args.lr / 100)
ndecs = 2
else:
for param_group in optimizer.param_groups:
param_group['lr'] = (args.lr / (10 ** ndecs))
|
def load_data(name):
if (name == 'bsds300'):
return datasets.BSDS300()
elif (name == 'power'):
return datasets.POWER()
elif (name == 'gas'):
return datasets.GAS()
elif (name == 'hepmass'):
return datasets.HEPMASS()
elif (name == 'miniboone'):
return datasets.MINIBOONE()
else:
raise ValueError('Unknown dataset')
|
def compute_loss(x, model):
zero = torch.zeros(x.shape[0], 1).to(x)
(z, delta_logp) = model(x, zero)
logpz = standard_normal_logprob(z).view(z.shape[0], (- 1)).sum(1, keepdim=True)
logpx = (logpz - delta_logp)
loss = (- torch.mean(logpx))
return loss
|
def restore_model(model, filename):
checkpt = torch.load(filename, map_location=(lambda storage, loc: storage))
model.load_state_dict(checkpt['state_dict'])
return model
|
def get_transforms(model):
def sample_fn(z, logpz=None):
if (logpz is not None):
return model(z, logpz, reverse=True)
else:
return model(z, reverse=True)
def density_fn(x, logpx=None):
if (logpx is not None):
return model(x, logpx, reverse=False)
else:
return model(x, reverse=False)
return (sample_fn, density_fn)
|
def compute_loss(args, model, batch_size=None):
if (batch_size is None):
batch_size = args.batch_size
x = toy_data.inf_train_gen(args.data, batch_size=batch_size)
x = torch.from_numpy(x).type(torch.float32).to(device)
zero = torch.zeros(x.shape[0], 1).to(x)
(z, delta_logp) = model(x, zero)
logpz = standard_normal_logprob(z).sum(1, keepdim=True)
logpx = (logpz - delta_logp)
loss = (- torch.mean(logpx))
return loss
|
def run(args, kwargs):
args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_')
args.model_signature = args.model_signature.replace(':', '_')
snapshots_path = os.path.join(args.out_dir, (('vae_' + args.dataset) + '_'))
snap_dir = (snapshots_path + args.flow)
if (args.flow != 'no_flow'):
snap_dir += (('_' + 'num_flows_') + str(args.num_flows))
if (args.flow == 'orthogonal'):
snap_dir = ((snap_dir + '_num_vectors_') + str(args.num_ortho_vecs))
elif (args.flow == 'orthogonalH'):
snap_dir = ((snap_dir + '_num_householder_') + str(args.num_householder))
elif (args.flow == 'iaf'):
snap_dir = ((snap_dir + '_madehsize_') + str(args.made_h_size))
elif (args.flow == 'permutation'):
snap_dir = (((snap_dir + '_') + 'kernelsize_') + str(args.kernel_size))
elif (args.flow == 'mixed'):
snap_dir = (((snap_dir + '_') + 'num_householder_') + str(args.num_householder))
elif (args.flow == 'cnf_rank'):
snap_dir = ((((((snap_dir + '_rank_') + str(args.rank)) + '_') + args.dims) + '_num_blocks_') + str(args.num_blocks))
elif ('cnf' in args.flow):
snap_dir = ((((snap_dir + '_') + args.dims) + '_num_blocks_') + str(args.num_blocks))
if args.retrain_encoder:
snap_dir = (snap_dir + '_retrain-encoder_')
elif args.evaluate:
snap_dir = (snap_dir + '_evaluate_')
snap_dir = (((snap_dir + '__') + args.model_signature) + '/')
args.snap_dir = snap_dir
if (not os.path.exists(snap_dir)):
os.makedirs(snap_dir)
utils.makedirs(args.snap_dir)
logger = utils.get_logger(logpath=os.path.join(args.snap_dir, 'logs'), filepath=os.path.abspath(__file__))
logger.info(args)
torch.save(args, ((snap_dir + args.flow) + '.config'))
(train_loader, val_loader, test_loader, args) = load_dataset(args, **kwargs)
if (not args.evaluate):
if (args.flow == 'no_flow'):
model = VAE.VAE(args)
elif (args.flow == 'planar'):
model = VAE.PlanarVAE(args)
elif (args.flow == 'iaf'):
model = VAE.IAFVAE(args)
elif (args.flow == 'orthogonal'):
model = VAE.OrthogonalSylvesterVAE(args)
elif (args.flow == 'householder'):
model = VAE.HouseholderSylvesterVAE(args)
elif (args.flow == 'triangular'):
model = VAE.TriangularSylvesterVAE(args)
elif (args.flow == 'cnf'):
model = CNFVAE.CNFVAE(args)
elif (args.flow == 'cnf_bias'):
model = CNFVAE.AmortizedBiasCNFVAE(args)
elif (args.flow == 'cnf_hyper'):
model = CNFVAE.HypernetCNFVAE(args)
elif (args.flow == 'cnf_lyper'):
model = CNFVAE.LypernetCNFVAE(args)
elif (args.flow == 'cnf_rank'):
model = CNFVAE.AmortizedLowRankCNFVAE(args)
else:
raise ValueError('Invalid flow choice')
if args.retrain_encoder:
logger.info(f'Initializing decoder from {args.model_path}')
dec_model = torch.load(args.model_path)
dec_sd = {}
for (k, v) in dec_model.state_dict().items():
if ('p_x' in k):
dec_sd[k] = v
model.load_state_dict(dec_sd, strict=False)
if args.cuda:
logger.info('Model on GPU')
model.cuda()
logger.info(model)
if args.retrain_encoder:
parameters = []
logger.info('Optimizing over:')
for (name, param) in model.named_parameters():
if ('p_x' not in name):
logger.info(name)
parameters.append(param)
else:
parameters = model.parameters()
optimizer = optim.Adamax(parameters, lr=args.learning_rate, eps=1e-07)
train_loss = []
val_loss = []
best_loss = np.inf
best_bpd = np.inf
e = 0
epoch = 0
train_times = []
for epoch in range(1, (args.epochs + 1)):
t_start = time.time()
tr_loss = train(epoch, train_loader, model, optimizer, args, logger)
train_loss.append(tr_loss)
train_times.append((time.time() - t_start))
logger.info(('One training epoch took %.2f seconds' % (time.time() - t_start)))
(v_loss, v_bpd) = evaluate(val_loader, model, args, logger, epoch=epoch)
val_loss.append(v_loss)
if (v_loss < best_loss):
e = 0
best_loss = v_loss
if (args.input_type != 'binary'):
best_bpd = v_bpd
logger.info('->model saved<-')
torch.save(model, ((snap_dir + args.flow) + '.model'))
elif ((args.early_stopping_epochs > 0) and (epoch >= args.warmup)):
e += 1
if (e > args.early_stopping_epochs):
break
if (args.input_type == 'binary'):
logger.info('--> Early stopping: {}/{} (BEST: loss {:.4f})\n'.format(e, args.early_stopping_epochs, best_loss))
else:
logger.info('--> Early stopping: {}/{} (BEST: loss {:.4f}, bpd {:.4f})\n'.format(e, args.early_stopping_epochs, best_loss, best_bpd))
if math.isnan(v_loss):
raise ValueError('NaN encountered!')
train_loss = np.hstack(train_loss)
val_loss = np.array(val_loss)
plot_training_curve(train_loss, val_loss, fname=(snap_dir + ('/training_curve_%s.pdf' % args.flow)))
train_times = np.array(train_times)
mean_train_time = np.mean(train_times)
std_train_time = np.std(train_times, ddof=1)
logger.info(('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time)))
logger.info(args)
logger.info(('Stopped after %d epochs' % epoch))
logger.info(('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time)))
final_model = torch.load(((snap_dir + args.flow) + '.model'))
(validation_loss, validation_bpd) = evaluate(val_loader, final_model, args, logger)
else:
validation_loss = 'N/A'
validation_bpd = 'N/A'
logger.info(f'Loading model from {args.model_path}')
final_model = torch.load(args.model_path)
(test_loss, test_bpd) = evaluate(test_loader, final_model, args, logger, testing=True)
logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL): {:.4f}'.format(validation_loss))
logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST): {:.4f}'.format(test_loss))
if (args.input_type != 'binary'):
logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL) BPD : {:.4f}'.format(validation_bpd))
logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST) BPD: {:.4f}'.format(test_bpd))
|
def binary_loss_function(recon_x, x, z_mu, z_var, z_0, z_k, ldj, beta=1.0):
'\n Computes the binary loss function while summing over batch dimension, not averaged!\n :param recon_x: shape: (batch_size, num_channels, pixel_width, pixel_height), bernoulli parameters p(x=1)\n :param x: shape (batchsize, num_channels, pixel_width, pixel_height), pixel values rescaled between [0, 1].\n :param z_mu: mean of z_0\n :param z_var: variance of z_0\n :param z_0: first stochastic latent variable\n :param z_k: last stochastic latent variable\n :param ldj: log det jacobian\n :param beta: beta for kl loss\n :return: loss, ce, kl\n '
reconstruction_function = nn.BCELoss(size_average=False)
batch_size = x.size(0)
bce = reconstruction_function(recon_x, x)
log_p_zk = log_normal_standard(z_k, dim=1)
log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
summed_logs = torch.sum((log_q_z0 - log_p_zk))
summed_ldj = torch.sum(ldj)
kl = (summed_logs - summed_ldj)
loss = (bce + (beta * kl))
loss /= float(batch_size)
bce /= float(batch_size)
kl /= float(batch_size)
return (loss, bce, kl)
|
def multinomial_loss_function(x_logit, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.0):
'\n Computes the cross entropy loss function while summing over batch dimension, not averaged!\n :param x_logit: shape: (batch_size, num_classes * num_channels, pixel_width, pixel_height), real valued logits\n :param x: shape (batchsize, num_channels, pixel_width, pixel_height), pixel values rescaled between [0, 1].\n :param z_mu: mean of z_0\n :param z_var: variance of z_0\n :param z_0: first stochastic latent variable\n :param z_k: last stochastic latent variable\n :param ldj: log det jacobian\n :param args: global parameter settings\n :param beta: beta for kl loss\n :return: loss, ce, kl\n '
num_classes = 256
batch_size = x.size(0)
x_logit = x_logit.view(batch_size, num_classes, args.input_size[0], args.input_size[1], args.input_size[2])
target = (x * (num_classes - 1)).long()
ce = cross_entropy(x_logit, target, size_average=False)
log_p_zk = log_normal_standard(z_k, dim=1)
log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
summed_logs = torch.sum((log_q_z0 - log_p_zk))
summed_ldj = torch.sum(ldj)
kl = (summed_logs - summed_ldj)
loss = (ce + (beta * kl))
loss /= float(batch_size)
ce /= float(batch_size)
kl /= float(batch_size)
return (loss, ce, kl)
|
def binary_loss_array(recon_x, x, z_mu, z_var, z_0, z_k, ldj, beta=1.0):
'\n Computes the binary loss without averaging or summing over the batch dimension.\n '
batch_size = x.size(0)
if (len(ldj.size()) > 1):
ldj = ldj.view(ldj.size(0), (- 1)).sum((- 1))
bce = (- log_bernoulli(x.view(batch_size, (- 1)), recon_x.view(batch_size, (- 1)), dim=1))
log_p_zk = log_normal_standard(z_k, dim=1)
log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
logs = (log_q_z0 - log_p_zk)
loss = (bce + (beta * (logs - ldj)))
return loss
|
def multinomial_loss_array(x_logit, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.0):
'\n Computes the multinomial (cross-entropy) loss without averaging or summing over the batch dimension.\n '
num_classes = 256
batch_size = x.size(0)
x_logit = x_logit.view(batch_size, num_classes, args.input_size[0], args.input_size[1], args.input_size[2])
target = (x * (num_classes - 1)).long()
ce = cross_entropy(x_logit, target, size_average=False, reduce=False)
ce = ce.view(batch_size, (- 1)).sum(dim=1)
log_p_zk = log_normal_standard(z_k.view(batch_size, (- 1)), dim=1)
log_q_z0 = log_normal_diag(z_0.view(batch_size, (- 1)), mean=z_mu.view(batch_size, (- 1)), log_var=z_var.log().view(batch_size, (- 1)), dim=1)
logs = (log_q_z0 - log_p_zk)
loss = (ce + (beta * (logs - ldj)))
return loss
|
def cross_entropy(input, target, weight=None, size_average=True, ignore_index=(- 100), reduce=True):
'\n Taken from the master branch of pytorch, accepts (N, C, d_1, d_2, ..., d_K) input shapes\n instead of only (N, C, d_1, d_2) or (N, C).\n This criterion combines `log_softmax` and `nll_loss` in a single\n function.\n See :class:`~torch.nn.CrossEntropyLoss` for details.\n Args:\n input: Variable :math:`(N, C)` where `C = number of classes`\n target: Variable :math:`(N)` where each value is\n `0 <= targets[i] <= C-1`\n weight (Tensor, optional): a manual rescaling weight given to each\n class. If given, has to be a Tensor of size `C`\n size_average (bool, optional): By default, the losses are averaged\n over observations for each minibatch. However, if the field\n sizeAverage is set to False, the losses are instead summed\n for each minibatch. Ignored if reduce is False. Default: ``True``\n ignore_index (int, optional): Specifies a target value that is ignored\n and does not contribute to the input gradient. When size_average is\n True, the loss is averaged over non-ignored targets. Default: -100\n reduce (bool, optional): By default, the losses are averaged or summed over\n observations for each minibatch depending on size_average. When reduce\n is False, returns a loss per batch element instead and ignores\n size_average. Default: ``True``\n '
return nll_loss(F.log_softmax(input, 1), target, weight, size_average, ignore_index, reduce)
|
def nll_loss(input, target, weight=None, size_average=True, ignore_index=(- 100), reduce=True):
'\n Taken from the master branch of pytorch, accepts (N, C, d_1, d_2, ..., d_K) input shapes\n instead of only (N, C, d_1, d_2) or (N, C).\n The negative log likelihood loss.\n See :class:`~torch.nn.NLLLoss` for details.\n Args:\n input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`\n in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K > 1`\n in the case of K-dimensional loss.\n target: :math:`(N)` where each value is `0 <= targets[i] <= C-1`,\n or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K >= 1` for\n K-dimensional loss.\n weight (Tensor, optional): a manual rescaling weight given to each\n class. If given, has to be a Tensor of size `C`\n size_average (bool, optional): By default, the losses are averaged\n over observations for each minibatch. If size_average\n is False, the losses are summed for each minibatch. Default: ``True``\n ignore_index (int, optional): Specifies a target value that is ignored\n and does not contribute to the input gradient. When size_average is\n True, the loss is averaged over non-ignored targets. Default: -100\n '
dim = input.dim()
if (dim == 2):
return F.nll_loss(input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce)
elif (dim == 4):
return F.nll_loss(input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce)
elif ((dim == 3) or (dim > 4)):
n = input.size(0)
c = input.size(1)
out_size = ((n,) + input.size()[2:])
if (target.size()[1:] != input.size()[2:]):
raise ValueError('Expected target size {}, got {}'.format(out_size, target.size()))
input = input.contiguous().view(n, c, 1, (- 1))
target = target.contiguous().view(n, 1, (- 1))
if reduce:
_loss = nn.NLLLoss2d(weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce)
return _loss(input, target)
out = F.nll_loss(input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce)
return out.view(out_size)
else:
raise ValueError('Expected 2 or more dimensions (got {})'.format(dim))
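
# Hedged sanity check (added, not original code): for a 5-D input the custom
# cross_entropy above should match a manual gather over the log-softmax, summed
# over every element (size_average=False). It relies on the same legacy
# size_average/reduce/NLLLoss2d API that the functions above already use, so it
# assumes a correspondingly old PyTorch version.
def _example_kdim_cross_entropy_check():
    torch.manual_seed(0)
    logits = torch.randn(2, 5, 3, 4, 4)           # (N, C, d_1, d_2, d_3)
    target = torch.randint(0, 5, (2, 3, 4, 4))    # (N, d_1, d_2, d_3)
    ce = cross_entropy(logits, target, size_average=False)
    log_probs = F.log_softmax(logits, 1)
    manual = -log_probs.gather(1, target.unsqueeze(1)).sum()
    assert torch.allclose(ce, manual, atol=1e-3)
    return ce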
|
def calculate_loss(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.0):
'\n Picks the correct loss depending on the input type.\n '
if (args.input_type == 'binary'):
(loss, rec, kl) = binary_loss_function(x_mean, x, z_mu, z_var, z_0, z_k, ldj, beta=beta)
bpd = 0.0
elif (args.input_type == 'multinomial'):
(loss, rec, kl) = multinomial_loss_function(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args, beta=beta)
bpd = (loss.item() / (np.prod(args.input_size) * np.log(2.0)))
else:
raise ValueError(('Invalid input type for calculate loss: %s.' % args.input_type))
return (loss, rec, kl, bpd)
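
# Small illustrative helper (an assumption-labeled addition): the bits-per-dim
# value reported by calculate_loss is the per-example loss in nats divided by
# the number of input dimensions times ln(2). The default numbers are arbitrary.
def _example_bits_per_dim(loss_nats=2500.0, input_size=(1, 28, 20)):
    dims = float(np.prod(input_size))
    return loss_nats / (dims * np.log(2.0))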
|
def calculate_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args):
'\n Picks the correct loss depending on the input type.\n '
if (args.input_type == 'binary'):
loss = binary_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj)
elif (args.input_type == 'multinomial'):
loss = multinomial_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args)
else:
raise ValueError(('Invalid input type for calculate loss: %s.' % args.input_type))
return loss
|
def train(epoch, train_loader, model, opt, args, logger):
model.train()
train_loss = np.zeros(len(train_loader))
train_bpd = np.zeros(len(train_loader))
num_data = 0
beta = min([((epoch * 1.0) / max([args.warmup, 1.0])), args.max_beta])
logger.info('beta = {:5.4f}'.format(beta))
end = time.time()
for (batch_idx, (data, _)) in enumerate(train_loader):
if args.cuda:
data = data.cuda()
if args.dynamic_binarization:
data = torch.bernoulli(data)
data = data.view((- 1), *args.input_size)
opt.zero_grad()
(x_mean, z_mu, z_var, ldj, z0, zk) = model(data)
if ('cnf' in args.flow):
f_nfe = count_nfe(model)
(loss, rec, kl, bpd) = calculate_loss(x_mean, data, z_mu, z_var, z0, zk, ldj, args, beta=beta)
loss.backward()
if ('cnf' in args.flow):
t_nfe = count_nfe(model)
b_nfe = (t_nfe - f_nfe)
train_loss[batch_idx] = loss.item()
train_bpd[batch_idx] = bpd
opt.step()
rec = rec.item()
kl = kl.item()
num_data += len(data)
batch_time = (time.time() - end)
end = time.time()
if ((batch_idx % args.log_interval) == 0):
if (args.input_type == 'binary'):
perc = ((100.0 * batch_idx) / len(train_loader))
log_msg = 'Epoch {:3d} [{:5d}/{:5d} ({:2.0f}%)] | Time {:.3f} | Loss {:11.6f} | Rec {:11.6f} | KL {:11.6f}'.format(epoch, num_data, len(train_loader.sampler), perc, batch_time, loss.item(), rec, kl)
else:
perc = ((100.0 * batch_idx) / len(train_loader))
tmp = 'Epoch {:3d} [{:5d}/{:5d} ({:2.0f}%)] | Time {:.3f} | Loss {:11.6f} | Bits/dim {:8.6f}'
log_msg = (tmp.format(epoch, num_data, len(train_loader.sampler), perc, batch_time, loss.item(), bpd), '\trec: {:11.3f}\tkl: {:11.6f}'.format(rec, kl))
log_msg = ''.join(log_msg)
if ('cnf' in args.flow):
log_msg += ' | NFE Forward {} | NFE Backward {}'.format(f_nfe, b_nfe)
logger.info(log_msg)
if (args.input_type == 'binary'):
logger.info('====> Epoch: {:3d} Average train loss: {:.4f}'.format(epoch, (train_loss.sum() / len(train_loader))))
else:
logger.info('====> Epoch: {:3d} Average train loss: {:.4f}, average bpd: {:.4f}'.format(epoch, (train_loss.sum() / len(train_loader)), (train_bpd.sum() / len(train_loader))))
return train_loss
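
# Illustrative sketch of the KL warm-up used in train() (hypothetical helper,
# not original code): beta ramps linearly from ~0 to max_beta over the first
# `warmup` epochs and then stays at max_beta.
def _example_beta_schedule(warmup=100, max_beta=1.0, epochs=5):
    return [min(epoch * 1.0 / max(warmup, 1.0), max_beta) for epoch in range(1, epochs + 1)]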
|
def evaluate(data_loader, model, args, logger, testing=False, epoch=0):
model.eval()
loss = 0.0
batch_idx = 0
bpd = 0.0
if (args.input_type == 'binary'):
loss_type = 'elbo'
else:
loss_type = 'bpd'
if (testing and ('cnf' in args.flow)):
override_divergence_fn(model, 'brute_force')
for (data, _) in data_loader:
batch_idx += 1
if args.cuda:
data = data.cuda()
with torch.no_grad():
data = data.view((- 1), *args.input_size)
(x_mean, z_mu, z_var, ldj, z0, zk) = model(data)
(batch_loss, rec, kl, batch_bpd) = calculate_loss(x_mean, data, z_mu, z_var, z0, zk, ldj, args)
bpd += batch_bpd
loss += batch_loss.item()
if ((batch_idx == 1) and (testing is False)):
plot_reconstructions(data, x_mean, batch_loss, loss_type, epoch, args)
loss /= len(data_loader)
bpd /= len(data_loader)
if testing:
logger.info('====> Test set loss: {:.4f}'.format(loss))
if (testing and (not ('cnf' in args.flow))):
with torch.no_grad():
test_data = data_loader.dataset.tensors[0]
if args.cuda:
test_data = test_data.cuda()
logger.info('Computing log-likelihood on test set')
model.eval()
if (args.dataset == 'caltech'):
(log_likelihood, nll_bpd) = calculate_likelihood(test_data, model, args, logger, S=2000, MB=500)
else:
(log_likelihood, nll_bpd) = calculate_likelihood(test_data, model, args, logger, S=5000, MB=500)
if ('cnf' in args.flow):
override_divergence_fn(model, args.divergence_fn)
else:
log_likelihood = None
nll_bpd = None
if (args.input_type in ['multinomial']):
bpd = (loss / (np.prod(args.input_size) * np.log(2.0)))
if (testing and (not ('cnf' in args.flow))):
logger.info('====> Test set log-likelihood: {:.4f}'.format(log_likelihood))
if (args.input_type != 'binary'):
logger.info('====> Test set bpd (elbo): {:.4f}'.format(bpd))
logger.info('====> Test set bpd (log-likelihood): {:.4f}'.format((log_likelihood / (np.prod(args.input_size) * np.log(2.0)))))
if (not testing):
return (loss, bpd)
else:
return (log_likelihood, nll_bpd)
|
def log_normal_diag(x, mean, log_var, average=False, reduce=True, dim=None):
log_norm = ((- 0.5) * (log_var + (((x - mean) * (x - mean)) * log_var.exp().reciprocal())))
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
|
def log_normal_normalized(x, mean, log_var, average=False, reduce=True, dim=None):
log_norm = ((- (x - mean)) * (x - mean))
log_norm *= torch.reciprocal((2.0 * log_var.exp()))
log_norm += ((- 0.5) * log_var)
log_norm += ((- 0.5) * torch.log((2.0 * PI)))
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
|
def log_normal_standard(x, average=False, reduce=True, dim=None):
log_norm = (((- 0.5) * x) * x)
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
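
# Added commentary (not original code): log_normal_standard and log_normal_diag
# omit the -0.5*log(2*pi) constant per dimension, i.e. they return unnormalized
# log-densities. The constant cancels in the KL-style term log q(z_0) - log p(z_k)
# used by the losses above, but the raw values are not normalized log-probabilities.
# A minimal check of that observation under these assumptions:
def _example_unnormalized_constant(dim=3):
    z = torch.randn(5, dim)
    unnormalized = log_normal_standard(z, dim=1)
    exact = torch.distributions.Normal(0.0, 1.0).log_prob(z).sum(dim=1)
    const = 0.5 * float(np.log(2.0 * np.pi)) * dim
    assert torch.allclose(exact, unnormalized - const, atol=1e-4)
    return unnormalized, exact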
|
def log_bernoulli(x, mean, average=False, reduce=True, dim=None):
probs = torch.clamp(mean, min=MIN_EPSILON, max=MAX_EPSILON)
log_bern = ((x * torch.log(probs)) + ((1.0 - x) * torch.log((1.0 - probs))))
if reduce:
if average:
return torch.mean(log_bern, dim)
else:
return torch.sum(log_bern, dim)
else:
return log_bern
|
def load_static_mnist(args, **kwargs):
'\n Dataloading function for static mnist. Outputs image data in vectorized form: each image is a vector of size 784\n '
args.dynamic_binarization = False
args.input_type = 'binary'
args.input_size = [1, 28, 28]
def lines_to_np_array(lines):
return np.array([[int(i) for i in line.split()] for line in lines])
with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_train.amat')) as f:
lines = f.readlines()
x_train = lines_to_np_array(lines).astype('float32')
with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_valid.amat')) as f:
lines = f.readlines()
x_val = lines_to_np_array(lines).astype('float32')
with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_test.amat')) as f:
lines = f.readlines()
x_test = lines_to_np_array(lines).astype('float32')
np.random.shuffle(x_train)
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return (train_loader, val_loader, test_loader, args)
|
def load_freyfaces(args, **kwargs):
args.input_size = [1, 28, 20]
args.input_type = 'multinomial'
args.dynamic_binarization = False
TRAIN = 1565
VAL = 200
TEST = 200
with open('data/Freyfaces/freyfaces.pkl', 'rb') as f:
data = pickle.load(f, encoding='latin1')[0]
data = (data / 255.0)
np.random.seed(args.freyseed)
np.random.shuffle(data)
x_train = data[0:TRAIN].reshape((- 1), (28 * 20))
x_val = data[TRAIN:(TRAIN + VAL)].reshape((- 1), (28 * 20))
x_test = data[(TRAIN + VAL):((TRAIN + VAL) + TEST)].reshape((- 1), (28 * 20))
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
train = data_utils.TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return (train_loader, val_loader, test_loader, args)
|
def load_omniglot(args, **kwargs):
n_validation = 1345
args.input_size = [1, 28, 28]
args.input_type = 'binary'
args.dynamic_binarization = True
def reshape_data(data):
return data.reshape(((- 1), 28, 28)).reshape(((- 1), (28 * 28)), order='F')
omni_raw = loadmat(os.path.join('data', 'OMNIGLOT', 'chardata.mat'))
train_data = reshape_data(omni_raw['data'].T.astype('float32'))
x_test = reshape_data(omni_raw['testdata'].T.astype('float32'))
np.random.shuffle(train_data)
x_train = train_data[:(- n_validation)]
x_val = train_data[(- n_validation):]
if args.dynamic_binarization:
args.input_type = 'binary'
np.random.seed(777)
x_val = np.random.binomial(1, x_val)
x_test = np.random.binomial(1, x_test)
else:
args.input_type = 'gray'
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return (train_loader, val_loader, test_loader, args)
|
def load_caltech101silhouettes(args, **kwargs):
args.input_size = [1, 28, 28]
args.input_type = 'binary'
args.dynamic_binarization = False
def reshape_data(data):
return data.reshape(((- 1), 28, 28)).reshape(((- 1), (28 * 28)), order='F')
caltech_raw = loadmat(os.path.join('data', 'Caltech101Silhouettes', 'caltech101_silhouettes_28_split1.mat'))
x_train = (1.0 - reshape_data(caltech_raw['train_data'].astype('float32')))
np.random.shuffle(x_train)
x_val = (1.0 - reshape_data(caltech_raw['val_data'].astype('float32')))
np.random.shuffle(x_val)
x_test = (1.0 - reshape_data(caltech_raw['test_data'].astype('float32')))
y_train = caltech_raw['train_labels']
y_val = caltech_raw['val_labels']
y_test = caltech_raw['test_labels']
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return (train_loader, val_loader, test_loader, args)
|
def load_dataset(args, **kwargs):
if (args.dataset == 'mnist'):
(train_loader, val_loader, test_loader, args) = load_static_mnist(args, **kwargs)
elif (args.dataset == 'caltech'):
(train_loader, val_loader, test_loader, args) = load_caltech101silhouettes(args, **kwargs)
elif (args.dataset == 'freyfaces'):
(train_loader, val_loader, test_loader, args) = load_freyfaces(args, **kwargs)
elif (args.dataset == 'omniglot'):
(train_loader, val_loader, test_loader, args) = load_omniglot(args, **kwargs)
else:
raise Exception('Wrong name of the dataset!')
return (train_loader, val_loader, test_loader, args)
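
# Hypothetical usage sketch (added for illustration): load_dataset only needs a
# handful of attributes on the argparse namespace; the real training script
# defines many more flags, and the data files under data/ must already exist.
def _example_load_dataset_args():
    import argparse
    args = argparse.Namespace(dataset='mnist', batch_size=100)
    # train_loader, val_loader, test_loader, args = load_dataset(args)  # needs data/MNIST_static/*.amat
    return args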
|
def calculate_likelihood(X, model, args, logger, S=5000, MB=500):
N_test = X.size(0)
X = X.view((- 1), *args.input_size)
likelihood_test = []
if (S <= MB):
R = 1
else:
R = (S // MB)
S = MB
end = time.time()
for j in range(N_test):
x_single = X[j].unsqueeze(0)
a = []
for r in range(0, R):
x = x_single.expand(S, *x_single.size()[1:]).contiguous()
(x_mean, z_mu, z_var, ldj, z0, zk) = model(x)
a_tmp = calculate_loss_array(x_mean, x, z_mu, z_var, z0, zk, ldj, args)
a.append((- a_tmp.cpu().data.numpy()))
a = np.asarray(a)
a = np.reshape(a, ((a.shape[0] * a.shape[1]), 1))
likelihood_x = logsumexp(a)
likelihood_test.append((likelihood_x - np.log(len(a))))
if ((j % 1) == 0):
logger.info('Progress: {:.2f}% | Time: {:.4f}'.format(((j / (1.0 * N_test)) * 100), (time.time() - end)))
end = time.time()
likelihood_test = np.array(likelihood_test)
nll = (- np.mean(likelihood_test))
if (args.input_type == 'multinomial'):
bpd = (nll / (np.prod(args.input_size) * np.log(2.0)))
elif (args.input_type == 'binary'):
bpd = 0.0
else:
raise ValueError('invalid input type!')
return (nll, bpd)
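
# Hedged numerical sketch (an addition, not original code) of the importance-sampling
# estimator used by calculate_likelihood: log p(x) is approximated by
# logsumexp_s(log w_s) - log S over S per-sample log importance weights. The random
# weights below only exercise the arithmetic; scipy.special.logsumexp is assumed to
# behave like the logsumexp used above.
def _example_importance_weighted_ll(num_samples=1000):
    from scipy.special import logsumexp as _logsumexp
    log_w = np.random.randn(num_samples, 1)   # stand-in per-sample log weights
    return _logsumexp(log_w) - np.log(num_samples)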
|
def plot_training_curve(train_loss, validation_loss, fname='training_curve.pdf', labels=None):
'\n Plots train_loss and validation loss as a function of optimization iteration\n :param train_loss: np.array of train_loss (1D or 2D)\n :param validation_loss: np.array of validation loss (1D or 2D)\n :param fname: output file name\n :param labels: if train_loss and validation loss are 2D, then labels indicate which variable is varied\n across training curves.\n :return: None\n '
plt.close()
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
if (len(train_loss.shape) == 1):
(fig, ax) = plt.subplots(nrows=1, ncols=1)
figsize = (6, 4)
if (train_loss.shape[0] == validation_loss.shape[0]):
x = np.arange(train_loss.shape[0])
ax.plot(x, train_loss, '-', lw=2.0, color='black', label='train')
ax.plot(x, validation_loss, '-', lw=2.0, color='blue', label='val')
elif ((train_loss.shape[0] % validation_loss.shape[0]) == 0):
x = np.arange(train_loss.shape[0])
ax.plot(x, train_loss, '-', lw=2.0, color='black', label='train')
x = np.arange(validation_loss.shape[0])
x = (((x + 1) * train_loss.shape[0]) / validation_loss.shape[0])
ax.plot(x, validation_loss, '-', lw=2.0, color='blue', label='val')
else:
raise ValueError('Length of train_loss and validation_loss must be equal or divisible')
miny = (np.minimum(validation_loss.min(), train_loss.min()) - 20.0)
maxy = (np.maximum(validation_loss.max(), train_loss.max()) + 30.0)
ax.set_ylim([miny, maxy])
elif (len(train_loss.shape) == 2):
cmap = plt.cm.brg
cNorm = matplotlib.colors.Normalize(vmin=0, vmax=train_loss.shape[0])
scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap)
(fig, ax) = plt.subplots(nrows=1, ncols=1)
figsize = (6, 4)
if (labels is None):
labels = [('%d' % i) for i in range(train_loss.shape[0])]
if (train_loss.shape[1] == validation_loss.shape[1]):
for i in range(train_loss.shape[0]):
color_val = scalarMap.to_rgba(i)
x = np.arange(train_loss.shape[1])
ax.plot(x, train_loss[i], '-', lw=2.0, color=color_val, label=labels[i])
ax.plot(x, validation_loss[i], '--', lw=2.0, color=color_val)
elif ((train_loss.shape[1] % validation_loss.shape[1]) == 0):
for i in range(train_loss.shape[0]):
color_val = scalarMap.to_rgba(i)
x = np.arange(train_loss.shape[1])
ax.plot(x, train_loss[i], '-', lw=2.0, color=color_val, label=labels[i])
x = np.arange(validation_loss.shape[1])
x = (((x + 1) * train_loss.shape[1]) / validation_loss.shape[1])
ax.plot(x, validation_loss[i], '-', lw=2.0, color=color_val)
miny = (np.minimum(validation_loss.min(), train_loss.min()) - 20.0)
maxy = (np.maximum(validation_loss.max(), train_loss.max()) + 30.0)
ax.set_ylim([miny, maxy])
else:
raise ValueError('train_loss and validation_loss must be 1D or 2D arrays')
ax.set_xlabel('iteration')
ax.set_ylabel('loss')
plt.title('Training and validation loss')
fig.set_size_inches(figsize)
fig.subplots_adjust(hspace=0.1)
plt.savefig(fname, bbox_inches='tight')
plt.close()
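
# Hypothetical usage of plot_training_curve (added for illustration): synthetic 1D
# loss curves where validation is evaluated ten times less often than training,
# exercising the divisible-length branch above. Writes a PDF to the given path.
def _example_plot_training_curve(fname='example_training_curve.pdf'):
    train = 100.0 + 50.0 * np.exp(-np.arange(200) / 50.0)
    val = 105.0 + 50.0 * np.exp(-np.arange(20) / 5.0)
    plot_training_curve(train, val, fname=fname)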
|