class CelebA5bit(object):
LOC = 'data/celebahq64_5bit/celeba_full_64x64_5bit.pth'
def __init__(self, train=True, transform=None):
self.dataset = torch.load(self.LOC).float().div(31)
if (not train):
self.dataset = self.dataset[:5000]
self.transform = transform
def __len__(self):
return self.dataset.size(0)
@property
def ndim(self):
return self.dataset.size(1)
def __getitem__(self, index):
x = self.dataset[index]
x = (self.transform(x) if (self.transform is not None) else x)
return (x, 0)
|
class CelebAHQ(Dataset):
TRAIN_LOC = 'data/celebahq/celeba256_train.pth'
TEST_LOC = 'data/celebahq/celeba256_validation.pth'
def __init__(self, train=True, transform=None):
super(CelebAHQ, self).__init__((self.TRAIN_LOC if train else self.TEST_LOC), transform)
|
class Imagenet32(Dataset):
TRAIN_LOC = 'data/imagenet32/train_32x32.pth'
TEST_LOC = 'data/imagenet32/valid_32x32.pth'
def __init__(self, train=True, transform=None):
super(Imagenet32, self).__init__((self.TRAIN_LOC if train else self.TEST_LOC), transform)
|
class Imagenet64(Dataset):
TRAIN_LOC = 'data/imagenet64/train_64x64.pth'
TEST_LOC = 'data/imagenet64/valid_64x64.pth'
def __init__(self, train=True, transform=None):
super(Imagenet64, self).__init__((self.TRAIN_LOC if train else self.TEST_LOC), transform, in_mem=False)
|
class ActNormNd(nn.Module):
def __init__(self, num_features, eps=1e-12):
super(ActNormNd, self).__init__()
self.num_features = num_features
self.eps = eps
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
self.register_buffer('initialized', torch.tensor(0))
@property
def shape(self):
raise NotImplementedError
def forward(self, x, logpx=None):
c = x.size(1)
if (not self.initialized):
with torch.no_grad():
x_t = x.transpose(0, 1).contiguous().view(c, (- 1))
batch_mean = torch.mean(x_t, dim=1)
batch_var = torch.var(x_t, dim=1)
batch_var = torch.max(batch_var, torch.tensor(0.2).to(batch_var))
self.bias.data.copy_((- batch_mean))
self.weight.data.copy_(((- 0.5) * torch.log(batch_var)))
self.initialized.fill_(1)
bias = self.bias.view(*self.shape).expand_as(x)
weight = self.weight.view(*self.shape).expand_as(x)
y = ((x + bias) * torch.exp(weight))
if (logpx is None):
return y
else:
return (y, (logpx - self._logdetgrad(x)))
def inverse(self, y, logpy=None):
assert self.initialized
bias = self.bias.view(*self.shape).expand_as(y)
weight = self.weight.view(*self.shape).expand_as(y)
x = ((y * torch.exp((- weight))) - bias)
if (logpy is None):
return x
else:
return (x, (logpy + self._logdetgrad(x)))
def _logdetgrad(self, x):
return self.weight.view(*self.shape).expand(*x.size()).contiguous().view(x.size(0), (- 1)).sum(1, keepdim=True)
def __repr__(self):
return '{name}({num_features})'.format(name=self.__class__.__name__, **self.__dict__)
|
class ActNorm1d(ActNormNd):
@property
def shape(self):
return [1, (- 1)]
|
class ActNorm2d(ActNormNd):
@property
def shape(self):
return [1, (- 1), 1, 1]
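# Added sketch (not in the original source): minimal usage of ActNorm2d, assuming
# torch and the classes above are in scope. The first forward pass performs the
# data-dependent initialization so each channel of the output is roughly zero-mean
# and unit-variance; inverse() undoes the affine transform exactly.
def _example_actnorm2d():
    x = (torch.randn(8, 3, 16, 16) * 2.0) + 1.0
    actnorm = ActNorm2d(3)
    y, logpx = actnorm(x, torch.zeros(8, 1))   # initializes weight/bias from this batch
    x_rec = actnorm.inverse(y)
    assert torch.allclose(x_rec, x, atol=1e-4)
    print('stats after init:', y.mean().item(), y.std().item())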
|
class Identity(nn.Module):
def forward(self, x):
return x
|
class FullSort(nn.Module):
def forward(self, x):
return torch.sort(x, 1)[0]
|
class MaxMin(nn.Module):
def forward(self, x):
(b, d) = x.shape
max_vals = torch.max(x.view(b, (d // 2), 2), 2)[0]
min_vals = torch.min(x.view(b, (d // 2), 2), 2)[0]
return torch.cat([max_vals, min_vals], 1)
|
class LipschitzCube(nn.Module):
def forward(self, x):
return ((((x >= 1).to(x) * (x - (2 / 3))) + ((x <= (- 1)).to(x) * (x + (2 / 3)))) + ((((x > (- 1)) * (x < 1)).to(x) * (x ** 3)) / 3))
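# Added sanity check (sketch): LipschitzCube has derivative x**2 on (-1, 1) and 1
# outside, so the activation is 1-Lipschitz. Assumes torch is in scope.
def _check_lipschitz_cube():
    x = torch.linspace(-3.0, 3.0, steps=2001, requires_grad=True)
    y = LipschitzCube()(x)
    grad, = torch.autograd.grad(y.sum(), x)
    assert grad.abs().max() <= 1.0 + 1e-6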
|
class SwishFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x, beta):
beta_sigm = torch.sigmoid((beta * x))
output = (x * beta_sigm)
ctx.save_for_backward(x, output, beta)
return (output / 1.1)
@staticmethod
def backward(ctx, grad_output):
(x, output, beta) = ctx.saved_tensors
beta_sigm = (output / x)
grad_x = (grad_output * ((beta * output) + (beta_sigm * (1 - (beta * output)))))
grad_beta = torch.sum((grad_output * ((x * output) - (output * output)))).expand_as(beta)
return ((grad_x / 1.1), (grad_beta / 1.1))
|
class Swish(nn.Module):
def __init__(self):
super(Swish, self).__init__()
self.beta = nn.Parameter(torch.tensor([0.5]))
def forward(self, x):
return (x * torch.sigmoid_((x * F.softplus(self.beta)))).div_(1.1)
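# Added numerical check (sketch): the division by 1.1 exists because
# max_x d/dx [x * sigmoid(b * x)] is about 1.0998 for any b > 0, so the scaled
# Swish has Lipschitz constant below 1. Assumes torch is in scope.
def _check_swish_lipschitz():
    x = torch.linspace(-10.0, 10.0, steps=5001, requires_grad=True)
    y = Swish()(x)
    grad, = torch.autograd.grad(y.sum(), x)
    assert grad.abs().max() <= 1.0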
|
class SpectralNormLinear(nn.Module):
def __init__(self, in_features, out_features, bias=True, coeff=0.97, n_iterations=None, atol=None, rtol=None, **unused_kwargs):
del unused_kwargs
super(SpectralNormLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.coeff = coeff
self.n_iterations = n_iterations
self.atol = atol
self.rtol = rtol
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
(h, w) = self.weight.shape
self.register_buffer('scale', torch.tensor(0.0))
self.register_buffer('u', F.normalize(self.weight.new_empty(h).normal_(0, 1), dim=0))
self.register_buffer('v', F.normalize(self.weight.new_empty(w).normal_(0, 1), dim=0))
self.compute_weight(True, 200)
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if (self.bias is not None):
(fan_in, _) = init._calculate_fan_in_and_fan_out(self.weight)
bound = (1 / math.sqrt(fan_in))
init.uniform_(self.bias, (- bound), bound)
def compute_weight(self, update=True, n_iterations=None, atol=None, rtol=None):
n_iterations = (self.n_iterations if (n_iterations is None) else n_iterations)
atol = (self.atol if (atol is None) else atol)
rtol = (self.rtol if (rtol is None) else rtol)
if ((n_iterations is None) and ((atol is None) or (rtol is None))):
raise ValueError('Need one of n_iteration or (atol, rtol).')
if (n_iterations is None):
n_iterations = 20000
u = self.u
v = self.v
weight = self.weight
if update:
with torch.no_grad():
itrs_used = 0.0
for _ in range(n_iterations):
old_v = v.clone()
old_u = u.clone()
v = F.normalize(torch.mv(weight.t(), u), dim=0, out=v)
u = F.normalize(torch.mv(weight, v), dim=0, out=u)
itrs_used = (itrs_used + 1)
if ((atol is not None) and (rtol is not None)):
err_u = (torch.norm((u - old_u)) / (u.nelement() ** 0.5))
err_v = (torch.norm((v - old_v)) / (v.nelement() ** 0.5))
tol_u = (atol + (rtol * torch.max(u)))
tol_v = (atol + (rtol * torch.max(v)))
if ((err_u < tol_u) and (err_v < tol_v)):
break
if (itrs_used > 0):
u = u.clone()
v = v.clone()
sigma = torch.dot(u, torch.mv(weight, v))
with torch.no_grad():
self.scale.copy_(sigma)
factor = torch.max(torch.ones(1).to(weight.device), (sigma / self.coeff))
weight = (weight / factor)
return weight
def forward(self, input):
weight = self.compute_weight(update=self.training)
return F.linear(input, weight, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}, coeff={}, n_iters={}, atol={}, rtol={}'.format(self.in_features, self.out_features, (self.bias is not None), self.coeff, self.n_iterations, self.atol, self.rtol)
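# Added sketch: after power iteration, the weight returned by compute_weight should
# have spectral norm at most `coeff`, up to the accuracy of the u/v estimates.
# Assumes torch is in scope; the small slack accounts for power iteration being
# approximate.
def _check_spectral_norm_linear():
    layer = SpectralNormLinear(64, 32, coeff=0.97, n_iterations=50)
    with torch.no_grad():
        w = layer.compute_weight(update=True, n_iterations=50)
        sigma = torch.svd(w)[1][0]   # largest singular value
    assert sigma.item() <= 0.97 * 1.01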
|
class SpectralNormConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, coeff=0.97, n_iterations=None, atol=None, rtol=None, **unused_kwargs):
del unused_kwargs
super(SpectralNormConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.coeff = coeff
self.n_iterations = n_iterations
self.atol = atol
self.rtol = rtol
self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, *self.kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
self.initialized = False
self.register_buffer('spatial_dims', torch.tensor([1.0, 1.0]))
self.register_buffer('scale', torch.tensor(0.0))
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if (self.bias is not None):
(fan_in, _) = init._calculate_fan_in_and_fan_out(self.weight)
bound = (1 / math.sqrt(fan_in))
init.uniform_(self.bias, (- bound), bound)
def _initialize_u_v(self):
if (self.kernel_size == (1, 1)):
self.register_buffer('u', F.normalize(self.weight.new_empty(self.out_channels).normal_(0, 1), dim=0))
self.register_buffer('v', F.normalize(self.weight.new_empty(self.in_channels).normal_(0, 1), dim=0))
else:
(c, h, w) = (self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item()))
with torch.no_grad():
num_input_dim = ((c * h) * w)
v = F.normalize(torch.randn(num_input_dim).to(self.weight), dim=0, eps=1e-12)
u = F.conv2d(v.view(1, c, h, w), self.weight, stride=self.stride, padding=self.padding, bias=None)
num_output_dim = (((u.shape[0] * u.shape[1]) * u.shape[2]) * u.shape[3])
self.out_shape = u.shape
u = F.normalize(torch.randn(num_output_dim).to(self.weight), dim=0, eps=1e-12)
self.register_buffer('u', u)
self.register_buffer('v', v)
def compute_weight(self, update=True, n_iterations=None):
if (not self.initialized):
self._initialize_u_v()
self.initialized = True
if (self.kernel_size == (1, 1)):
return self._compute_weight_1x1(update, n_iterations)
else:
return self._compute_weight_kxk(update, n_iterations)
def _compute_weight_1x1(self, update=True, n_iterations=None, atol=None, rtol=None):
n_iterations = (self.n_iterations if (n_iterations is None) else n_iterations)
atol = (self.atol if (atol is None) else atol)
rtol = (self.rtol if (rtol is None) else rtol)
if ((n_iterations is None) and ((atol is None) or (rtol is None))):
raise ValueError('Need one of n_iteration or (atol, rtol).')
if (n_iterations is None):
n_iterations = 20000
u = self.u
v = self.v
weight = self.weight.view(self.out_channels, self.in_channels)
if update:
with torch.no_grad():
itrs_used = 0
for _ in range(n_iterations):
old_v = v.clone()
old_u = u.clone()
v = F.normalize(torch.mv(weight.t(), u), dim=0, out=v)
u = F.normalize(torch.mv(weight, v), dim=0, out=u)
itrs_used = (itrs_used + 1)
if ((atol is not None) and (rtol is not None)):
err_u = (torch.norm((u - old_u)) / (u.nelement() ** 0.5))
err_v = (torch.norm((v - old_v)) / (v.nelement() ** 0.5))
tol_u = (atol + (rtol * torch.max(u)))
tol_v = (atol + (rtol * torch.max(v)))
if ((err_u < tol_u) and (err_v < tol_v)):
break
if (itrs_used > 0):
u = u.clone()
v = v.clone()
sigma = torch.dot(u, torch.mv(weight, v))
with torch.no_grad():
self.scale.copy_(sigma)
factor = torch.max(torch.ones(1).to(weight.device), (sigma / self.coeff))
weight = (weight / factor)
return weight.view(self.out_channels, self.in_channels, 1, 1)
def _compute_weight_kxk(self, update=True, n_iterations=None, atol=None, rtol=None):
n_iterations = (self.n_iterations if (n_iterations is None) else n_iterations)
atol = (self.atol if (atol is None) else atol)
rtol = (self.rtol if (rtol is None) else rtol)
if ((n_iterations is None) and ((atol is None) or (rtol is None))):
raise ValueError('Need one of n_iteration or (atol, rtol).')
if (n_iterations is None):
n_iterations = 20000
u = self.u
v = self.v
weight = self.weight
(c, h, w) = (self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item()))
if update:
with torch.no_grad():
itrs_used = 0
for _ in range(n_iterations):
old_u = u.clone()
old_v = v.clone()
v_s = F.conv_transpose2d(u.view(self.out_shape), weight, stride=self.stride, padding=self.padding, output_padding=0)
v = F.normalize(v_s.view((- 1)), dim=0, out=v)
u_s = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
u = F.normalize(u_s.view((- 1)), dim=0, out=u)
itrs_used = (itrs_used + 1)
if ((atol is not None) and (rtol is not None)):
err_u = (torch.norm((u - old_u)) / (u.nelement() ** 0.5))
err_v = (torch.norm((v - old_v)) / (v.nelement() ** 0.5))
tol_u = (atol + (rtol * torch.max(u)))
tol_v = (atol + (rtol * torch.max(v)))
if ((err_u < tol_u) and (err_v < tol_v)):
break
if (itrs_used > 0):
u = u.clone()
v = v.clone()
weight_v = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
weight_v = weight_v.view((- 1))
sigma = torch.dot(u.view((- 1)), weight_v)
with torch.no_grad():
self.scale.copy_(sigma)
factor = torch.max(torch.ones(1).to(weight.device), (sigma / self.coeff))
weight = (weight / factor)
return weight
def forward(self, input):
if (not self.initialized):
self.spatial_dims.copy_(torch.tensor(input.shape[2:4]).to(self.spatial_dims))
weight = self.compute_weight(update=self.training)
return F.conv2d(input, weight, self.bias, self.stride, self.padding, 1, 1)
def extra_repr(self):
s = '{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
if (self.padding != ((0,) * len(self.padding))):
s += ', padding={padding}'
if (self.bias is None):
s += ', bias=False'
s += ', coeff={}, n_iters={}, atol={}, rtol={}'.format(self.coeff, self.n_iterations, self.atol, self.rtol)
return s.format(**self.__dict__)
|
class LopLinear(nn.Linear):
'Lipschitz constant defined using operator norms.'
def __init__(self, in_features, out_features, bias=True, coeff=0.97, domain=float('inf'), codomain=float('inf'), local_constraint=True, **unused_kwargs):
del unused_kwargs
super(LopLinear, self).__init__(in_features, out_features, bias)
self.coeff = coeff
self.domain = domain
self.codomain = codomain
self.local_constraint = local_constraint
(max_across_input_dims, self.norm_type) = operator_norm_settings(self.domain, self.codomain)
self.max_across_dim = (1 if max_across_input_dims else 0)
self.register_buffer('scale', torch.tensor(0.0))
def compute_weight(self):
scale = _norm_except_dim(self.weight, self.norm_type, dim=self.max_across_dim)
if (not self.local_constraint):
scale = scale.max()
with torch.no_grad():
self.scale.copy_(scale.max())
factor = torch.max(torch.ones(1).to(self.weight), (scale / self.coeff))
return (self.weight / factor)
def forward(self, input):
weight = self.compute_weight()
return F.linear(input, weight, self.bias)
def extra_repr(self):
s = super(LopLinear, self).extra_repr()
return (s + ', coeff={}, domain={}, codomain={}, local={}'.format(self.coeff, self.domain, self.codomain, self.local_constraint))
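# Added sketch: with domain=codomain=inf, operator_norm_settings selects the per-row
# 1-norm, so compute_weight caps the maximum absolute row sum -- the inf->inf
# operator norm -- at `coeff`. Assumes torch is in scope.
def _check_lop_linear_inf_norm():
    layer = LopLinear(128, 64, coeff=0.97, domain=float('inf'), codomain=float('inf'))
    with torch.no_grad():
        w = layer.compute_weight()
        row_sums = w.abs().sum(dim=1)
    assert row_sums.max() <= 0.97 + 1e-5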
|
class LopConv2d(nn.Conv2d):
'Lipschitz constant defined using operator norms.'
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, coeff=0.97, domain=float('inf'), codomain=float('inf'), local_constraint=True, **unused_kwargs):
del unused_kwargs
super(LopConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, bias)
self.coeff = coeff
self.domain = domain
self.codomain = codomain
self.local_constraint = local_constraint
(max_across_input_dims, self.norm_type) = operator_norm_settings(self.domain, self.codomain)
self.max_across_dim = (1 if max_across_input_dims else 0)
self.register_buffer('scale', torch.tensor(0.0))
def compute_weight(self):
scale = _norm_except_dim(self.weight, self.norm_type, dim=self.max_across_dim)
if (not self.local_constraint):
scale = scale.max()
with torch.no_grad():
self.scale.copy_(scale.max())
factor = torch.max(torch.ones(1).to(self.weight.device), (scale / self.coeff))
return (self.weight / factor)
def forward(self, input):
weight = self.compute_weight()
return F.conv2d(input, weight, self.bias, self.stride, self.padding, 1, 1)
def extra_repr(self):
s = super(LopConv2d, self).extra_repr()
return (s + ', coeff={}, domain={}, codomain={}, local={}'.format(self.coeff, self.domain, self.codomain, self.local_constraint))
|
class LipNormLinear(nn.Linear):
'Lipschitz constant defined using operator norms.'
def __init__(self, in_features, out_features, bias=True, coeff=0.97, domain=float('inf'), codomain=float('inf'), local_constraint=True, **unused_kwargs):
del unused_kwargs
super(LipNormLinear, self).__init__(in_features, out_features, bias)
self.coeff = coeff
self.domain = domain
self.codomain = codomain
self.local_constraint = local_constraint
(max_across_input_dims, self.norm_type) = operator_norm_settings(self.domain, self.codomain)
self.max_across_dim = (1 if max_across_input_dims else 0)
with torch.no_grad():
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self.max_across_dim)
if (not self.local_constraint):
w_scale = w_scale.max()
self.scale = nn.Parameter(_logit((w_scale / self.coeff)))
def compute_weight(self):
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self.max_across_dim)
if (not self.local_constraint):
w_scale = w_scale.max()
return (((self.weight / w_scale) * torch.sigmoid(self.scale)) * self.coeff)
def forward(self, input):
weight = self.compute_weight()
return F.linear(input, weight, self.bias)
def extra_repr(self):
s = super(LipNormLinear, self).extra_repr()
return (s + ', coeff={}, domain={}, codomain={}, local={}'.format(self.coeff, self.domain, self.codomain, self.local_constraint))
|
class LipNormConv2d(nn.Conv2d):
'Lipschitz constant defined using operator norms.'
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, coeff=0.97, domain=float('inf'), codomain=float('inf'), local_constraint=True, **unused_kwargs):
del unused_kwargs
super(LipNormConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, bias)
self.coeff = coeff
self.domain = domain
self.codomain = codomain
self.local_constraint = local_constraint
(max_across_input_dims, self.norm_type) = operator_norm_settings(self.domain, self.codomain)
self.max_across_dim = (1 if max_across_input_dims else 0)
with torch.no_grad():
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self.max_across_dim)
if (not self.local_constraint):
w_scale = w_scale.max()
self.scale = nn.Parameter(_logit((w_scale / self.coeff)))
def compute_weight(self):
w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self.max_across_dim)
if (not self.local_constraint):
w_scale = w_scale.max()
return ((self.weight / w_scale) * torch.sigmoid(self.scale))
def forward(self, input):
weight = self.compute_weight()
return F.conv2d(input, weight, self.bias, self.stride, self.padding, 1, 1)
def extra_repr(self):
s = super(LipNormConv2d, self).extra_repr()
return (s + ', coeff={}, domain={}, codomain={}, local={}'.format(self.coeff, self.domain, self.codomain, self.local_constraint))
|
def _logit(p):
p = torch.max((torch.ones(1) * 0.1), torch.min((torch.ones(1) * 0.9), p))
return (torch.log((p + 1e-10)) - torch.log(((1 - p) + 1e-10)))
|
def _norm_except_dim(w, norm_type, dim):
if ((norm_type == 1) or (norm_type == 2)):
return torch.norm_except_dim(w, norm_type, dim)
elif (norm_type == float('inf')):
return _max_except_dim(w, dim)
|
def _max_except_dim(input, dim):
maxed = input
for axis in range((input.ndimension() - 1), dim, (- 1)):
(maxed, _) = maxed.max(axis, keepdim=True)
for axis in range((dim - 1), (- 1), (- 1)):
(maxed, _) = maxed.max(axis, keepdim=True)
return maxed
|
def operator_norm_settings(domain, codomain):
if ((domain == 1) and (codomain == 1)):
max_across_input_dims = True
norm_type = 1
elif ((domain == 1) and (codomain == 2)):
max_across_input_dims = True
norm_type = 2
elif ((domain == 1) and (codomain == float('inf'))):
max_across_input_dims = True
norm_type = float('inf')
elif ((domain == 2) and (codomain == float('inf'))):
max_across_input_dims = False
norm_type = 2
elif ((domain == float('inf')) and (codomain == float('inf'))):
max_across_input_dims = False
norm_type = 1
else:
raise ValueError('Unknown combination of domain "{}" and codomain "{}"'.format(domain, codomain))
return (max_across_input_dims, norm_type)
|
def get_linear(in_features, out_features, bias=True, coeff=0.97, domain=None, codomain=None, **kwargs):
_linear = InducedNormLinear
if (domain == 1):
if (codomain in [1, 2, float('inf')]):
_linear = LopLinear
elif (codomain == float('inf')):
if (domain in [2, float('inf')]):
_linear = LopLinear
return _linear(in_features, out_features, bias, coeff, domain, codomain, **kwargs)
|
def get_conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=True, coeff=0.97, domain=None, codomain=None, **kwargs):
_conv2d = InducedNormConv2d
if (domain == 1):
if (codomain in [1, 2, float('inf')]):
_conv2d = LopConv2d
elif (codomain == float('inf')):
if (domain in [2, float('inf')]):
_conv2d = LopConv2d
return _conv2d(in_channels, out_channels, kernel_size, stride, padding, bias, coeff, domain, codomain, **kwargs)
|
class InducedNormLinear(nn.Module):
def __init__(self, in_features, out_features, bias=True, coeff=0.97, domain=2, codomain=2, n_iterations=None, atol=None, rtol=None, zero_init=False, **unused_kwargs):
del unused_kwargs
super(InducedNormLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.coeff = coeff
self.n_iterations = n_iterations
self.atol = atol
self.rtol = rtol
self.domain = domain
self.codomain = codomain
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters(zero_init)
with torch.no_grad():
(domain, codomain) = self.compute_domain_codomain()
(h, w) = self.weight.shape
self.register_buffer('scale', torch.tensor(0.0))
self.register_buffer('u', normalize_u(self.weight.new_empty(h).normal_(0, 1), codomain))
self.register_buffer('v', normalize_v(self.weight.new_empty(w).normal_(0, 1), domain))
with torch.no_grad():
self.compute_weight(True, n_iterations=200, atol=None, rtol=None)
best_scale = self.scale.clone()
(best_u, best_v) = (self.u.clone(), self.v.clone())
if (not ((domain == 2) and (codomain == 2))):
for _ in range(10):
self.register_buffer('u', normalize_u(self.weight.new_empty(h).normal_(0, 1), codomain))
self.register_buffer('v', normalize_v(self.weight.new_empty(w).normal_(0, 1), domain))
self.compute_weight(True, n_iterations=200)
if (self.scale > best_scale):
(best_u, best_v) = (self.u.clone(), self.v.clone())
self.u.copy_(best_u)
self.v.copy_(best_v)
def reset_parameters(self, zero_init=False):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if zero_init:
self.weight.data.div_(1000)
if (self.bias is not None):
(fan_in, _) = init._calculate_fan_in_and_fan_out(self.weight)
bound = (1 / math.sqrt(fan_in))
init.uniform_(self.bias, (- bound), bound)
def compute_domain_codomain(self):
if torch.is_tensor(self.domain):
domain = asym_squash(self.domain)
codomain = asym_squash(self.codomain)
else:
(domain, codomain) = (self.domain, self.codomain)
return (domain, codomain)
def compute_one_iter(self):
(domain, codomain) = self.compute_domain_codomain()
u = self.u.detach()
v = self.v.detach()
weight = self.weight.detach()
u = normalize_u(torch.mv(weight, v), codomain)
v = normalize_v(torch.mv(weight.t(), u), domain)
return torch.dot(u, torch.mv(weight, v))
def compute_weight(self, update=True, n_iterations=None, atol=None, rtol=None):
u = self.u
v = self.v
weight = self.weight
if update:
n_iterations = (self.n_iterations if (n_iterations is None) else n_iterations)
atol = (self.atol if (atol is None) else atol)
rtol = (self.rtol if (rtol is None) else rtol)
if ((n_iterations is None) and ((atol is None) or (rtol is None))):
raise ValueError('Need one of n_iteration or (atol, rtol).')
max_itrs = 200
if (n_iterations is not None):
max_itrs = n_iterations
with torch.no_grad():
(domain, codomain) = self.compute_domain_codomain()
for _ in range(max_itrs):
if ((n_iterations is None) and (atol is not None) and (rtol is not None)):
old_v = v.clone()
old_u = u.clone()
u = normalize_u(torch.mv(weight, v), codomain, out=u)
v = normalize_v(torch.mv(weight.t(), u), domain, out=v)
if ((n_iterations is None) and (atol is not None) and (rtol is not None)):
err_u = (torch.norm((u - old_u)) / (u.nelement() ** 0.5))
err_v = (torch.norm((v - old_v)) / (v.nelement() ** 0.5))
tol_u = (atol + (rtol * torch.max(u)))
tol_v = (atol + (rtol * torch.max(v)))
if ((err_u < tol_u) and (err_v < tol_v)):
break
self.v.copy_(v)
self.u.copy_(u)
u = u.clone()
v = v.clone()
sigma = torch.dot(u, torch.mv(weight, v))
with torch.no_grad():
self.scale.copy_(sigma)
factor = torch.max(torch.ones(1).to(weight.device), (sigma / self.coeff))
weight = (weight / factor)
return weight
def forward(self, input):
weight = self.compute_weight(update=False)
return F.linear(input, weight, self.bias)
def extra_repr(self):
(domain, codomain) = self.compute_domain_codomain()
return 'in_features={}, out_features={}, bias={}, coeff={}, domain={:.2f}, codomain={:.2f}, n_iters={}, atol={}, rtol={}, learnable_ord={}'.format(self.in_features, self.out_features, (self.bias is not None), self.coeff, domain, codomain, self.n_iterations, self.atol, self.rtol, torch.is_tensor(self.domain))
|
class InducedNormConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, coeff=0.97, domain=2, codomain=2, n_iterations=None, atol=None, rtol=None, **unused_kwargs):
del unused_kwargs
super(InducedNormConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.coeff = coeff
self.n_iterations = n_iterations
self.domain = domain
self.codomain = codomain
self.atol = atol
self.rtol = rtol
self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, *self.kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
self.register_buffer('initialized', torch.tensor(0))
self.register_buffer('spatial_dims', torch.tensor([1.0, 1.0]))
self.register_buffer('scale', torch.tensor(0.0))
self.register_buffer('u', self.weight.new_empty(self.out_channels))
self.register_buffer('v', self.weight.new_empty(self.in_channels))
def compute_domain_codomain(self):
if torch.is_tensor(self.domain):
domain = asym_squash(self.domain)
codomain = asym_squash(self.codomain)
else:
(domain, codomain) = (self.domain, self.codomain)
return (domain, codomain)
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if (self.bias is not None):
(fan_in, _) = init._calculate_fan_in_and_fan_out(self.weight)
bound = (1 / math.sqrt(fan_in))
init.uniform_(self.bias, (- bound), bound)
def _initialize_u_v(self):
with torch.no_grad():
(domain, codomain) = self.compute_domain_codomain()
if (self.kernel_size == (1, 1)):
self.u.resize_(self.out_channels).normal_(0, 1)
self.u.copy_(normalize_u(self.u, codomain))
self.v.resize_(self.in_channels).normal_(0, 1)
self.v.copy_(normalize_v(self.v, domain))
else:
(c, h, w) = (self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item()))
with torch.no_grad():
num_input_dim = ((c * h) * w)
self.v.resize_(num_input_dim).normal_(0, 1)
self.v.copy_(normalize_v(self.v, domain))
u = F.conv2d(self.v.view(1, c, h, w), self.weight, stride=self.stride, padding=self.padding, bias=None)
num_output_dim = (((u.shape[0] * u.shape[1]) * u.shape[2]) * u.shape[3])
self.u.resize_(num_output_dim).normal_(0, 1)
self.u.copy_(normalize_u(self.u, codomain))
self.initialized.fill_(1)
self.compute_weight(True)
best_scale = self.scale.clone()
(best_u, best_v) = (self.u.clone(), self.v.clone())
if (not ((domain == 2) and (codomain == 2))):
for _ in range(10):
if (self.kernel_size == (1, 1)):
self.u.copy_(normalize_u(self.weight.new_empty(self.out_channels).normal_(0, 1), codomain))
self.v.copy_(normalize_v(self.weight.new_empty(self.in_channels).normal_(0, 1), domain))
else:
self.u.copy_(normalize_u(torch.randn(num_output_dim).to(self.weight), codomain))
self.v.copy_(normalize_v(torch.randn(num_input_dim).to(self.weight), domain))
self.compute_weight(True, n_iterations=200)
if (self.scale > best_scale):
(best_u, best_v) = (self.u.clone(), self.v.clone())
self.u.copy_(best_u)
self.v.copy_(best_v)
def compute_one_iter(self):
if (not self.initialized):
raise ValueError('Layer needs to be initialized first.')
(domain, codomain) = self.compute_domain_codomain()
if (self.kernel_size == (1, 1)):
u = self.u.detach()
v = self.v.detach()
weight = self.weight.detach().view(self.out_channels, self.in_channels)
u = normalize_u(torch.mv(weight, v), codomain)
v = normalize_v(torch.mv(weight.t(), u), domain)
return torch.dot(u, torch.mv(weight, v))
else:
u = self.u.detach()
v = self.v.detach()
weight = self.weight.detach()
(c, h, w) = (self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item()))
u_s = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
out_shape = u_s.shape
u = normalize_u(u_s.view((- 1)), codomain)
v_s = F.conv_transpose2d(u.view(out_shape), weight, stride=self.stride, padding=self.padding, output_padding=0)
v = normalize_v(v_s.view((- 1)), domain)
weight_v = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
return torch.dot(u.view((- 1)), weight_v.view((- 1)))
def compute_weight(self, update=True, n_iterations=None, atol=None, rtol=None):
if (not self.initialized):
self._initialize_u_v()
if (self.kernel_size == (1, 1)):
return self._compute_weight_1x1(update, n_iterations, atol, rtol)
else:
return self._compute_weight_kxk(update, n_iterations, atol, rtol)
def _compute_weight_1x1(self, update=True, n_iterations=None, atol=None, rtol=None):
n_iterations = (self.n_iterations if (n_iterations is None) else n_iterations)
atol = (self.atol if (atol is None) else atol)
rtol = (self.rtol if (rtol is None) else rtol)
if ((n_iterations is None) and ((atol is None) or (rtol is None))):
raise ValueError('Need one of n_iteration or (atol, rtol).')
max_itrs = 200
if (n_iterations is not None):
max_itrs = n_iterations
u = self.u
v = self.v
weight = self.weight.view(self.out_channels, self.in_channels)
if update:
with torch.no_grad():
(domain, codomain) = self.compute_domain_codomain()
itrs_used = 0
for _ in range(max_itrs):
old_v = v.clone()
old_u = u.clone()
u = normalize_u(torch.mv(weight, v), codomain, out=u)
v = normalize_v(torch.mv(weight.t(), u), domain, out=v)
itrs_used = (itrs_used + 1)
if ((n_iterations is None) and (atol is not None) and (rtol is not None)):
err_u = (torch.norm((u - old_u)) / (u.nelement() ** 0.5))
err_v = (torch.norm((v - old_v)) / (v.nelement() ** 0.5))
tol_u = (atol + (rtol * torch.max(u)))
tol_v = (atol + (rtol * torch.max(v)))
if ((err_u < tol_u) and (err_v < tol_v)):
break
if (itrs_used > 0):
if ((domain != 1) and (domain != 2)):
self.v.copy_(v)
if ((codomain != 2) and (codomain != float('inf'))):
self.u.copy_(u)
u = u.clone()
v = v.clone()
sigma = torch.dot(u, torch.mv(weight, v))
with torch.no_grad():
self.scale.copy_(sigma)
factor = torch.max(torch.ones(1).to(weight.device), (sigma / self.coeff))
weight = (weight / factor)
return weight.view(self.out_channels, self.in_channels, 1, 1)
def _compute_weight_kxk(self, update=True, n_iterations=None, atol=None, rtol=None):
n_iterations = (self.n_iterations if (n_iterations is None) else n_iterations)
atol = (self.atol if (atol is None) else atol)
rtol = (self.rtol if (rtol is None) else rtol)
if ((n_iterations is None) and ((atol is None) or (rtol is None))):
raise ValueError('Need one of n_iteration or (atol, rtol).')
max_itrs = 200
if (n_iterations is not None):
max_itrs = n_iterations
u = self.u
v = self.v
weight = self.weight
(c, h, w) = (self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item()))
if update:
with torch.no_grad():
(domain, codomain) = self.compute_domain_codomain()
itrs_used = 0
for _ in range(max_itrs):
old_u = u.clone()
old_v = v.clone()
u_s = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
out_shape = u_s.shape
u = normalize_u(u_s.view((- 1)), codomain, out=u)
v_s = F.conv_transpose2d(u.view(out_shape), weight, stride=self.stride, padding=self.padding, output_padding=0)
v = normalize_v(v_s.view((- 1)), domain, out=v)
itrs_used = (itrs_used + 1)
if ((n_iterations is None) and (atol is not None) and (rtol is not None)):
err_u = (torch.norm((u - old_u)) / (u.nelement() ** 0.5))
err_v = (torch.norm((v - old_v)) / (v.nelement() ** 0.5))
tol_u = (atol + (rtol * torch.max(u)))
tol_v = (atol + (rtol * torch.max(v)))
if ((err_u < tol_u) and (err_v < tol_v)):
break
if (itrs_used > 0):
if (domain != 2):
self.v.copy_(v)
if (codomain != 2):
self.u.copy_(u)
v = v.clone()
u = u.clone()
weight_v = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
weight_v = weight_v.view((- 1))
sigma = torch.dot(u.view((- 1)), weight_v)
with torch.no_grad():
self.scale.copy_(sigma)
factor = torch.max(torch.ones(1).to(weight.device), (sigma / self.coeff))
weight = (weight / factor)
return weight
def forward(self, input):
if (not self.initialized):
self.spatial_dims.copy_(torch.tensor(input.shape[2:4]).to(self.spatial_dims))
weight = self.compute_weight(update=False)
return F.conv2d(input, weight, self.bias, self.stride, self.padding, 1, 1)
def extra_repr(self):
(domain, codomain) = self.compute_domain_codomain()
s = '{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
if (self.padding != ((0,) * len(self.padding))):
s += ', padding={padding}'
if (self.bias is None):
s += ', bias=False'
s += ', coeff={}, domain={:.2f}, codomain={:.2f}, n_iters={}, atol={}, rtol={}, learnable_ord={}'.format(self.coeff, domain, codomain, self.n_iterations, self.atol, self.rtol, torch.is_tensor(self.domain))
return s.format(**self.__dict__)
|
def projmax_(v):
'Inplace argmax on absolute value.'
ind = torch.argmax(torch.abs(v))
v.zero_()
v[ind] = 1
return v
|
def normalize_v(v, domain, out=None):
if ((not torch.is_tensor(domain)) and (domain == 2)):
v = F.normalize(v, p=2, dim=0, out=out)
elif (domain == 1):
v = projmax_(v)
else:
vabs = torch.abs(v)
vph = (v / vabs)
vph[torch.isnan(vph)] = 1
vabs = (vabs / torch.max(vabs))
vabs = (vabs ** (1 / (domain - 1)))
v = ((vph * vabs) / vector_norm(vabs, domain))
return v
|
def normalize_u(u, codomain, out=None):
if ((not torch.is_tensor(codomain)) and (codomain == 2)):
u = F.normalize(u, p=2, dim=0, out=out)
elif (codomain == float('inf')):
u = projmax_(u)
else:
uabs = torch.abs(u)
uph = (u / uabs)
uph[torch.isnan(uph)] = 1
uabs = (uabs / torch.max(uabs))
uabs = (uabs ** (codomain - 1))
if (codomain == 1):
u = ((uph * uabs) / vector_norm(uabs, float('inf')))
else:
u = ((uph * uabs) / vector_norm(uabs, (codomain / (codomain - 1))))
return u
|
def vector_norm(x, p):
x = x.view((- 1))
return (torch.sum((x ** p)) ** (1 / p))
|
def leaky_elu(x, a=0.3):
return ((a * x) + ((1 - a) * F.elu(x)))
|
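# Note (added): asym_squash maps the real line into the interval (1, 5), with
# asym_squash(0) = 2; the constant 0.5493061829986572 is atanh(0.5), which pins that
# midpoint. It is used in compute_domain_codomain above to map learnable
# domain/codomain parameters into a bounded range of norm orders.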
def asym_squash(x):
return ((torch.tanh((- leaky_elu(((- x) + 0.5493061829986572)))) * 2) + 3)
|
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
|
class SequentialFlow(nn.Module):
'A generalized nn.Sequential container for normalizing flows.\n '
def __init__(self, layersList):
super(SequentialFlow, self).__init__()
self.chain = nn.ModuleList(layersList)
def forward(self, x, logpx=None):
if (logpx is None):
for i in range(len(self.chain)):
x = self.chain[i](x)
return x
else:
for i in range(len(self.chain)):
(x, logpx) = self.chain[i](x, logpx)
return (x, logpx)
def inverse(self, y, logpy=None):
if (logpy is None):
for i in range((len(self.chain) - 1), (- 1), (- 1)):
y = self.chain[i].inverse(y)
return y
else:
for i in range((len(self.chain) - 1), (- 1), (- 1)):
(y, logpy) = self.chain[i].inverse(y, logpy)
return (y, logpy)
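# Added round-trip sketch: chain two invertible layers and check that inverse()
# undoes forward() and that the accumulated log-density terms cancel. Assumes torch
# is in scope and the layers above are defined in the same module.
def _check_sequential_flow_roundtrip():
    flow = SequentialFlow([ActNorm1d(4), InvertibleLinear(4)])
    x = torch.randn(16, 4)
    z, delta_logp = flow(x, torch.zeros(16, 1))   # ActNorm1d initializes on this call
    x_rec, logp_rec = flow.inverse(z, delta_logp)
    assert torch.allclose(x_rec, x, atol=1e-5)
    assert torch.allclose(logp_rec, torch.zeros(16, 1), atol=1e-5)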
|
class Inverse(nn.Module):
def __init__(self, flow):
super(Inverse, self).__init__()
self.flow = flow
def forward(self, x, logpx=None):
return self.flow.inverse(x, logpx)
def inverse(self, y, logpy=None):
return self.flow.forward(y, logpy)
|
class InvertibleLinear(nn.Module):
def __init__(self, dim):
super(InvertibleLinear, self).__init__()
self.dim = dim
self.weight = nn.Parameter(torch.eye(dim)[torch.randperm(dim)])
def forward(self, x, logpx=None):
y = F.linear(x, self.weight)
if (logpx is None):
return y
else:
return (y, (logpx - self._logdetgrad))
def inverse(self, y, logpy=None):
x = F.linear(y, self.weight.inverse())
if (logpy is None):
return x
else:
return (x, (logpy + self._logdetgrad))
@property
def _logdetgrad(self):
return torch.log(torch.abs(torch.det(self.weight)))
def extra_repr(self):
return 'dim={}'.format(self.dim)
|
class InvertibleConv2d(nn.Module):
def __init__(self, dim):
super(InvertibleConv2d, self).__init__()
self.dim = dim
self.weight = nn.Parameter(torch.eye(dim)[torch.randperm(dim)])
def forward(self, x, logpx=None):
y = F.conv2d(x, self.weight.view(self.dim, self.dim, 1, 1))
if (logpx is None):
return y
else:
return (y, (logpx - ((self._logdetgrad.expand_as(logpx) * x.shape[2]) * x.shape[3])))
def inverse(self, y, logpy=None):
x = F.conv2d(y, self.weight.inverse().view(self.dim, self.dim, 1, 1))
if (logpy is None):
return x
else:
return (x, (logpy + ((self._logdetgrad.expand_as(logpy) * x.shape[2]) * x.shape[3])))
@property
def _logdetgrad(self):
return torch.log(torch.abs(torch.det(self.weight)))
def extra_repr(self):
return 'dim={}'.format(self.dim)
|
class MovingBatchNormNd(nn.Module):
def __init__(self, num_features, eps=0.0001, decay=0.1, bn_lag=0.0, affine=True):
super(MovingBatchNormNd, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
self.decay = decay
self.bn_lag = bn_lag
self.register_buffer('step', torch.zeros(1))
if self.affine:
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('bias', None)
self.register_buffer('running_mean', torch.zeros(num_features))
self.reset_parameters()
@property
def shape(self):
raise NotImplementedError
def reset_parameters(self):
self.running_mean.zero_()
if self.affine:
self.bias.data.zero_()
def forward(self, x, logpx=None):
c = x.size(1)
used_mean = self.running_mean.clone().detach()
if self.training:
x_t = x.transpose(0, 1).contiguous().view(c, (- 1))
batch_mean = torch.mean(x_t, dim=1)
if (self.bn_lag > 0):
used_mean = (batch_mean - ((1 - self.bn_lag) * (batch_mean - used_mean.detach())))
used_mean /= (1.0 - (self.bn_lag ** (self.step[0] + 1)))
self.running_mean -= (self.decay * (self.running_mean - batch_mean.data))
self.step += 1
used_mean = used_mean.view(*self.shape).expand_as(x)
y = (x - used_mean)
if self.affine:
bias = self.bias.view(*self.shape).expand_as(x)
y = (y + bias)
if (logpx is None):
return y
else:
return (y, logpx)
def inverse(self, y, logpy=None):
used_mean = self.running_mean
if self.affine:
bias = self.bias.view(*self.shape).expand_as(y)
y = (y - bias)
used_mean = used_mean.view(*self.shape).expand_as(y)
x = (y + used_mean)
if (logpy is None):
return x
else:
return (x, logpy)
def __repr__(self):
return '{name}({num_features}, eps={eps}, decay={decay}, bn_lag={bn_lag}, affine={affine})'.format(name=self.__class__.__name__, **self.__dict__)
|
class MovingBatchNorm1d(MovingBatchNormNd):
@property
def shape(self):
return [1, (- 1)]
|
class MovingBatchNorm2d(MovingBatchNormNd):
@property
def shape(self):
return [1, (- 1), 1, 1]
|
class SqueezeLayer(nn.Module):
def __init__(self, downscale_factor):
super(SqueezeLayer, self).__init__()
self.downscale_factor = downscale_factor
def forward(self, x, logpx=None):
squeeze_x = squeeze(x, self.downscale_factor)
if (logpx is None):
return squeeze_x
else:
return (squeeze_x, logpx)
def inverse(self, y, logpy=None):
unsqueeze_y = unsqueeze(y, self.downscale_factor)
if (logpy is None):
return unsqueeze_y
else:
return (unsqueeze_y, logpy)
|
def unsqueeze(input, upscale_factor=2):
return torch.pixel_shuffle(input, upscale_factor)
|
def squeeze(input, downscale_factor=2):
'\n [:, C, H*r, W*r] -> [:, C*r^2, H, W]\n '
(batch_size, in_channels, in_height, in_width) = input.shape
out_channels = (in_channels * (downscale_factor ** 2))
out_height = (in_height // downscale_factor)
out_width = (in_width // downscale_factor)
input_view = input.reshape(batch_size, in_channels, out_height, downscale_factor, out_width, downscale_factor)
output = input_view.permute(0, 1, 3, 5, 2, 4)
return output.reshape(batch_size, out_channels, out_height, out_width)
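# Added shape check (sketch): squeeze trades spatial resolution for channels and
# unsqueeze (pixel_shuffle) is its exact inverse. Assumes torch is in scope.
def _check_squeeze_roundtrip():
    x = torch.arange(2 * 3 * 8 * 8, dtype=torch.float32).view(2, 3, 8, 8)
    y = squeeze(x, 2)                    # (2, 3, 8, 8) -> (2, 12, 4, 4)
    assert y.shape == (2, 12, 4, 4)
    assert torch.equal(unsqueeze(y, 2), x)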
|
class CosineAnnealingWarmRestarts(_LRScheduler):
'Set the learning rate of each parameter group using a cosine annealing\n schedule, where :math:`\\eta_{max}` is set to the initial lr, :math:`T_{cur}`\n is the number of epochs since the last restart and :math:`T_{i}` is the number\n of epochs between two warm restarts in SGDR:\n .. math::\n \\eta_t = \\eta_{min} + \\frac{1}{2}(\\eta_{max} - \\eta_{min})(1 +\n \\cos(\\frac{T_{cur}}{T_{i}}\\pi))\n When :math:`T_{cur}=T_{i}`, set :math:`\\eta_t = \\eta_{min}`.\n When :math:`T_{cur}=0`(after restart), set :math:`\\eta_t=\\eta_{max}`.\n It has been proposed in\n `SGDR: Stochastic Gradient Descent with Warm Restarts`_.\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n T_0 (int): Number of iterations for the first restart.\n T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1.\n eta_min (float, optional): Minimum learning rate. Default: 0.\n last_epoch (int, optional): The index of last epoch. Default: -1.\n .. _SGDR\\: Stochastic Gradient Descent with Warm Restarts:\n https://arxiv.org/abs/1608.03983\n '
def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=(- 1)):
if ((T_0 <= 0) or (not isinstance(T_0, int))):
raise ValueError('Expected positive integer T_0, but got {}'.format(T_0))
if ((T_mult < 1) or (not isinstance(T_mult, int))):
raise ValueError('Expected integer T_mult >= 1, but got {}'.format(T_mult))
self.T_0 = T_0
self.T_i = T_0
self.T_mult = T_mult
self.eta_min = eta_min
super(CosineAnnealingWarmRestarts, self).__init__(optimizer, last_epoch)
self.T_cur = last_epoch
def get_lr(self):
return [(self.eta_min + (((base_lr - self.eta_min) * (1 + math.cos(((math.pi * self.T_cur) / self.T_i)))) / 2)) for base_lr in self.base_lrs]
def step(self, epoch=None):
'Step could be called after every update, i.e. if one epoch has 10 iterations\n (number_of_train_examples / batch_size), we should call SGDR.step(0.1), SGDR.step(0.2), etc.\n This function can be called in an interleaved way.\n Example:\n >>> scheduler = SGDR(optimizer, T_0, T_mult)\n >>> for epoch in range(20):\n >>> scheduler.step()\n >>> scheduler.step(26)\n >>> scheduler.step() # scheduler.step(27), instead of scheduler(20)\n '
if (epoch is None):
epoch = (self.last_epoch + 1)
self.T_cur = (self.T_cur + 1)
if (self.T_cur >= self.T_i):
self.T_cur = (self.T_cur - self.T_i)
self.T_i = (self.T_i * self.T_mult)
elif (epoch >= self.T_0):
if (self.T_mult == 1):
self.T_cur = (epoch % self.T_0)
else:
n = int(math.log((((epoch / self.T_0) * (self.T_mult - 1)) + 1), self.T_mult))
self.T_cur = (epoch - ((self.T_0 * ((self.T_mult ** n) - 1)) / (self.T_mult - 1)))
self.T_i = (self.T_0 * (self.T_mult ** n))
else:
self.T_i = self.T_0
self.T_cur = epoch
self.last_epoch = math.floor(epoch)
for (param_group, lr) in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
|
class Adam(Optimizer):
'Implements Adam algorithm.\n\n It has been proposed in `Adam: A Method for Stochastic Optimization`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False)\n\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. _On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n '
def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
if (not (0.0 <= lr)):
raise ValueError('Invalid learning rate: {}'.format(lr))
if (not (0.0 <= eps)):
raise ValueError('Invalid epsilon value: {}'.format(eps))
if (not (0.0 <= betas[0] < 1.0)):
raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
if (not (0.0 <= betas[1] < 1.0)):
raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
'Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
(exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
(beta1, beta2) = group['betas']
state['step'] += 1
exp_avg.mul_(beta1).add_((1 - beta1), grad)
exp_avg_sq.mul_(beta2).addcmul_((1 - beta2), grad, grad)
if amsgrad:
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = (1 - (beta1 ** state['step']))
bias_correction2 = (1 - (beta2 ** state['step']))
step_size = ((group['lr'] * math.sqrt(bias_correction2)) / bias_correction1)
p.data.addcdiv_((- step_size), exp_avg, denom)
if (group['weight_decay'] != 0):
p.data.add_(((- step_size) * group['weight_decay']), p.data)
return loss
|
class Adamax(Optimizer):
'Implements Adamax algorithm (a variant of Adam based on infinity norm).\n\n It has been proposed in `Adam: A Method for Stochastic Optimization`__.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 2e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n\n __ https://arxiv.org/abs/1412.6980\n '
def __init__(self, params, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0):
if (not (0.0 <= lr)):
raise ValueError('Invalid learning rate: {}'.format(lr))
if (not (0.0 <= eps)):
raise ValueError('Invalid epsilon value: {}'.format(eps))
if (not (0.0 <= betas[0] < 1.0)):
raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
if (not (0.0 <= betas[1] < 1.0)):
raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
if (not (0.0 <= weight_decay)):
raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(Adamax, self).__init__(params, defaults)
def step(self, closure=None):
'Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adamax does not support sparse gradients')
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_inf'] = torch.zeros_like(p.data)
(exp_avg, exp_inf) = (state['exp_avg'], state['exp_inf'])
(beta1, beta2) = group['betas']
eps = group['eps']
state['step'] += 1
exp_avg.mul_(beta1).add_((1 - beta1), grad)
norm_buf = torch.cat([exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0)
torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long()))
bias_correction = (1 - (beta1 ** state['step']))
clr = (group['lr'] / bias_correction)
p.data.addcdiv_((- clr), exp_avg, exp_inf)
if (group['weight_decay'] != 0):
p.data.add_(((- clr) * group['weight_decay']), p.data)
return loss
|
class RMSprop(Optimizer):
'Implements RMSprop algorithm.\n\n Proposed by G. Hinton in his\n `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.\n\n The centered version first appears in `Generating Sequences\n With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-2)\n momentum (float, optional): momentum factor (default: 0)\n alpha (float, optional): smoothing constant (default: 0.99)\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n centered (bool, optional) : if ``True``, compute the centered RMSProp,\n the gradient is normalized by an estimation of its variance\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n\n '
def __init__(self, params, lr=0.01, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False):
if (not (0.0 <= lr)):
raise ValueError('Invalid learning rate: {}'.format(lr))
if (not (0.0 <= eps)):
raise ValueError('Invalid epsilon value: {}'.format(eps))
if (not (0.0 <= momentum)):
raise ValueError('Invalid momentum value: {}'.format(momentum))
if (not (0.0 <= weight_decay)):
raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
if (not (0.0 <= alpha)):
raise ValueError('Invalid alpha value: {}'.format(alpha))
defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay)
super(RMSprop, self).__init__(params, defaults)
def __setstate__(self, state):
super(RMSprop, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('momentum', 0)
group.setdefault('centered', False)
def step(self, closure=None):
'Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('RMSprop does not support sparse gradients')
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
state['square_avg'] = torch.zeros_like(p.data)
if (group['momentum'] > 0):
state['momentum_buffer'] = torch.zeros_like(p.data)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p.data)
square_avg = state['square_avg']
alpha = group['alpha']
state['step'] += 1
square_avg.mul_(alpha).addcmul_((1 - alpha), grad, grad)
if group['centered']:
grad_avg = state['grad_avg']
grad_avg.mul_(alpha).add_((1 - alpha), grad)
avg = square_avg.addcmul((- 1), grad_avg, grad_avg).sqrt().add_(group['eps'])
else:
avg = square_avg.sqrt().add_(group['eps'])
if (group['momentum'] > 0):
buf = state['momentum_buffer']
buf.mul_(group['momentum']).addcdiv_(grad, avg)
p.data.add_((- group['lr']), buf)
else:
p.data.addcdiv_((- group['lr']), grad, avg)
if (group['weight_decay'] != 0):
p.data.add_(((- group['lr']) * group['weight_decay']), p.data)
return loss
|
def makedirs(dirname):
if (not os.path.exists(dirname)):
os.makedirs(dirname)
|
def get_logger(logpath, filepath, package_files=[], displaying=True, saving=True, debug=False):
logger = logging.getLogger()
if debug:
level = logging.DEBUG
else:
level = logging.INFO
logger.setLevel(level)
if saving:
info_file_handler = logging.FileHandler(logpath, mode='a')
info_file_handler.setLevel(level)
logger.addHandler(info_file_handler)
if displaying:
console_handler = logging.StreamHandler()
console_handler.setLevel(level)
logger.addHandler(console_handler)
logger.info(filepath)
with open(filepath, 'r') as f:
logger.info(f.read())
for f in package_files:
logger.info(f)
with open(f, 'r') as package_f:
logger.info(package_f.read())
return logger
|
class AverageMeter(object):
'Computes and stores the average and current value'
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += (val * n)
self.count += n
self.avg = (self.sum / self.count)
|
class RunningAverageMeter(object):
'Computes and stores the average and current value'
def __init__(self, momentum=0.99):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if (self.val is None):
self.avg = val
else:
self.avg = ((self.avg * self.momentum) + (val * (1 - self.momentum)))
self.val = val
|
def inf_generator(iterable):
'Allows training with DataLoaders in a single infinite loop:\n for i, (x, y) in enumerate(inf_generator(train_loader)):\n '
iterator = iterable.__iter__()
while True:
try:
(yield iterator.__next__())
except StopIteration:
iterator = iterable.__iter__()
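# Added check (sketch): the generator restarts the underlying iterable whenever it
# is exhausted, so iteration never raises StopIteration.
def _check_inf_generator():
    gen = inf_generator([1, 2, 3])                    # a list stands in for a DataLoader
    assert [next(gen) for _ in range(7)] == [1, 2, 3, 1, 2, 3, 1]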
|
def save_checkpoint(state, save, epoch, last_checkpoints=None, num_checkpoints=None):
if (not os.path.exists(save)):
os.makedirs(save)
filename = os.path.join(save, ('checkpt-%04d.pth' % epoch))
torch.save(state, filename)
if ((last_checkpoints is not None) and (num_checkpoints is not None)):
last_checkpoints.append(epoch)
if (len(last_checkpoints) > num_checkpoints):
rm_epoch = last_checkpoints.pop(0)
os.remove(os.path.join(save, ('checkpt-%04d.pth' % rm_epoch)))
|
def isnan(tensor):
return (tensor != tensor)
|
def logsumexp(value, dim=None, keepdim=False):
'Numerically stable implementation of the operation\n value.exp().sum(dim, keepdim).log()\n '
if (dim is not None):
(m, _) = torch.max(value, dim=dim, keepdim=True)
value0 = (value - m)
if (keepdim is False):
m = m.squeeze(dim)
return (m + torch.log(torch.sum(torch.exp(value0), dim=dim, keepdim=keepdim)))
else:
m = torch.max(value)
sum_exp = torch.sum(torch.exp((value - m)))
if isinstance(sum_exp, Number):
return (m + math.log(sum_exp))
else:
return (m + torch.log(sum_exp))
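# Added check (sketch): on well-scaled inputs the stable version matches the naive
# computation. Assumes torch is in scope.
def _check_logsumexp():
    value = torch.randn(4, 6)
    naive = value.exp().sum(dim=1).log()
    assert torch.allclose(logsumexp(value, dim=1), naive, atol=1e-6)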
|
class ExponentialMovingAverage(object):
def __init__(self, module, decay=0.999):
'Initializes the model when .apply() is called the first time.\n This is to take into account data-dependent initialization that occurs in the first iteration.'
self.module = module
self.decay = decay
self.shadow_params = {}
self.nparams = sum((p.numel() for p in module.parameters()))
def init(self):
for (name, param) in self.module.named_parameters():
self.shadow_params[name] = param.data.clone()
def apply(self):
if (len(self.shadow_params) == 0):
self.init()
else:
with torch.no_grad():
for (name, param) in self.module.named_parameters():
self.shadow_params[name] -= ((1 - self.decay) * (self.shadow_params[name] - param.data))
def set(self, other_ema):
self.init()
with torch.no_grad():
for (name, param) in other_ema.shadow_params.items():
self.shadow_params[name].copy_(param)
def replace_with_ema(self):
for (name, param) in self.module.named_parameters():
param.data.copy_(self.shadow_params[name])
def swap(self):
for (name, param) in self.module.named_parameters():
tmp = self.shadow_params[name].clone()
self.shadow_params[name].copy_(param.data)
param.data.copy_(tmp)
def __repr__(self):
return '{}(decay={}, module={}, nparams={})'.format(self.__class__.__name__, self.decay, self.module.__class__.__name__, self.nparams)
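# Added usage sketch: keep an EMA of the weights during training and swap it in for
# evaluation. The tiny model and loss below are illustrative only; assumes torch is
# in scope.
def _demo_ema():
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    ema = ExponentialMovingAverage(model, decay=0.9)
    for _ in range(20):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema.apply()      # first call snapshots the params, later calls update the average
    ema.swap()           # evaluate with the averaged weights
    # ... validation would go here ...
    ema.swap()           # restore the live training weights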
|
def count_parameters(model):
return sum((p.numel() for p in model.parameters() if p.requires_grad))
|
def standard_normal_sample(size):
return torch.randn(size)
|
def standard_normal_logprob(z):
logZ = ((- 0.5) * math.log((2 * math.pi)))
return (logZ - (z.pow(2) / 2))
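# Added check (sketch): elementwise standard-normal log-density; agrees with
# torch.distributions. Assumes torch is in scope.
def _check_standard_normal_logprob():
    z = torch.randn(5, 3)
    ref = torch.distributions.Normal(0.0, 1.0).log_prob(z)
    assert torch.allclose(standard_normal_logprob(z), ref, atol=1e-6)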
|
def compute_loss(args, model, batch_size=None, beta=1.0):
if (batch_size is None):
batch_size = args.batch_size
x = toy_data.inf_train_gen(args.data, batch_size=batch_size)
x = torch.from_numpy(x).type(torch.float32).to(device)
zero = torch.zeros(x.shape[0], 1).to(x)
(z, delta_logp) = model(x, zero)
logpz = standard_normal_logprob(z).sum(1, keepdim=True)
logpx = (logpz - (beta * delta_logp))
loss = (- torch.mean(logpx))
return (loss, torch.mean(logpz), torch.mean((- delta_logp)))
|
def parse_vnorms():
ps = []
for p in args.vnorms:
if (p == 'f'):
ps.append(float('inf'))
else:
ps.append(float(p))
return (ps[:(- 1)], ps[1:])
|
def compute_p_grads(model):
scales = 0.0
nlayers = 0
for m in model.modules():
if (isinstance(m, base_layers.InducedNormConv2d) or isinstance(m, base_layers.InducedNormLinear)):
scales = (scales + m.compute_one_iter())
nlayers += 1
scales.mul((1 / nlayers)).mul(0.01).backward()
for m in model.modules():
if (isinstance(m, base_layers.InducedNormConv2d) or isinstance(m, base_layers.InducedNormLinear)):
if ((m.domain.grad is not None) and torch.isnan(m.domain.grad)):
m.domain.grad = None
|
def build_nnet(dims, activation_fn=torch.nn.ReLU):
nnet = []
(domains, codomains) = parse_vnorms()
if args.learn_p:
if args.mixed:
domains = [torch.nn.Parameter(torch.tensor(0.0)) for _ in domains]
else:
domains = ([torch.nn.Parameter(torch.tensor(0.0))] * len(domains))
codomains = (domains[1:] + [domains[0]])
for (i, (in_dim, out_dim, domain, codomain)) in enumerate(zip(dims[:(- 1)], dims[1:], domains, codomains)):
nnet.append(activation_fn())
nnet.append(base_layers.get_linear(in_dim, out_dim, coeff=args.coeff, n_iterations=args.n_lipschitz_iters, atol=args.atol, rtol=args.rtol, domain=domain, codomain=codomain, zero_init=(out_dim == 2)))
return torch.nn.Sequential(*nnet)
|
def update_lipschitz(model, n_iterations):
for m in model.modules():
if (isinstance(m, base_layers.SpectralNormConv2d) or isinstance(m, base_layers.SpectralNormLinear)):
m.compute_weight(update=True, n_iterations=n_iterations)
if (isinstance(m, base_layers.InducedNormConv2d) or isinstance(m, base_layers.InducedNormLinear)):
m.compute_weight(update=True, n_iterations=n_iterations)
|
def get_ords(model):
ords = []
for m in model.modules():
if (isinstance(m, base_layers.InducedNormConv2d) or isinstance(m, base_layers.InducedNormLinear)):
(domain, codomain) = m.compute_domain_codomain()
if torch.is_tensor(domain):
domain = domain.item()
if torch.is_tensor(codomain):
codomain = codomain.item()
ords.append(domain)
ords.append(codomain)
return ords
|
def pretty_repr(a):
return (('[[' + ','.join(list(map((lambda i: f'{i:.2f}'), a)))) + ']]')
|
class BSDS300():
    """A dataset of patches from BSDS300."""
    class Data():
        """Constructs the dataset."""
def __init__(self, data):
self.x = data[:]
self.N = self.x.shape[0]
def __init__(self):
f = h5py.File((datasets.root + 'BSDS300/BSDS300.hdf5'), 'r')
self.trn = self.Data(f['train'])
self.val = self.Data(f['validation'])
self.tst = self.Data(f['test'])
self.n_dims = self.trn.x.shape[1]
self.image_size = ([int(np.sqrt((self.n_dims + 1)))] * 2)
f.close()
|
class GAS():
class Data():
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
file = (datasets.root + 'gas/ethylene_CO.pickle')
(trn, val, tst) = load_data_and_clean_and_split(file)
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
|
def load_data(file):
data = pd.read_pickle(file)
data.drop('Meth', axis=1, inplace=True)
data.drop('Eth', axis=1, inplace=True)
data.drop('Time', axis=1, inplace=True)
return data
|
def get_correlation_numbers(data):
C = data.corr()
A = (C > 0.98)
    # DataFrame.as_matrix() was removed from pandas; .values gives the same ndarray.
    B = A.values.sum(axis=1)
return B
|
def load_data_and_clean(file):
data = load_data(file)
B = get_correlation_numbers(data)
while np.any((B > 1)):
col_to_remove = np.where((B > 1))[0][0]
col_name = data.columns[col_to_remove]
data.drop(col_name, axis=1, inplace=True)
B = get_correlation_numbers(data)
data = ((data - data.mean()) / data.std())
return data
|
def load_data_and_clean_and_split(file):
    data = load_data_and_clean(file).values
N_test = int((0.1 * data.shape[0]))
data_test = data[(- N_test):]
data_train = data[0:(- N_test)]
N_validate = int((0.1 * data_train.shape[0]))
data_validate = data_train[(- N_validate):]
data_train = data_train[0:(- N_validate)]
return (data_train, data_validate, data_test)
|
class MINIBOONE():
class Data():
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
file = (datasets.root + 'miniboone/data.npy')
(trn, val, tst) = load_data_normalised(file)
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
|
def load_data(root_path):
data = np.load(root_path)
N_test = int((0.1 * data.shape[0]))
data_test = data[(- N_test):]
data = data[0:(- N_test)]
N_validate = int((0.1 * data.shape[0]))
data_validate = data[(- N_validate):]
data_train = data[0:(- N_validate)]
return (data_train, data_validate, data_test)
|
def load_data_normalised(root_path):
(data_train, data_validate, data_test) = load_data(root_path)
data = np.vstack((data_train, data_validate))
mu = data.mean(axis=0)
s = data.std(axis=0)
data_train = ((data_train - mu) / s)
data_validate = ((data_validate - mu) / s)
data_test = ((data_test - mu) / s)
return (data_train, data_validate, data_test)
|
class POWER():
class Data():
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
(trn, val, tst) = load_data_normalised()
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
|
def load_data():
return np.load((datasets.root + 'power/data.npy'))
|
def load_data_split_with_noise():
rng = np.random.RandomState(42)
data = load_data()
rng.shuffle(data)
N = data.shape[0]
data = np.delete(data, 3, axis=1)
data = np.delete(data, 1, axis=1)
voltage_noise = (0.01 * rng.rand(N, 1))
gap_noise = (0.001 * rng.rand(N, 1))
sm_noise = rng.rand(N, 3)
time_noise = np.zeros((N, 1))
noise = np.hstack((gap_noise, voltage_noise, sm_noise, time_noise))
data = (data + noise)
N_test = int((0.1 * data.shape[0]))
data_test = data[(- N_test):]
data = data[0:(- N_test)]
N_validate = int((0.1 * data.shape[0]))
data_validate = data[(- N_validate):]
data_train = data[0:(- N_validate)]
return (data_train, data_validate, data_test)
|
def load_data_normalised():
(data_train, data_validate, data_test) = load_data_split_with_noise()
data = np.vstack((data_train, data_validate))
mu = data.mean(axis=0)
s = data.std(axis=0)
data_train = ((data_train - mu) / s)
data_validate = ((data_validate - mu) / s)
data_test = ((data_test - mu) / s)
return (data_train, data_validate, data_test)
|
def get_losses(filename):
with open(filename, 'r') as f:
lines = f.readlines()
losses = []
for line in lines:
w = re.findall('Bit/dim [^|(]*\\([0-9\\.]*\\)', line)
if w:
w = re.findall('\\([0-9\\.]*\\)', w[0])
if w:
w = re.findall('[0-9\\.]+', w[0])
if w:
losses.append(float(w[0]))
return losses
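# Illustrative sketch of the log format this parser expects (the line below is
# hypothetical, inferred from the regexes above): the parenthesised value after
# "Bit/dim" is the one that gets collected.
def _example_bitdim_parse():
    import re
    line = 'Iter 0100 | Time 1.23(1.20) | Bit/dim 3.4567(3.4210) | Steps 26(24)\n'
    w = re.findall('Bit/dim [^|(]*\\([0-9\\.]*\\)', line)
    inner = re.findall('[0-9\\.]+', re.findall('\\([0-9\\.]*\\)', w[0])[0])
    return float(inner[0])   # 3.421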
|
def get_values(filename):
with open(filename, 'r') as f:
lines = f.readlines()
losses = []
nfes = []
for line in lines:
w = re.findall('Steps [^|(]*\\([0-9\\.]*\\)', line)
if w:
w = re.findall('\\([0-9\\.]*\\)', w[0])
if w:
w = re.findall('[0-9\\.]+', w[0])
if w:
nfes.append(float(w[0]))
w = re.findall('Bit/dim [^|(]*\\([0-9\\.]*\\)', line)
if w:
w = re.findall('\\([0-9\\.]*\\)', w[0])
if w:
w = re.findall('[0-9\\.]+', w[0])
if w:
losses.append(float(w[0]))
return (losses, nfes)
|
def construct_discrete_model():
chain = []
for i in range(args.depth):
if args.glow:
chain.append(layers.BruteForceLayer(2))
chain.append(layers.CouplingLayer(2, swap=((i % 2) == 0)))
return layers.SequentialFlow(chain)
|
def get_transforms(model):
def sample_fn(z, logpz=None):
if (logpz is not None):
return model(z, logpz, reverse=True)
else:
return model(z, reverse=True)
def density_fn(x, logpx=None):
if (logpx is not None):
return model(x, logpx, reverse=False)
else:
return model(x, reverse=False)
return (sample_fn, density_fn)
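# Illustrative usage sketch (exposition only): given a trained 2-D flow `model`, draw
# samples via the reverse pass and score data via the forward pass; relies on
# standard_normal_logprob as defined earlier and assumes everything lives on CPU.
def _example_get_transforms(model):
    import torch
    sample_fn, density_fn = get_transforms(model)
    z = torch.randn(128, 2)
    x = sample_fn(z)                           # push noise through the inverse map
    zero = torch.zeros(x.shape[0], 1)
    z_back, delta_logp = density_fn(x, zero)   # latent codes and accumulated log-det
    logpx = standard_normal_logprob(z_back).sum(1, keepdim=True) - delta_logp
    return x, logpx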
|
def log_to_csv(log_filename, csv_filename):
with open(log_filename, 'r') as f:
lines = f.readlines()
with open(csv_filename, 'w', newline='') as csvfile:
fieldnames = None
writer = None
for line in lines:
if line.startswith('Iter'):
quants = _line_to_dict(line)
if (fieldnames is None):
fieldnames = quants.keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerow(quants)
|
def _line_to_dict(line):
line = re.sub(':', '', line)
line = re.sub('\\([^)]*\\)', '', line)
quants = {}
for quant_str in line.split('|'):
quant_str = quant_str.strip()
(key, val) = quant_str.split(' ')
quants[key] = val
return quants
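# Illustrative example (the log line is hypothetical): colons and the parenthesised
# running averages are stripped, and each "name value" pair becomes a dict entry.
def _example_line_to_dict():
    line = 'Iter 0100 | Time 1.23(1.20) | Bit/dim 3.45(3.42)'
    return _line_to_dict(line)   # {'Iter': '0100', 'Time': '1.23', 'Bit/dim': '3.45'}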
|
def plot_pairplot(csv_filename, fig_filename, top=None):
import seaborn as sns
import pandas as pd
sns.set(style='ticks', color_codes=True)
quants = pd.read_csv(csv_filename)
if (top is not None):
quants = quants[:top]
g = sns.pairplot(quants, kind='reg', diag_kind='kde', markers='.')
g.savefig(fig_filename)
|
def add_noise(x):
    """[0, 1] -> [0, 255] -> add noise -> [0, 1]"""
noise = x.new().resize_as_(x).uniform_()
x = ((x * 255) + noise)
x = (x / 256)
return x
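# Uniform dequantization sketch (exposition only): a pixel with integer value k
# (scaled to k/255 by ToTensor) ends up uniformly distributed in [k/256, (k+1)/256).
def _example_add_noise():
    import torch
    x = torch.tensor([[0.0, 1.0]])   # pixel values 0 and 255 after ToTensor()
    y = add_noise(x)
    return y   # y[0, 0] in [0, 1/256), y[0, 1] in [255/256, 1)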
|
def get_dataset(args):
trans = (lambda im_size: tforms.Compose([tforms.Resize(im_size), tforms.ToTensor(), add_noise]))
if (args.data == 'mnist'):
im_dim = 1
im_size = (28 if (args.imagesize is None) else args.imagesize)
train_set = dset.MNIST(root='./data', train=True, transform=trans(im_size), download=True)
test_set = dset.MNIST(root='./data', train=False, transform=trans(im_size), download=True)
elif (args.data == 'svhn'):
im_dim = 3
im_size = (32 if (args.imagesize is None) else args.imagesize)
train_set = dset.SVHN(root='./data', split='train', transform=trans(im_size), download=True)
test_set = dset.SVHN(root='./data', split='test', transform=trans(im_size), download=True)
elif (args.data == 'cifar10'):
im_dim = 3
im_size = (32 if (args.imagesize is None) else args.imagesize)
train_set = dset.CIFAR10(root='./data', train=True, transform=trans(im_size), download=True)
test_set = dset.CIFAR10(root='./data', train=False, transform=trans(im_size), download=True)
    elif (args.data == 'celeba'):
        im_dim = 3
        im_size = (64 if (args.imagesize is None) else args.imagesize)
        train_set = dset.CelebA(train=True, transform=tforms.Compose([tforms.ToPILImage(), tforms.Resize(im_size), tforms.RandomHorizontalFlip(), tforms.ToTensor(), add_noise]))
        test_set = dset.CelebA(train=False, transform=tforms.Compose([tforms.ToPILImage(), tforms.Resize(im_size), tforms.ToTensor(), add_noise]))
data_shape = (im_dim, im_size, im_size)
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=args.batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=args.batch_size, shuffle=False)
return (train_loader, test_loader, data_shape)
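# Illustrative usage sketch: the loaders can be built from a bare namespace carrying
# just the fields this function reads (the batch size here is arbitrary). Note that
# the torchvision datasets are downloaded on first use.
def _example_get_dataset():
    import argparse
    args = argparse.Namespace(data='cifar10', imagesize=None, batch_size=64)
    train_loader, test_loader, data_shape = get_dataset(args)
    return data_shape   # (3, 32, 32)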
|
def add_spectral_norm(model):
def recursive_apply_sn(parent_module):
for child_name in list(parent_module._modules.keys()):
child_module = parent_module._modules[child_name]
classname = child_module.__class__.__name__
if ((classname.find('Conv') != (- 1)) and ('weight' in child_module._parameters)):
del parent_module._modules[child_name]
parent_module.add_module(child_name, spectral_norm.spectral_norm(child_module, 'weight'))
else:
recursive_apply_sn(child_module)
recursive_apply_sn(model)
|
def build_model(args, state_dict):
(train_loader, test_loader, data_shape) = get_dataset(args)
hidden_dims = tuple(map(int, args.dims.split(',')))
strides = tuple(map(int, args.strides.split(',')))
if args.autoencode:
def build_cnf():
autoencoder_diffeq = layers.AutoencoderDiffEqNet(hidden_dims=hidden_dims, input_shape=data_shape, strides=strides, conv=args.conv, layer_type=args.layer_type, nonlinearity=args.nonlinearity)
odefunc = layers.AutoencoderODEfunc(autoencoder_diffeq=autoencoder_diffeq, divergence_fn=args.divergence_fn, residual=args.residual, rademacher=args.rademacher)
cnf = layers.CNF(odefunc=odefunc, T=args.time_length, solver=args.solver)
return cnf
else:
def build_cnf():
diffeq = layers.ODEnet(hidden_dims=hidden_dims, input_shape=data_shape, strides=strides, conv=args.conv, layer_type=args.layer_type, nonlinearity=args.nonlinearity)
odefunc = layers.ODEfunc(diffeq=diffeq, divergence_fn=args.divergence_fn, residual=args.residual, rademacher=args.rademacher)
cnf = layers.CNF(odefunc=odefunc, T=args.time_length, solver=args.solver)
return cnf
chain = [layers.LogitTransform(alpha=args.alpha), build_cnf()]
if args.batch_norm:
chain.append(layers.MovingBatchNorm2d(data_shape[0]))
model = layers.SequentialFlow(chain)
if args.spectral_norm:
add_spectral_norm(model)
model.load_state_dict(state_dict)
return (model, test_loader.dataset)
|
class Adam(Optimizer):
    """Implements Adam algorithm.

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """
def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
if (not (0.0 <= lr)):
raise ValueError('Invalid learning rate: {}'.format(lr))
if (not (0.0 <= eps)):
raise ValueError('Invalid epsilon value: {}'.format(eps))
if (not (0.0 <= betas[0] < 1.0)):
raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
if (not (0.0 <= betas[1] < 1.0)):
raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
(exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
(beta1, beta2) = group['betas']
state['step'] += 1
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
if amsgrad:
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = (1 - (beta1 ** state['step']))
bias_correction2 = (1 - (beta2 ** state['step']))
step_size = ((group['lr'] * math.sqrt(bias_correction2)) / bias_correction1)
                if (group['weight_decay'] != 0):
                    # Decoupled (AdamW-style) weight decay, applied in-place to the parameters.
                    p.data.add_(p.data, alpha=((- step_size) * group['weight_decay']))
                p.data.addcdiv_(exp_avg, denom, value=(- step_size))
return loss
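# Usage sketch (exposition only): drop-in replacement for torch.optim.Adam, except
# that weight decay appears to be applied directly to the parameters (decoupled,
# AdamW-style) rather than added to the gradient.
def _example_custom_adam():
    import torch
    w = torch.nn.Parameter(torch.randn(4, 2))
    opt = Adam([w], lr=0.001, weight_decay=0.0001)
    loss = (torch.randn(8, 4) @ w).pow(2).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()
    return w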
|
class Dataset(object):
def __init__(self, loc, transform=None):
self.dataset = torch.load(loc).float().div(255)
self.transform = transform
def __len__(self):
return self.dataset.size(0)
@property
def ndim(self):
return self.dataset.size(1)
def __getitem__(self, index):
x = self.dataset[index]
x = (self.transform(x) if (self.transform is not None) else x)
return (x, 0)
|
class CelebA(Dataset):
TRAIN_LOC = 'data/celeba/celeba_train.pth'
VAL_LOC = 'data/celeba/celeba_val.pth'
def __init__(self, train=True, transform=None):
return super(CelebA, self).__init__((self.TRAIN_LOC if train else self.VAL_LOC), transform)
|
class CNF(nn.Module):
def __init__(self, odefunc, T=1.0, train_T=False, regularization_fns=None, solver='dopri5', atol=1e-05, rtol=1e-05):
super(CNF, self).__init__()
        # T is stored via its square root so that, when train_T=True, the learned
        # end time sqrt_end_time ** 2 is guaranteed to stay non-negative.
        if train_T:
            self.register_parameter('sqrt_end_time', nn.Parameter(torch.sqrt(torch.tensor(T))))
        else:
            self.register_buffer('sqrt_end_time', torch.sqrt(torch.tensor(T)))
nreg = 0
if (regularization_fns is not None):
odefunc = RegularizedODEfunc(odefunc, regularization_fns)
nreg = len(regularization_fns)
self.odefunc = odefunc
self.nreg = nreg
self.regularization_states = None
self.solver = solver
self.atol = atol
self.rtol = rtol
self.test_solver = solver
self.test_atol = atol
self.test_rtol = rtol
self.solver_options = {}
def forward(self, z, logpz=None, integration_times=None, reverse=False):
if (logpz is None):
_logpz = torch.zeros(z.shape[0], 1).to(z)
else:
_logpz = logpz
if (integration_times is None):
integration_times = torch.tensor([0.0, (self.sqrt_end_time * self.sqrt_end_time)]).to(z)
if reverse:
integration_times = _flip(integration_times, 0)
self.odefunc.before_odeint()
reg_states = tuple((torch.tensor(0).to(z) for _ in range(self.nreg)))
if self.training:
state_t = odeint(self.odefunc, ((z, _logpz) + reg_states), integration_times.to(z), atol=self.atol, rtol=self.rtol, method=self.solver, options=self.solver_options)
else:
state_t = odeint(self.odefunc, (z, _logpz), integration_times.to(z), atol=self.test_atol, rtol=self.test_rtol, method=self.test_solver)
if (len(integration_times) == 2):
state_t = tuple((s[1] for s in state_t))
(z_t, logpz_t) = state_t[:2]
self.regularization_states = state_t[2:]
if (logpz is not None):
return (z_t, logpz_t)
else:
return z_t
def get_regularization_states(self):
reg_states = self.regularization_states
self.regularization_states = None
return reg_states
def num_evals(self):
return self.odefunc._num_evals.item()
|
def _flip(x, dim):
indices = ([slice(None)] * x.dim())
indices[dim] = torch.arange((x.size(dim) - 1), (- 1), (- 1), dtype=torch.long, device=x.device)
return x[tuple(indices)]
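# Quick check (illustrative): flipping the integration times is what turns the
# forward integration in CNF.forward into the reverse-time pass.
def _example_flip():
    import torch
    return _flip(torch.tensor([0.0, 0.5, 1.0]), 0)   # tensor([1.0, 0.5, 0.0])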
|
class SequentialFlow(nn.Module):
    """A generalized nn.Sequential container for normalizing flows."""
def __init__(self, layersList):
super(SequentialFlow, self).__init__()
self.chain = nn.ModuleList(layersList)
def forward(self, x, logpx=None, reverse=False, inds=None):
if (inds is None):
if reverse:
inds = range((len(self.chain) - 1), (- 1), (- 1))
else:
inds = range(len(self.chain))
if (logpx is None):
for i in inds:
x = self.chain[i](x, reverse=reverse)
return x
else:
for i in inds:
(x, logpx) = self.chain[i](x, logpx, reverse=reverse)
return (x, logpx)
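# Illustrative sketch (exposition only): chaining two of the coupling layers used in
# construct_discrete_model earlier; the CouplingLayer class is passed in rather than
# imported here to avoid assuming a module path. The same call maps x -> z while
# accumulating log-density corrections, and reverse=True inverts it.
def _example_sequential_flow(CouplingLayer):
    import torch
    flow = SequentialFlow([CouplingLayer(2, swap=False), CouplingLayer(2, swap=True)])
    x = torch.randn(16, 2)
    z, logpx = flow(x, torch.zeros(16, 1))
    x_rec = flow(z, reverse=True)
    return (x - x_rec).abs().max()   # ~0 up to numerical error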
|
class SequentialDiffEq(nn.Module):
    """A container for a sequential chain of layers. Supports both regular and diffeq layers."""
def __init__(self, *layers):
super(SequentialDiffEq, self).__init__()
self.layers = nn.ModuleList([diffeq_wrapper(layer) for layer in layers])
def forward(self, t, x):
for layer in self.layers:
x = layer(t, x)
return x
|