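# StyleGAN2-style generator and discriminator networks, extended so that both
# the mapping network and the discriminator condition on a feature vector h in
# addition to the usual latent z and class label c.
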
import numpy as np
import torch

from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma
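
#----------------------------------------------------------------------------
# Normalize each vector along `dim` to unit second moment.
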
@misc.profiled_function
def normalize_2nd_moment(x, dim=1, eps=1e-8):
    return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()
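
#----------------------------------------------------------------------------
# Weight modulation / demodulation convolution used by the synthesis layers.
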
@misc.profiled_function
def modulated_conv2d(
    x,                      # Input tensor: [batch_size, in_channels, in_height, in_width].
    weight,                 # Weight tensor: [out_channels, in_channels, kernel_height, kernel_width].
    styles,                 # Modulation coefficients: [batch_size, in_channels].
    noise=None,             # Optional noise tensor to add to the output activations.
    up=1,                   # Integer upsampling factor.
    down=1,                 # Integer downsampling factor.
    padding=0,              # Padding with respect to the upsampled image.
    resample_filter=None,   # Low-pass filter to apply when resampling activations.
    demodulate=True,        # Apply weight demodulation?
    flip_weight=True,       # False = convolution, True = correlation.
    fused_modconv=True,     # Perform modulation, convolution, and demodulation as a single fused op?
):
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape
    misc.assert_shape(weight, [out_channels, in_channels, kh, kw])  # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None])  # [NIHW]
    misc.assert_shape(styles, [batch_size, in_channels])  # [NI]

    # Pre-normalize inputs to avoid FP16 overflow.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (
            1
            / np.sqrt(in_channels * kh * kw)
            / weight.norm(float("inf"), dim=[1, 2, 3], keepdim=True)
        )
        styles = styles / styles.norm(float("inf"), dim=1, keepdim=True)

    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0)  # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1)  # [NOIkk]
    if demodulate:
        dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt()  # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1)  # [NOIkk]

    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        x = conv2d_resample.conv2d_resample(
            x=x,
            w=weight.to(x.dtype),
            f=resample_filter,
            up=up,
            down=down,
            padding=padding,
            flip_weight=flip_weight,
        )
        if demodulate and noise is not None:
            x = fma.fma(
                x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype)
            )
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x

    # Execute as one fused op using grouped convolution.
    with misc.suppress_tracer_warnings():  # this value will be treated as a constant
        batch_size = int(batch_size)
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample.conv2d_resample(
        x=x,
        w=w.to(x.dtype),
        f=resample_filter,
        up=up,
        down=down,
        padding=padding,
        groups=batch_size,
        flip_weight=flip_weight,
    )
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
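
#----------------------------------------------------------------------------
# Fully-connected layer with equalized learning rate and optional activation.
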
@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        activation="linear",
        lr_multiplier=1,
        bias_init=0,
    ):
        super().__init__()
        self.activation = activation
        self.weight = torch.nn.Parameter(
            torch.randn([out_features, in_features]) / lr_multiplier
        )
        self.bias = (
            torch.nn.Parameter(torch.full([out_features], np.float32(bias_init)))
            if bias
            else None
        )
        self.weight_gain = lr_multiplier / np.sqrt(in_features)
        self.bias_gain = lr_multiplier

    def forward(self, x):
        w = self.weight.to(x.dtype) * self.weight_gain
        b = self.bias
        if b is not None:
            b = b.to(x.dtype)
            if self.bias_gain != 1:
                b = b * self.bias_gain

        if self.activation == "linear" and b is not None:
            x = torch.addmm(b.unsqueeze(0), x, w.t())
        else:
            x = x.matmul(w.t())
            x = bias_act.bias_act(x, b, act=self.activation)
        return x
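
#----------------------------------------------------------------------------
# 2D convolution layer with optional up/downsampling and activation.
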
@persistence.persistent_class
class Conv2dLayer(torch.nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        bias=True,
        activation="linear",
        up=1,
        down=1,
        resample_filter=[1, 3, 3, 1],
        conv_clamp=None,
        channels_last=False,
        trainable=True,
    ):
        super().__init__()
        self.activation = activation
        self.up = up
        self.down = down
        self.conv_clamp = conv_clamp
        self.register_buffer("resample_filter", upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        memory_format = (
            torch.channels_last if channels_last else torch.contiguous_format
        )
        weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
            memory_format=memory_format
        )
        bias = torch.zeros([out_channels]) if bias else None
        if trainable:
            self.weight = torch.nn.Parameter(weight)
            self.bias = torch.nn.Parameter(bias) if bias is not None else None
        else:
            self.register_buffer("weight", weight)
            if bias is not None:
                self.register_buffer("bias", bias)
            else:
                self.bias = None

    def forward(self, x, gain=1):
        w = self.weight * self.weight_gain
        b = self.bias.to(x.dtype) if self.bias is not None else None
        flip_weight = self.up == 1
        x = conv2d_resample.conv2d_resample(
            x=x,
            w=w.to(x.dtype),
            f=self.resample_filter,
            up=self.up,
            down=self.down,
            padding=self.padding,
            flip_weight=flip_weight,
        )

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp)
        return x
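
#----------------------------------------------------------------------------
# Mapping network: embeds the label c and the feature vector h, concatenates
# them with the normalized latent z, and maps the result to W.
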
@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
    def __init__(
        self,
        z_dim,                      # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no label.
        h_dim,                      # Conditioning feature (H) dimensionality, 0 = no feature.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output, None = do not broadcast.
        num_layers=8,               # Number of mapping layers.
        embed_features=None,        # Label embedding dimensionality, None = same as w_dim.
        embed_features_feat=None,   # Feature embedding dimensionality, None = same as w_dim.
        layer_features=None,        # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation="lrelu",         # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier=0.01,         # Learning rate multiplier for the mapping layers.
        w_avg_beta=0.995,           # Decay for tracking the moving average of W during training, None = do not track.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.h_dim = h_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta

        if embed_features is None:
            embed_features = w_dim
        if embed_features_feat is None:
            embed_features_feat = w_dim
        if c_dim == 0:
            embed_features = 0
        if h_dim == 0:
            embed_features_feat = 0
        if layer_features is None:
            layer_features = w_dim
        features_list = (
            [z_dim + embed_features + embed_features_feat]
            + [layer_features] * (num_layers - 1)
            + [w_dim]
        )

        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        if h_dim > 0:
            self.embed_feats = FullyConnectedLayer(h_dim, embed_features_feat)
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(
                in_features,
                out_features,
                activation=activation,
                lr_multiplier=lr_multiplier,
            )
            setattr(self, f"fc{idx}", layer)

        if num_ws is not None and w_avg_beta is not None:
            self.register_buffer("w_avg", torch.zeros([w_dim]))

    def forward(
        self, z, c, h, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False
    ):
        # Embed, normalize, and concatenate inputs.
        x = None
        with torch.autograd.profiler.record_function("input"):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32))
            if self.c_dim > 0 and self.h_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                misc.assert_shape(h, [None, self.h_dim])
                y = torch.cat(
                    [
                        self.embed(c.to(torch.float32)),
                        self.embed_feats(h.to(torch.float32)),
                    ],
                    dim=1,
                )
                y = normalize_2nd_moment(y)
                x = torch.cat([x, y], dim=1) if x is not None else y
            elif self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, y], dim=1) if x is not None else y
            elif self.h_dim > 0:
                misc.assert_shape(h, [None, self.h_dim])
                h = normalize_2nd_moment(self.embed_feats(h.to(torch.float32)))
                x = torch.cat([x, h], dim=1) if x is not None else h

        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f"fc{idx}")
            x = layer(x)

        # Update moving average of W.
        if self.w_avg_beta is not None and self.training and not skip_w_avg_update:
            with torch.autograd.profiler.record_function("update_w_avg"):
                self.w_avg.copy_(
                    x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta)
                )

        # Broadcast.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function("broadcast"):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])

        # Apply truncation.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function("truncate"):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(
                        x[:, :truncation_cutoff], truncation_psi
                    )
        return x
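
#----------------------------------------------------------------------------
# Single synthesis layer: modulated convolution, noise injection, bias, and
# activation.
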
@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
    def __init__(
        self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        w_dim,                          # Intermediate latent (W) dimensionality.
        resolution,                     # Resolution of this layer.
        kernel_size=3,                  # Convolution kernel size.
        up=1,                           # Integer upsampling factor.
        use_noise=True,                 # Enable noise input?
        activation="lrelu",             # Activation function: 'relu', 'lrelu', etc.
        resample_filter=[1, 3, 3, 1],   # Low-pass filter to apply when resampling activations.
        conv_clamp=None,                # Clamp the output of convolution layers to +-X, None = disable clamping.
        channels_last=False,            # Use channels_last format for the weights?
    ):
        super().__init__()
        self.resolution = resolution
        self.up = up
        self.use_noise = use_noise
        self.activation = activation
        self.conv_clamp = conv_clamp
        self.register_buffer("resample_filter", upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = (
            torch.channels_last if channels_last else torch.contiguous_format
        )
        self.weight = torch.nn.Parameter(
            torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
                memory_format=memory_format
            )
        )
        if use_noise:
            self.register_buffer("noise_const", torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))

    def forward(self, x, w, noise_mode="random", fused_modconv=True, gain=1):
        assert noise_mode in ["random", "const", "none"]
        in_resolution = self.resolution // self.up
        misc.assert_shape(x, [None, self.weight.shape[1], in_resolution, in_resolution])
        styles = self.affine(w)

        noise = None
        if self.use_noise and noise_mode == "random":
            noise = (
                torch.randn(
                    [x.shape[0], 1, self.resolution, self.resolution], device=x.device
                )
                * self.noise_strength
            )
        if self.use_noise and noise_mode == "const":
            noise = self.noise_const * self.noise_strength

        flip_weight = self.up == 1
        x = modulated_conv2d(
            x=x,
            weight=self.weight,
            styles=styles,
            noise=noise,
            up=self.up,
            padding=self.padding,
            resample_filter=self.resample_filter,
            flip_weight=flip_weight,
            fused_modconv=fused_modconv,
        )

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(
            x,
            self.bias.to(x.dtype),
            act=self.activation,
            gain=act_gain,
            clamp=act_clamp,
        )
        return x
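
#----------------------------------------------------------------------------
# Converts feature maps to image channels via a modulated 1x1 convolution
# without demodulation.
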
@persistence.persistent_class
class ToRGBLayer(torch.nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        w_dim,
        kernel_size=1,
        conv_clamp=None,
        channels_last=False,
    ):
        super().__init__()
        self.conv_clamp = conv_clamp
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = (
            torch.channels_last if channels_last else torch.contiguous_format
        )
        self.weight = torch.nn.Parameter(
            torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
                memory_format=memory_format
            )
        )
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))

    def forward(self, x, w, fused_modconv=True):
        styles = self.affine(w) * self.weight_gain
        x = modulated_conv2d(
            x=x,
            weight=self.weight,
            styles=styles,
            demodulate=False,
            fused_modconv=fused_modconv,
        )
        x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)
        return x
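
#----------------------------------------------------------------------------
# Synthesis block for one resolution: up to two synthesis layers plus an
# optional ToRGB / skip / resnet path.
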
@persistence.persistent_class
class SynthesisBlock(torch.nn.Module):
    def __init__(
        self,
        in_channels,                    # Number of input channels, 0 = first block.
        out_channels,                   # Number of output channels.
        w_dim,                          # Intermediate latent (W) dimensionality.
        resolution,                     # Resolution of this block.
        img_channels,                   # Number of output color channels.
        is_last,                        # Is this the last block?
        architecture="skip",            # Architecture: 'orig', 'skip', 'resnet'.
        resample_filter=[1, 3, 3, 1],   # Low-pass filter to apply when resampling activations.
        conv_clamp=None,                # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16=False,                 # Use FP16 for this block?
        fp16_channels_last=False,       # Use channels-last memory format with FP16?
        **layer_kwargs,                 # Arguments for SynthesisLayer.
    ):
        assert architecture in ["orig", "skip", "resnet"]
        super().__init__()
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.is_last = is_last
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = use_fp16 and fp16_channels_last
        self.register_buffer("resample_filter", upfirdn2d.setup_filter(resample_filter))
        self.num_conv = 0
        self.num_torgb = 0

        if in_channels == 0:
            self.const = torch.nn.Parameter(
                torch.randn([out_channels, resolution, resolution])
            )

        if in_channels != 0:
            self.conv0 = SynthesisLayer(
                in_channels,
                out_channels,
                w_dim=w_dim,
                resolution=resolution,
                up=2,
                resample_filter=resample_filter,
                conv_clamp=conv_clamp,
                channels_last=self.channels_last,
                **layer_kwargs,
            )
            self.num_conv += 1

        self.conv1 = SynthesisLayer(
            out_channels,
            out_channels,
            w_dim=w_dim,
            resolution=resolution,
            conv_clamp=conv_clamp,
            channels_last=self.channels_last,
            **layer_kwargs,
        )
        self.num_conv += 1

        if is_last or architecture == "skip":
            self.torgb = ToRGBLayer(
                out_channels,
                img_channels,
                w_dim=w_dim,
                conv_clamp=conv_clamp,
                channels_last=self.channels_last,
            )
            self.num_torgb += 1

        if in_channels != 0 and architecture == "resnet":
            self.skip = Conv2dLayer(
                in_channels,
                out_channels,
                kernel_size=1,
                bias=False,
                up=2,
                resample_filter=resample_filter,
                channels_last=self.channels_last,
            )

    def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, **layer_kwargs):
        misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim])
        w_iter = iter(ws.unbind(dim=1))
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = (
            torch.channels_last
            if self.channels_last and not force_fp32
            else torch.contiguous_format
        )
        if fused_modconv is None:
            with misc.suppress_tracer_warnings():  # this value will be treated as a constant
                fused_modconv = (not self.training) and (
                    dtype == torch.float32 or int(x.shape[0]) == 1
                )

        # Input.
        if self.in_channels == 0:
            x = self.const.to(dtype=dtype, memory_format=memory_format)
            x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
        else:
            misc.assert_shape(
                x, [None, self.in_channels, self.resolution // 2, self.resolution // 2]
            )
            x = x.to(dtype=dtype, memory_format=memory_format)

        # Main layers.
        if self.in_channels == 0:
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
        elif self.architecture == "resnet":
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(
                x,
                next(w_iter),
                fused_modconv=fused_modconv,
                gain=np.sqrt(0.5),
                **layer_kwargs,
            )
            x = y.add_(x)
        else:
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)

        # ToRGB.
        if img is not None:
            misc.assert_shape(
                img,
                [None, self.img_channels, self.resolution // 2, self.resolution // 2],
            )
            img = upfirdn2d.upsample2d(img, self.resample_filter)
        if self.is_last or self.architecture == "skip":
            y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
            y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)
            img = img.add_(y) if img is not None else y

        assert x.dtype == dtype
        assert img is None or img.dtype == torch.float32
        return x, img
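
#----------------------------------------------------------------------------
# Full synthesis network: a stack of SynthesisBlocks from 4x4 up to the output
# resolution.
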
@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
    def __init__(
        self,
        w_dim,                  # Intermediate latent (W) dimensionality.
        img_resolution,         # Output image resolution.
        img_channels,           # Number of color channels.
        channel_base=32768,     # Overall multiplier for the number of channels.
        channel_max=512,        # Maximum number of channels in any layer.
        num_fp16_res=0,         # Use FP16 for the N highest resolutions.
        **block_kwargs,         # Arguments for SynthesisBlock.
    ):
        assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)]
        channels_dict = {
            res: min(channel_base // res, channel_max) for res in self.block_resolutions
        }
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        self.num_ws = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res // 2] if res > 4 else 0
            out_channels = channels_dict[res]
            use_fp16 = res >= fp16_resolution
            is_last = res == self.img_resolution
            block = SynthesisBlock(
                in_channels,
                out_channels,
                w_dim=w_dim,
                resolution=res,
                img_channels=img_channels,
                is_last=is_last,
                use_fp16=use_fp16,
                **block_kwargs,
            )
            self.num_ws += block.num_conv
            if is_last:
                self.num_ws += block.num_torgb
            setattr(self, f"b{res}", block)

    def forward(self, ws, **block_kwargs):
        block_ws = []
        with torch.autograd.profiler.record_function("split_ws"):
            misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
            ws = ws.to(torch.float32)
            w_idx = 0
            for res in self.block_resolutions:
                block = getattr(self, f"b{res}")
                block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
                w_idx += block.num_conv

        x = img = None
        for res, cur_ws in zip(self.block_resolutions, block_ws):
            block = getattr(self, f"b{res}")
            x, img = block(x, img, cur_ws, **block_kwargs)
        return img
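
#----------------------------------------------------------------------------
# Complete generator: mapping network followed by synthesis network.
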
@persistence.persistent_class
class Generator(torch.nn.Module):
    def __init__(
        self,
        z_dim,
        c_dim,
        h_dim,
        w_dim,
        img_resolution,
        img_channels,
        mapping_kwargs={},
        synthesis_kwargs={},
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.h_dim = h_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        self.synthesis = SynthesisNetwork(
            w_dim=w_dim,
            img_resolution=img_resolution,
            img_channels=img_channels,
            **synthesis_kwargs,
        )
        self.num_ws = self.synthesis.num_ws
        self.mapping = MappingNetwork(
            z_dim=z_dim,
            c_dim=c_dim,
            h_dim=h_dim,
            w_dim=w_dim,
            num_ws=self.num_ws,
            **mapping_kwargs,
        )

    def forward(
        self, z, c, feats, truncation_psi=1, truncation_cutoff=None, **synthesis_kwargs
    ):
        ws = self.mapping(
            z,
            c,
            feats,
            truncation_psi=truncation_psi,
            truncation_cutoff=truncation_cutoff,
        )
        img = self.synthesis(ws, **synthesis_kwargs)
        return img
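
#----------------------------------------------------------------------------
# Discriminator block for one resolution, with optional FromRGB and
# skip/resnet paths.
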
@persistence.persistent_class
class DiscriminatorBlock(torch.nn.Module):
    def __init__(
        self,
        in_channels,                    # Number of input channels, 0 = first block.
        tmp_channels,                   # Number of intermediate channels.
        out_channels,                   # Number of output channels.
        resolution,                     # Resolution of this block.
        img_channels,                   # Number of input color channels.
        first_layer_idx,                # Index of the first layer (used by freeze_layers).
        architecture="resnet",          # Architecture: 'orig', 'skip', 'resnet'.
        activation="lrelu",             # Activation function: 'relu', 'lrelu', etc.
        resample_filter=[1, 3, 3, 1],   # Low-pass filter to apply when resampling activations.
        conv_clamp=None,                # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16=False,                 # Use FP16 for this block?
        fp16_channels_last=False,       # Use channels-last memory format with FP16?
        freeze_layers=0,                # Freeze-D: number of layers to freeze.
    ):
        assert in_channels in [0, tmp_channels]
        assert architecture in ["orig", "skip", "resnet"]
        super().__init__()
        self.in_channels = in_channels
        self.resolution = resolution
        self.img_channels = img_channels
        self.first_layer_idx = first_layer_idx
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = use_fp16 and fp16_channels_last
        self.register_buffer("resample_filter", upfirdn2d.setup_filter(resample_filter))

        self.num_layers = 0

        def trainable_gen():
            while True:
                layer_idx = self.first_layer_idx + self.num_layers
                trainable = layer_idx >= freeze_layers
                self.num_layers += 1
                yield trainable

        trainable_iter = trainable_gen()

        if in_channels == 0 or architecture == "skip":
            self.fromrgb = Conv2dLayer(
                img_channels,
                tmp_channels,
                kernel_size=1,
                activation=activation,
                trainable=next(trainable_iter),
                conv_clamp=conv_clamp,
                channels_last=self.channels_last,
            )

        self.conv0 = Conv2dLayer(
            tmp_channels,
            tmp_channels,
            kernel_size=3,
            activation=activation,
            trainable=next(trainable_iter),
            conv_clamp=conv_clamp,
            channels_last=self.channels_last,
        )

        self.conv1 = Conv2dLayer(
            tmp_channels,
            out_channels,
            kernel_size=3,
            activation=activation,
            down=2,
            trainable=next(trainable_iter),
            resample_filter=resample_filter,
            conv_clamp=conv_clamp,
            channels_last=self.channels_last,
        )

        if architecture == "resnet":
            self.skip = Conv2dLayer(
                tmp_channels,
                out_channels,
                kernel_size=1,
                bias=False,
                down=2,
                trainable=next(trainable_iter),
                resample_filter=resample_filter,
                channels_last=self.channels_last,
            )

    def forward(self, x, img, force_fp32=False):
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = (
            torch.channels_last
            if self.channels_last and not force_fp32
            else torch.contiguous_format
        )

        # Input.
        if x is not None:
            misc.assert_shape(
                x, [None, self.in_channels, self.resolution, self.resolution]
            )
            x = x.to(dtype=dtype, memory_format=memory_format)

        # FromRGB.
        if self.in_channels == 0 or self.architecture == "skip":
            misc.assert_shape(
                img, [None, self.img_channels, self.resolution, self.resolution]
            )
            img = img.to(dtype=dtype, memory_format=memory_format)
            y = self.fromrgb(img)
            x = x + y if x is not None else y
            img = (
                upfirdn2d.downsample2d(img, self.resample_filter)
                if self.architecture == "skip"
                else None
            )

        # Main layers.
        if self.architecture == "resnet":
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x)
            x = self.conv1(x, gain=np.sqrt(0.5))
            x = y.add_(x)
        else:
            x = self.conv0(x)
            x = self.conv1(x)

        assert x.dtype == dtype
        return x, img
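
#----------------------------------------------------------------------------
# Minibatch standard deviation layer: appends per-group feature statistics as
# extra channels.
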
@persistence.persistent_class
class MinibatchStdLayer(torch.nn.Module):
    def __init__(self, group_size, num_channels=1):
        super().__init__()
        self.group_size = group_size
        self.num_channels = num_channels

    def forward(self, x):
        N, C, H, W = x.shape
        with misc.suppress_tracer_warnings():  # as_tensor results are registered as constants
            G = (
                torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N))
                if self.group_size is not None
                else N
            )
        F = self.num_channels
        c = C // F

        y = x.reshape(G, -1, F, c, H, W)  # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c.
        y = y - y.mean(dim=0)  # [GnFcHW] Subtract mean over group.
        y = y.square().mean(dim=0)  # [nFcHW] Calc variance over group.
        y = (y + 1e-8).sqrt()  # [nFcHW] Calc stddev over group.
        y = y.mean(dim=[2, 3, 4])  # [nF] Take average over channels and pixels.
        y = y.reshape(-1, F, 1, 1)  # [nF11] Add missing dimensions.
        y = y.repeat(G, 1, H, W)  # [NFHW] Replicate over group and pixels.
        x = torch.cat([x, y], dim=1)  # [N(C+F)HW] Append to input as new channels.
        return x
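
#----------------------------------------------------------------------------
# Final discriminator stage: minibatch stddev, conv, and the output projection,
# optionally modulated by the conditioning embedding cmap.
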
@persistence.persistent_class
class DiscriminatorEpilogue(torch.nn.Module):
    def __init__(
        self,
        in_channels,            # Number of input channels.
        cmap_dim,               # Dimensionality of mapped conditioning, 0 = no conditioning.
        resolution,             # Resolution of this block.
        img_channels,           # Number of input color channels.
        architecture="resnet",  # Architecture: 'orig', 'skip', 'resnet'.
        mbstd_group_size=4,     # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels=1,   # Number of features for the minibatch standard deviation layer, 0 = disable.
        activation="lrelu",     # Activation function: 'relu', 'lrelu', etc.
        conv_clamp=None,        # Clamp the output of convolution layers to +-X, None = disable clamping.
    ):
        assert architecture in ["orig", "skip", "resnet"]
        super().__init__()
        self.in_channels = in_channels
        self.cmap_dim = cmap_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.architecture = architecture

        if architecture == "skip":
            self.fromrgb = Conv2dLayer(
                img_channels, in_channels, kernel_size=1, activation=activation
            )
        self.mbstd = (
            MinibatchStdLayer(
                group_size=mbstd_group_size, num_channels=mbstd_num_channels
            )
            if mbstd_num_channels > 0
            else None
        )
        self.conv = Conv2dLayer(
            in_channels + mbstd_num_channels,
            in_channels,
            kernel_size=3,
            activation=activation,
            conv_clamp=conv_clamp,
        )
        self.fc = FullyConnectedLayer(
            in_channels * (resolution ** 2), in_channels, activation=activation
        )
        self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim)

    def forward(self, x, img, cmap, force_fp32=False):
        misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution])
        _ = force_fp32  # unused
        dtype = torch.float32
        memory_format = torch.contiguous_format

        # FromRGB.
        x = x.to(dtype=dtype, memory_format=memory_format)
        if self.architecture == "skip":
            misc.assert_shape(
                img, [None, self.img_channels, self.resolution, self.resolution]
            )
            img = img.to(dtype=dtype, memory_format=memory_format)
            x = x + self.fromrgb(img)

        # Main layers.
        if self.mbstd is not None:
            x = self.mbstd(x)
        x = self.conv(x)
        x = self.fc(x.flatten(1))
        x = self.out(x)

        # Conditioning.
        if self.cmap_dim > 0:
            misc.assert_shape(cmap, [None, self.cmap_dim])
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))

        assert x.dtype == dtype
        return x
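
#----------------------------------------------------------------------------
# Complete discriminator: a stack of DiscriminatorBlocks followed by the
# epilogue; c and h are embedded into cmap via a shared MappingNetwork.
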
@persistence.persistent_class
class Discriminator(torch.nn.Module):
    def __init__(
        self,
        c_dim,                  # Conditioning label (C) dimensionality.
        h_dim,                  # Conditioning feature (H) dimensionality.
        img_resolution,         # Input resolution.
        img_channels,           # Number of input color channels.
        architecture="resnet",  # Architecture: 'orig', 'skip', 'resnet'.
        channel_base=32768,     # Overall multiplier for the number of channels.
        channel_max=512,        # Maximum number of channels in any layer.
        num_fp16_res=0,         # Use FP16 for the N highest resolutions.
        conv_clamp=None,        # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim=None,          # Dimensionality of mapped conditioning, None = default.
        block_kwargs={},        # Arguments for DiscriminatorBlock.
        mapping_kwargs={},      # Arguments for MappingNetwork.
        epilogue_kwargs={},     # Arguments for DiscriminatorEpilogue.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.h_dim = h_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        channels_dict = {
            res: min(channel_base // res, channel_max)
            for res in self.block_resolutions + [4]
        }
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0 and h_dim == 0:
            cmap_dim = 0

        common_kwargs = dict(
            img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp
        )
        cur_layer_idx = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = res >= fp16_resolution
            block = DiscriminatorBlock(
                in_channels,
                tmp_channels,
                out_channels,
                resolution=res,
                first_layer_idx=cur_layer_idx,
                use_fp16=use_fp16,
                **block_kwargs,
                **common_kwargs,
            )
            setattr(self, f"b{res}", block)
            cur_layer_idx += block.num_layers
        if c_dim > 0 or h_dim > 0:
            self.mapping = MappingNetwork(
                z_dim=0,
                c_dim=c_dim,
                h_dim=h_dim,
                w_dim=cmap_dim,
                num_ws=None,
                w_avg_beta=None,
                **mapping_kwargs,
            )
        self.b4 = DiscriminatorEpilogue(
            channels_dict[4],
            cmap_dim=cmap_dim,
            resolution=4,
            **epilogue_kwargs,
            **common_kwargs,
        )

    def forward(self, img, c, h, **block_kwargs):
        x = None
        for res in self.block_resolutions:
            block = getattr(self, f"b{res}")
            x, img = block(x, img, **block_kwargs)

        cmap = None
        if self.c_dim > 0 or self.h_dim > 0:
            cmap = self.mapping(None, c, h)
        x = self.b4(x, img, cmap)
        return x