Commit 11be304
Parent(s): de5987d
Upload 3 files
- Portrait_net_G.pth +3 -0
- model.py +38 -0
- networks.py +416 -0
Portrait_net_G.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:247e6582d90d36917a18df0e0909d5ec0b7d79ebe762e649842e36f7574d6c27
size 729793853
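
The weights file is stored with Git LFS, so the diff records only the pointer (spec version, SHA-256 oid, and byte size), not the 730 MB payload itself. A minimal sanity check, assuming the repository was cloned into the current directory:

import os

# `size` comes from the LFS pointer above; if the file on disk is only a few
# hundred bytes, the clone fetched the pointer, not the weights (run `git lfs pull`).
expected_size = 729793853
if os.path.getsize('Portrait_net_G.pth') != expected_size:
    print('Portrait_net_G.pth looks like an unresolved Git LFS pointer.')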
model.py
ADDED
@@ -0,0 +1,38 @@
from networks import ResnetBlock
import functools
import torch
import torch.nn as nn

# class GenerativeModel():
#     def __init__(self):
#         self.model = networks.define_G(3, 3, 64, "global", 4, 9, 1, 3, "instance", gpu_ids=[0])

class GlobalGenerator(nn.Module):
    def __init__(self, input_nc=3, output_nc=3, ngf=64, n_downsampling=4, n_blocks=9,
                 norm_layer=functools.partial(nn.InstanceNorm2d, affine=False), padding_type='reflect'):
        assert(n_blocks >= 0)
        super(GlobalGenerator, self).__init__()
        activation = nn.ReLU(True)

        model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
        ### downsample
        for i in range(n_downsampling):
            mult = 2**i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
                      norm_layer(ngf * mult * 2), activation]

        ### resnet blocks
        mult = 2**n_downsampling
        for i in range(n_blocks):
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]

        ### upsample
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
                      norm_layer(int(ngf * mult / 2)), activation]
        model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input):
        return self.model(input)
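
The defaults here (ngf=64, four downsampling stages, nine ResNet blocks, instance norm) match the commented-out define_G call above, which suggests the checkpoint was exported from this configuration. A minimal inference sketch, assuming Portrait_net_G.pth holds a bare state_dict and inputs are 3-channel images normalized to [-1, 1] with sides divisible by 16:

import torch
from model import GlobalGenerator

net = GlobalGenerator()  # defaults: 3 -> 3 channels, ngf=64, 4 downsamples, 9 blocks
state = torch.load('Portrait_net_G.pth', map_location='cpu')
net.load_state_dict(state)  # assumes a bare state_dict; a wrapped checkpoint would need unpacking first
net.eval()

with torch.no_grad():
    x = torch.randn(1, 3, 512, 512)  # placeholder; real use feeds a normalized portrait image
    y = net(x)                       # output lies in [-1, 1] via the final Tanh
print(y.shape)  # torch.Size([1, 3, 512, 512])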
networks.py
ADDED
@@ -0,0 +1,416 @@
import torch
import torch.nn as nn
import functools
from torch.autograd import Variable
import numpy as np

###############################################################################
# Functions
###############################################################################
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)

def get_norm_layer(norm_type='instance'):
    if norm_type == 'batch':
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
    elif norm_type == 'instance':
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer

def define_G(input_nc, output_nc, ngf, netG, n_downsample_global=3, n_blocks_global=9, n_local_enhancers=1,
             n_blocks_local=3, norm='instance', gpu_ids=[]):
    norm_layer = get_norm_layer(norm_type=norm)
    if netG == 'global':
        netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer)
    elif netG == 'local':
        netG = LocalEnhancer(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global,
                             n_local_enhancers, n_blocks_local, norm_layer)
    elif netG == 'encoder':
        netG = Encoder(input_nc, output_nc, ngf, n_downsample_global, norm_layer)
    else:
        raise NotImplementedError('generator not implemented!')
    print(netG)
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
        netG.cuda(gpu_ids[0])
    netG.apply(weights_init)
    return netG

def define_D(input_nc, ndf, n_layers_D, norm='instance', use_sigmoid=False, num_D=1, getIntermFeat=False, gpu_ids=[]):
    norm_layer = get_norm_layer(norm_type=norm)
    netD = MultiscaleDiscriminator(input_nc, ndf, n_layers_D, norm_layer, use_sigmoid, num_D, getIntermFeat)
    print(netD)
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
        netD.cuda(gpu_ids[0])
    netD.apply(weights_init)
    return netD

def print_network(net):
    if isinstance(net, list):
        net = net[0]
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print('Total number of parameters: %d' % num_params)

##############################################################################
# Losses
##############################################################################
class GANLoss(nn.Module):
    def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        if use_lsgan:
            self.loss = nn.MSELoss()
        else:
            self.loss = nn.BCELoss()

    def get_target_tensor(self, input, target_is_real):
        target_tensor = None
        if target_is_real:
            create_label = ((self.real_label_var is None) or
                            (self.real_label_var.numel() != input.numel()))
            if create_label:
                real_tensor = self.Tensor(input.size()).fill_(self.real_label)
                self.real_label_var = Variable(real_tensor, requires_grad=False)
            target_tensor = self.real_label_var
        else:
            create_label = ((self.fake_label_var is None) or
                            (self.fake_label_var.numel() != input.numel()))
            if create_label:
                fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
                self.fake_label_var = Variable(fake_tensor, requires_grad=False)
            target_tensor = self.fake_label_var
        return target_tensor

    def __call__(self, input, target_is_real):
        if isinstance(input[0], list):
            loss = 0
            for input_i in input:
                pred = input_i[-1]
                target_tensor = self.get_target_tensor(pred, target_is_real)
                loss += self.loss(pred, target_tensor)
            return loss
        else:
            target_tensor = self.get_target_tensor(input[-1], target_is_real)
            return self.loss(input[-1], target_tensor)

class VGGLoss(nn.Module):
    def __init__(self, gpu_ids):
        super(VGGLoss, self).__init__()
        self.vgg = Vgg19().cuda()
        self.criterion = nn.L1Loss()
        self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]

    def forward(self, x, y):
        x_vgg, y_vgg = self.vgg(x), self.vgg(y)
        loss = 0
        for i in range(len(x_vgg)):
            loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
        return loss

##############################################################################
# Generator
##############################################################################
class LocalEnhancer(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=32, n_downsample_global=3, n_blocks_global=9,
                 n_local_enhancers=1, n_blocks_local=3, norm_layer=nn.BatchNorm2d, padding_type='reflect'):
        super(LocalEnhancer, self).__init__()
        self.n_local_enhancers = n_local_enhancers

        ###### global generator model #####
        ngf_global = ngf * (2**n_local_enhancers)
        model_global = GlobalGenerator(input_nc, output_nc, ngf_global, n_downsample_global, n_blocks_global, norm_layer).model
        model_global = [model_global[i] for i in range(len(model_global)-3)]  # get rid of final convolution layers
        self.model = nn.Sequential(*model_global)

        ###### local enhancer layers #####
        for n in range(1, n_local_enhancers+1):
            ### downsample
            ngf_global = ngf * (2**(n_local_enhancers-n))
            model_downsample = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf_global, kernel_size=7, padding=0),
                                norm_layer(ngf_global), nn.ReLU(True),
                                nn.Conv2d(ngf_global, ngf_global * 2, kernel_size=3, stride=2, padding=1),
                                norm_layer(ngf_global * 2), nn.ReLU(True)]
            ### residual blocks
            model_upsample = []
            for i in range(n_blocks_local):
                model_upsample += [ResnetBlock(ngf_global * 2, padding_type=padding_type, norm_layer=norm_layer)]

            ### upsample
            model_upsample += [nn.ConvTranspose2d(ngf_global * 2, ngf_global, kernel_size=3, stride=2, padding=1, output_padding=1),
                               norm_layer(ngf_global), nn.ReLU(True)]

            ### final convolution
            if n == n_local_enhancers:
                model_upsample += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]

            setattr(self, 'model'+str(n)+'_1', nn.Sequential(*model_downsample))
            setattr(self, 'model'+str(n)+'_2', nn.Sequential(*model_upsample))

        self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)

    def forward(self, input):
        ### create input pyramid
        input_downsampled = [input]
        for i in range(self.n_local_enhancers):
            input_downsampled.append(self.downsample(input_downsampled[-1]))

        ### output at coarsest level
        output_prev = self.model(input_downsampled[-1])
        ### build up one layer at a time
        for n_local_enhancers in range(1, self.n_local_enhancers+1):
            model_downsample = getattr(self, 'model'+str(n_local_enhancers)+'_1')
            model_upsample = getattr(self, 'model'+str(n_local_enhancers)+'_2')
            input_i = input_downsampled[self.n_local_enhancers-n_local_enhancers]
            output_prev = model_upsample(model_downsample(input_i) + output_prev)
        return output_prev

class GlobalGenerator(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
                 padding_type='reflect'):
        assert(n_blocks >= 0)
        super(GlobalGenerator, self).__init__()
        activation = nn.ReLU(True)

        model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
        ### downsample
        for i in range(n_downsampling):
            mult = 2**i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
                      norm_layer(ngf * mult * 2), activation]

        ### resnet blocks
        mult = 2**n_downsampling
        for i in range(n_blocks):
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]

        ### upsample
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
                      norm_layer(int(ngf * mult / 2)), activation]
        model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input):
        return self.model(input)

# Define a resnet block
class ResnetBlock(nn.Module):
    def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)

    def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
                       norm_layer(dim),
                       activation]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
                       norm_layer(dim)]

        return nn.Sequential(*conv_block)

    def forward(self, x):
        out = x + self.conv_block(x)
        return out

class Encoder(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=32, n_downsampling=4, norm_layer=nn.BatchNorm2d):
        super(Encoder, self).__init__()
        self.output_nc = output_nc

        model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
                 norm_layer(ngf), nn.ReLU(True)]
        ### downsample
        for i in range(n_downsampling):
            mult = 2**i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
                      norm_layer(ngf * mult * 2), nn.ReLU(True)]

        ### upsample
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
                      norm_layer(int(ngf * mult / 2)), nn.ReLU(True)]

        model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input, inst):
        outputs = self.model(input)

        # instance-wise average pooling
        outputs_mean = outputs.clone()
        inst_list = np.unique(inst.cpu().numpy().astype(int))
        for i in inst_list:
            for b in range(input.size()[0]):
                indices = (inst[b:b+1] == int(i)).nonzero()  # n x 4
                for j in range(self.output_nc):
                    output_ins = outputs[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]]
                    mean_feat = torch.mean(output_ins).expand_as(output_ins)
                    outputs_mean[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]] = mean_feat
        return outputs_mean

class MultiscaleDiscriminator(nn.Module):
    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,
                 use_sigmoid=False, num_D=3, getIntermFeat=False):
        super(MultiscaleDiscriminator, self).__init__()
        self.num_D = num_D
        self.n_layers = n_layers
        self.getIntermFeat = getIntermFeat

        for i in range(num_D):
            netD = NLayerDiscriminator(input_nc, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat)
            if getIntermFeat:
                for j in range(n_layers+2):
                    setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j)))
            else:
                setattr(self, 'layer'+str(i), netD.model)

        self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)

    def singleD_forward(self, model, input):
        if self.getIntermFeat:
            result = [input]
            for i in range(len(model)):
                result.append(model[i](result[-1]))
            return result[1:]
        else:
            return [model(input)]

    def forward(self, input):
        num_D = self.num_D
        result = []
        input_downsampled = input
        for i in range(num_D):
            if self.getIntermFeat:
                model = [getattr(self, 'scale'+str(num_D-1-i)+'_layer'+str(j)) for j in range(self.n_layers+2)]
            else:
                model = getattr(self, 'layer'+str(num_D-1-i))
            result.append(self.singleD_forward(model, input_downsampled))
            if i != (num_D-1):
                input_downsampled = self.downsample(input_downsampled)
        return result

# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, getIntermFeat=False):
        super(NLayerDiscriminator, self).__init__()
        self.getIntermFeat = getIntermFeat
        self.n_layers = n_layers

        kw = 4
        padw = int(np.ceil((kw-1.0)/2))
        sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]]

        nf = ndf
        for n in range(1, n_layers):
            nf_prev = nf
            nf = min(nf * 2, 512)
            sequence += [[
                nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
                norm_layer(nf), nn.LeakyReLU(0.2, True)
            ]]

        nf_prev = nf
        nf = min(nf * 2, 512)
        sequence += [[
            nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
            norm_layer(nf),
            nn.LeakyReLU(0.2, True)
        ]]

        sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]

        if use_sigmoid:
            sequence += [[nn.Sigmoid()]]

        if getIntermFeat:
            for n in range(len(sequence)):
                setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))
        else:
            sequence_stream = []
            for n in range(len(sequence)):
                sequence_stream += sequence[n]
            self.model = nn.Sequential(*sequence_stream)

    def forward(self, input):
        if self.getIntermFeat:
            res = [input]
            for n in range(self.n_layers+2):
                model = getattr(self, 'model'+str(n))
                res.append(model(res[-1]))
            return res[1:]
        else:
            return self.model(input)

from torchvision import models
class Vgg19(torch.nn.Module):
    def __init__(self, requires_grad=False):
        super(Vgg19, self).__init__()
        vgg_pretrained_features = models.vgg19(pretrained=True).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        for x in range(2):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(2, 7):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(7, 12):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 21):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(21, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        h_relu1 = self.slice1(X)
        h_relu2 = self.slice2(h_relu1)
        h_relu3 = self.slice3(h_relu2)
        h_relu4 = self.slice4(h_relu3)
        h_relu5 = self.slice5(h_relu4)
        out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
        return out
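
networks.py follows the pix2pixHD network definitions. A minimal sketch of how the factories and GANLoss wire together; the generator arguments mirror the commented-out call in model.py, while the discriminator settings and the 6-channel conditional input are illustrative assumptions, not taken from this commit:

import torch
from networks import define_G, define_D, GANLoss

netG = define_G(3, 3, 64, 'global', n_downsample_global=4, n_blocks_global=9, norm='instance')
netD = define_D(input_nc=6, ndf=64, n_layers_D=3, norm='instance', num_D=2)  # illustrative settings
criterionGAN = GANLoss(use_lsgan=True)  # LSGAN objective: MSE against 1/0 target maps

real = torch.randn(1, 3, 256, 256)                # stand-in for a normalized input image
fake = netG(real)
pred_fake = netD(torch.cat((real, fake), dim=1))  # conditional D scores input+output pairs
loss_G = criterionGAN(pred_fake, True)            # generator wants D to call fakes real
loss_D = criterionGAN(pred_fake, False)           # sketch only; training would use fake.detach() and real pairs too
print(loss_G.item(), loss_D.item())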