DmitrMakeev committed
Commit 02cacbe · Parent(s): 922f55d

Upload 7 files

Files changed:
- models/audio2pose.py        +36  -0
- models/dense_motion.py      +114 -0
- models/generator.py         +99  -0
- models/keypoint_detector.py +80  -0
- models/resnet.py            +204 -0
- models/transformer.py       +391 -0
- models/util.py              +354 -0
models/audio2pose.py
ADDED
@@ -0,0 +1,36 @@
import torch.nn as nn
import torch
from models.util import MyResNet34


class audio2poseLSTM(nn.Module):
    def __init__(self):
        super(audio2poseLSTM, self).__init__()

        self.em_pose = MyResNet34(256, 1)   # encodes the single-channel reference image to a 256-D vector
        self.em_audio = MyResNet34(256, 1)  # encodes each per-frame audio feature map to a 256-D vector
        self.lstm = nn.LSTM(512, 256, num_layers=2, bias=True, batch_first=True)

        self.output = nn.Linear(256, 6)     # 6 pose parameters per frame

    def forward(self, x):
        pose_em = self.em_pose(x["img"])
        bs = pose_em.shape[0]
        zero_state = torch.zeros((2, bs, 256), requires_grad=True).to(pose_em.device)
        cur_state = (zero_state, zero_state)
        img_em = pose_em
        bs, seqlen, num, dims = x["audio"].shape

        audio = x["audio"].reshape(-1, 1, num, dims)
        audio_em = self.em_audio(audio).reshape(bs, seqlen, 256)

        # the pose of the reference frame comes first, then one prediction per audio step
        result = [self.output(img_em).unsqueeze(1)]

        for i in range(seqlen):
            img_em, cur_state = self.lstm(torch.cat((audio_em[:, i:i + 1], img_em.unsqueeze(1)), dim=2), cur_state)
            img_em = img_em.reshape(-1, 256)
            result.append(self.output(img_em).unsqueeze(1))

        res = torch.cat(result, dim=1)
        return res
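Not part of the commit: a minimal usage sketch for audio2poseLSTM. It only assumes what the code above implies, namely a single-channel image under the "img" key and a 4-D audio tensor under the "audio" key; the 4x41 per-frame audio feature size below is illustrative, not taken from a config. The output stacks one 6-D pose code for the reference frame plus one per audio step.

import torch
from models.audio2pose import audio2poseLSTM

model = audio2poseLSTM().eval()
batch = {
    "img": torch.rand(2, 1, 256, 256),     # encoded by self.em_pose (MyResNet34 with 1 input channel)
    "audio": torch.randn(2, 64, 4, 41),    # (bs, seqlen, num, dims); shapes here are illustrative only
}
with torch.no_grad():
    poses = model(batch)
print(poses.shape)  # torch.Size([2, 65, 6]) -> reference-frame pose at index 0, then one pose per audio step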
models/dense_motion.py
ADDED
@@ -0,0 +1,114 @@
from torch import nn
import torch.nn.functional as F
import torch
from models.util import Hourglass, AntiAliasInterpolation2d, make_coordinate_grid, kp2gaussian


class DenseMotionNetwork(nn.Module):
    """
    Module that predicts a dense motion field from the sparse motion representation given by kp_source and kp_driving.
    """

    def __init__(self, block_expansion, num_blocks, max_features, num_kp, num_channels, estimate_occlusion_map=False,
                 scale_factor=1, kp_variance=0.01):
        super(DenseMotionNetwork, self).__init__()
        self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp + 1) * (num_channels + 1),
                                   max_features=max_features, num_blocks=num_blocks)

        self.mask = nn.Conv2d(self.hourglass.out_filters, num_kp + 1, kernel_size=(7, 7), padding=(3, 3))

        if estimate_occlusion_map:
            self.occlusion = nn.Conv2d(self.hourglass.out_filters, 1, kernel_size=(7, 7), padding=(3, 3))
        else:
            self.occlusion = None

        self.num_kp = num_kp
        self.scale_factor = scale_factor
        self.kp_variance = kp_variance

        if self.scale_factor != 1:
            self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)

    def create_heatmap_representations(self, source_image, kp_driving, kp_source):
        """
        Eq 6. in the paper H_k(z)
        """
        spatial_size = source_image.shape[2:]
        gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=self.kp_variance)
        gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=self.kp_variance)
        heatmap = gaussian_driving - gaussian_source

        # adding background feature
        zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1]).type(heatmap.type())
        heatmap = torch.cat([zeros, heatmap], dim=1)
        heatmap = heatmap.unsqueeze(2)
        return heatmap

    def create_sparse_motions(self, source_image, kp_driving, kp_source):
        """
        Eq 4. in the paper T_{s<-d}(z)
        """
        bs, _, h, w = source_image.shape
        identity_grid = make_coordinate_grid((h, w), type=kp_source['value'].type())
        identity_grid = identity_grid.view(1, 1, h, w, 2)
        coordinate_grid = identity_grid - kp_driving['value'].view(bs, self.num_kp, 1, 1, 2)
        if 'jacobian' in kp_driving:
            jacobian = torch.matmul(kp_source['jacobian'], torch.inverse(kp_driving['jacobian']))
            jacobian = jacobian.unsqueeze(-3).unsqueeze(-3)
            jacobian = jacobian.repeat(1, 1, h, w, 1, 1)
            coordinate_grid = torch.matmul(jacobian, coordinate_grid.unsqueeze(-1))
            coordinate_grid = coordinate_grid.squeeze(-1)

        driving_to_source = coordinate_grid + kp_source['value'].view(bs, self.num_kp, 1, 1, 2)

        # adding background feature
        identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1)
        sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1)
        return sparse_motions

    def create_deformed_source_image(self, source_image, sparse_motions):
        """
        Eq 7. in the paper \hat{T}_{s<-d}(z)
        """
        bs, _, h, w = source_image.shape
        source_repeat = source_image.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp + 1, 1, 1, 1, 1)
        source_repeat = source_repeat.view(bs * (self.num_kp + 1), -1, h, w)
        sparse_motions = sparse_motions.view((bs * (self.num_kp + 1), h, w, -1))
        sparse_deformed = F.grid_sample(source_repeat, sparse_motions)
        # sparse_deformed = F.grid_sample(source_repeat, sparse_motions, align_corners=False)
        sparse_deformed = sparse_deformed.view((bs, self.num_kp + 1, -1, h, w))
        return sparse_deformed

    def forward(self, source_image, kp_driving, kp_source):
        if self.scale_factor != 1:
            source_image = self.down(source_image)

        bs, _, h, w = source_image.shape

        out_dict = dict()
        heatmap_representation = self.create_heatmap_representations(source_image, kp_driving, kp_source)  # bs*(num_kp+1)*1*h*w
        sparse_motion = self.create_sparse_motions(source_image, kp_driving, kp_source)  # bs*(num_kp+1)*h*w*2
        deformed_source = self.create_deformed_source_image(source_image, sparse_motion)
        out_dict['sparse_deformed'] = deformed_source

        input = torch.cat([heatmap_representation, deformed_source], dim=2)  # bs*(num_kp+1)*(num_channels+1)*h*w
        input = input.view(bs, -1, h, w)

        prediction = self.hourglass(input)

        mask = self.mask(prediction)
        mask = F.softmax(mask, dim=1)
        out_dict['mask'] = mask
        mask = mask.unsqueeze(2)  # bs*(num_kp+1)*1*h*w
        sparse_motion = sparse_motion.permute(0, 1, 4, 2, 3)
        deformation = (sparse_motion * mask).sum(dim=1)  # bs*2*h*w
        deformation = deformation.permute(0, 2, 3, 1)    # bs*h*w*2

        out_dict['deformation'] = deformation

        # Sec. 3.2 in the paper
        if self.occlusion:
            occlusion_map = torch.sigmoid(self.occlusion(prediction))
            out_dict['occlusion_map'] = occlusion_map

        return out_dict
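Not part of the commit: a sketch of how DenseMotionNetwork can be driven. Keypoints are plain dicts with a 'value' tensor of shape (bs, num_kp, 2) in [-1, 1] coordinates and optionally a 'jacobian' of shape (bs, num_kp, 2, 2). The hyperparameters below are illustrative (the training config is not included in this commit).

import torch
from models.dense_motion import DenseMotionNetwork

# Illustrative hyperparameters only.
net = DenseMotionNetwork(block_expansion=64, num_blocks=5, max_features=1024,
                         num_kp=10, num_channels=3,
                         estimate_occlusion_map=True, scale_factor=0.25)

identity_jac = torch.eye(2).view(1, 1, 2, 2).repeat(1, 10, 1, 1)
kp_source = {"value": torch.zeros(1, 10, 2), "jacobian": identity_jac}
kp_driving = {"value": 0.1 * torch.randn(1, 10, 2), "jacobian": identity_jac}

source = torch.rand(1, 3, 256, 256)
out = net(source, kp_driving=kp_driving, kp_source=kp_source)
print(out["deformation"].shape)    # (1, 64, 64, 2) sampling grid at the downscaled resolution
print(out["mask"].shape)           # (1, 11, 64, 64) -> num_kp + 1 soft assignment maps
print(out["occlusion_map"].shape)  # (1, 1, 64, 64)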
models/generator.py
ADDED
@@ -0,0 +1,99 @@
import torch
from torch import nn
import torch.nn.functional as F
from models.util import ResBlock2d, SameBlock2d, UpBlock2d, DownBlock2d
from models.dense_motion import DenseMotionNetwork


class OcclusionAwareGenerator(nn.Module):
    """
    Generator that, given a source image and keypoints, transforms the image according to the movement
    trajectories induced by the keypoints. The generator follows the Johnson architecture.
    """

    def __init__(self, num_channels, num_kp, block_expansion, max_features, num_down_blocks,
                 num_bottleneck_blocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
        super(OcclusionAwareGenerator, self).__init__()

        if dense_motion_params is not None:
            self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, num_channels=num_channels,
                                                           estimate_occlusion_map=estimate_occlusion_map,
                                                           **dense_motion_params)
        else:
            self.dense_motion_network = None

        self.first = SameBlock2d(num_channels, block_expansion, kernel_size=(7, 7), padding=(3, 3))

        down_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2 ** i))
            out_features = min(max_features, block_expansion * (2 ** (i + 1)))
            down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
        self.down_blocks = nn.ModuleList(down_blocks)

        up_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2 ** (num_down_blocks - i)))
            out_features = min(max_features, block_expansion * (2 ** (num_down_blocks - i - 1)))
            up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
        self.up_blocks = nn.ModuleList(up_blocks)

        self.bottleneck = torch.nn.Sequential()
        in_features = min(max_features, block_expansion * (2 ** num_down_blocks))
        for i in range(num_bottleneck_blocks):
            self.bottleneck.add_module('r' + str(i), ResBlock2d(in_features, kernel_size=(3, 3), padding=(1, 1)))

        self.final = nn.Conv2d(block_expansion, num_channels, kernel_size=(7, 7), padding=(3, 3))
        self.estimate_occlusion_map = estimate_occlusion_map
        self.num_channels = num_channels

    def deform_input(self, inp, deformation):
        _, h_old, w_old, _ = deformation.shape
        _, _, h, w = inp.shape
        if h_old != h or w_old != w:
            deformation = deformation.permute(0, 3, 1, 2)
            deformation = F.interpolate(deformation, size=(h, w), mode='bilinear')
            deformation = deformation.permute(0, 2, 3, 1)
        return F.grid_sample(inp, deformation)
        # return F.grid_sample(inp, deformation, align_corners=False)

    def forward(self, source_image, kp_driving, kp_source):
        # Encoding (downsampling) part
        out = self.first(source_image)
        for i in range(len(self.down_blocks)):
            out = self.down_blocks[i](out)

        # Transforming feature representation according to deformation and occlusion
        output_dict = {}
        if self.dense_motion_network is not None:
            dense_motion = self.dense_motion_network(source_image=source_image, kp_driving=kp_driving,
                                                     kp_source=kp_source)
            output_dict['mask'] = dense_motion['mask']
            output_dict['sparse_deformed'] = dense_motion['sparse_deformed']
            output_dict['deformation'] = dense_motion['deformation']

            if 'occlusion_map' in dense_motion:
                occlusion_map = dense_motion['occlusion_map']
                output_dict['occlusion_map'] = occlusion_map
            else:
                occlusion_map = None
            deformation = dense_motion['deformation']
            out = self.deform_input(out, deformation)

            if occlusion_map is not None:
                if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]:
                    occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], mode='bilinear')
                out = out * occlusion_map

            output_dict["deformed"] = self.deform_input(source_image, deformation)

        # Decoding part
        out = self.bottleneck(out)
        for i in range(len(self.up_blocks)):
            out = self.up_blocks[i](out)
        out = self.final(out)
        out = torch.sigmoid(out)

        output_dict["prediction"] = out

        return output_dict
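Not part of the commit: a sketch calling OcclusionAwareGenerator directly with hand-built keypoint dicts, to show the expected input format. The hyperparameters are illustrative; in practice the kp dicts come from KPDetector (next file) rather than being constructed by hand.

import torch
from models.generator import OcclusionAwareGenerator

# Illustrative hyperparameters; the training config is not included in this commit.
generator = OcclusionAwareGenerator(num_channels=3, num_kp=10, block_expansion=64, max_features=512,
                                    num_down_blocks=2, num_bottleneck_blocks=6,
                                    estimate_occlusion_map=True,
                                    dense_motion_params={"block_expansion": 64, "num_blocks": 5,
                                                         "max_features": 1024, "scale_factor": 0.25})

# Keypoints are dicts: 'value' in [-1, 1] image coordinates, 'jacobian' as local 2x2 affine maps.
identity_jac = torch.eye(2).view(1, 1, 2, 2).repeat(1, 10, 1, 1)
kp_source = {"value": torch.zeros(1, 10, 2), "jacobian": identity_jac}
kp_driving = {"value": 0.1 * torch.randn(1, 10, 2), "jacobian": identity_jac}

source = torch.rand(1, 3, 256, 256)
with torch.no_grad():
    out = generator(source, kp_driving=kp_driving, kp_source=kp_source)
print(out["prediction"].shape)  # (1, 3, 256, 256) reconstructed frame
print(sorted(out.keys()))       # deformation, deformed, mask, occlusion_map, prediction, sparse_deformed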
models/keypoint_detector.py
ADDED
@@ -0,0 +1,80 @@
from torch import nn
import torch
import torch.nn.functional as F
from models.util import Hourglass, make_coordinate_grid, AntiAliasInterpolation2d


class KPDetector(nn.Module):
    """
    Detects keypoints. Returns keypoint positions and a jacobian near each keypoint.
    """

    def __init__(self, block_expansion, num_kp, num_channels, max_features,
                 num_blocks, temperature, estimate_jacobian=False, scale_factor=1,
                 single_jacobian_map=False, pad=0):
        super(KPDetector, self).__init__()

        self.predictor = Hourglass(block_expansion, in_features=num_channels,
                                   max_features=max_features, num_blocks=num_blocks)

        self.kp = nn.Conv2d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=(7, 7),
                            padding=pad)

        if estimate_jacobian:
            self.num_jacobian_maps = 1 if single_jacobian_map else num_kp
            self.jacobian = nn.Conv2d(in_channels=self.predictor.out_filters,
                                      out_channels=4 * self.num_jacobian_maps, kernel_size=(7, 7), padding=pad)
            self.jacobian.weight.data.zero_()
            self.jacobian.bias.data.copy_(torch.tensor([1, 0, 0, 1] * self.num_jacobian_maps, dtype=torch.float))
        else:
            self.jacobian = None

        self.temperature = temperature
        self.scale_factor = scale_factor
        if self.scale_factor != 1:
            self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)

    def gaussian2kp(self, heatmap):
        """
        Extract the mean from a heatmap
        """
        shape = heatmap.shape
        heatmap = heatmap.unsqueeze(-1)
        grid = make_coordinate_grid(shape[2:], heatmap.type()).unsqueeze_(0).unsqueeze_(0)
        value = (heatmap * grid).sum(dim=(2, 3))
        kp = {'value': value}

        return kp

    def forward(self, x, with_feature=False):
        if self.scale_factor != 1:
            x = self.down(x)

        feature_map = self.predictor(x)
        prediction = self.kp(feature_map)
        final_shape = prediction.shape
        heatmap = prediction.view(final_shape[0], final_shape[1], -1)
        heatmap = F.softmax(heatmap / self.temperature, dim=2)
        heatmap = heatmap.view(*final_shape)

        out = self.gaussian2kp(heatmap)

        if self.jacobian is not None:
            jacobian_map = self.jacobian(feature_map)
            out["jacobian_map"] = jacobian_map

            jacobian_map = jacobian_map.reshape(final_shape[0], self.num_jacobian_maps, 4, final_shape[2],
                                                final_shape[3])

            heatmap = heatmap.unsqueeze(2)

            jacobian = heatmap * jacobian_map
            jacobian = jacobian.view(final_shape[0], final_shape[1], 4, -1)
            jacobian = jacobian.sum(dim=-1)
            jacobian = jacobian.view(jacobian.shape[0], jacobian.shape[1], 2, 2)
            out['jacobian'] = jacobian
        out["pred_feature"] = prediction
        if with_feature:
            out["feature_map"] = feature_map
        return out
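Not part of the commit: a sketch running KPDetector on a single image. The hyperparameters are illustrative; with scale_factor=0.25 and pad=3, a 256x256 input yields keypoint maps at 64x64, and block_expansion + num_channels = 35 matches the 35-channel feature map that Audio2kpTransformer (further down) expects when with_feature=True.

import torch
from models.keypoint_detector import KPDetector

# Illustrative hyperparameters; the training config is not included in this commit.
kp_detector = KPDetector(block_expansion=32, num_kp=10, num_channels=3, max_features=1024,
                         num_blocks=5, temperature=0.1, estimate_jacobian=True,
                         scale_factor=0.25, pad=3)

img = torch.rand(1, 3, 256, 256)
with torch.no_grad():
    kp = kp_detector(img, with_feature=True)

print(kp["value"].shape)        # (1, 10, 2)   keypoint positions in [-1, 1]
print(kp["jacobian"].shape)     # (1, 10, 2, 2)
print(kp["feature_map"].shape)  # (1, 35, 64, 64) hourglass features, reused by Audio2kpTransformer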
models/resnet.py
ADDED
@@ -0,0 +1,204 @@
import torch
import torch.nn as nn


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False, dilation=dilation)


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class ResNet(nn.Module):

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None, input_channel=3):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(input_channel, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x


def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    model = ResNet(block, layers, **kwargs)
    return model


def resnet34(pretrained=False, progress=True, **kwargs):
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)
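Not part of the commit: this file is a trimmed copy of torchvision's ResNet with an extra input_channel argument. Note that _resnet ignores the pretrained/progress flags, so no ImageNet weights are ever downloaded despite the docstring. MyResNet34 in models/util.py uses it as a compact feature encoder, as sketched below.

import torch
from models.resnet import resnet34

# num_classes doubles as an embedding size and input_channel allows non-RGB inputs
# (this mirrors how MyResNet34 in models/util.py instantiates it); pretrained is accepted but ignored.
encoder = resnet34(num_classes=256, input_channel=1)
feat = encoder(torch.randn(8, 1, 256, 256))
print(feat.shape)  # torch.Size([8, 256])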
models/transformer.py
ADDED
@@ -0,0 +1,391 @@
import torch.nn as nn
import torch
from models.util import mydownres2Dblock
import numpy as np
from models.util import AntiAliasInterpolation2d, make_coordinate_grid
from sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d
import torch.nn.functional as F
import copy


class PositionalEncoding(nn.Module):

    def __init__(self, d_hid, n_position=200):
        super(PositionalEncoding, self).__init__()

        # Not a parameter
        self.register_buffer('pos_table', self._get_sinusoid_encoding_table(n_position, d_hid))

    def _get_sinusoid_encoding_table(self, n_position, d_hid):
        ''' Sinusoid position encoding table '''
        # TODO: make it with torch instead of numpy

        def get_position_angle_vec(position):
            return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]

        sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
        sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
        sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1

        return torch.FloatTensor(sinusoid_table).unsqueeze(0)

    def forward(self, winsize):
        return self.pos_table[:, :winsize].clone().detach()


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(F"activation should be relu/gelu, not {activation}.")


def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])


class Transformer(nn.Module):

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False,
                 return_intermediate_dec=True):
        super().__init__()

        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
                                          return_intermediate=return_intermediate_dec)

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, opt, src, query_embed, pos_embed):
        # flatten NxCxHxW to HWxNxC

        src = src.permute(1, 0, 2)
        pos_embed = pos_embed.permute(1, 0, 2)
        query_embed = query_embed.permute(1, 0, 2)

        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, pos=pos_embed)

        hs = self.decoder(tgt, memory,
                          pos=pos_embed, query_pos=query_embed)
        return hs


class TransformerEncoder(nn.Module):

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src, mask=None, src_key_padding_mask=None, pos=None):
        output = src + pos

        for layer in self.layers:
            output = layer(output, src_mask=mask,
                           src_key_padding_mask=src_key_padding_mask, pos=pos)

        if self.norm is not None:
            output = self.norm(output)

        return output


class TransformerDecoder(nn.Module):

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, tgt_key_padding_mask=None,
                memory_key_padding_mask=None,
                pos=None,
                query_pos=None):
        output = tgt + pos + query_pos

        intermediate = []

        for layer in self.layers:
            output = layer(output, memory, tgt_mask=tgt_mask,
                           memory_mask=memory_mask,
                           tgt_key_padding_mask=tgt_key_padding_mask,
                           memory_key_padding_mask=memory_key_padding_mask,
                           pos=pos, query_pos=query_pos)
            if self.return_intermediate:
                intermediate.append(self.norm(output))

        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                intermediate.pop()
                intermediate.append(output)

        if self.return_intermediate:
            return torch.stack(intermediate)

        return output.unsqueeze(0)


class TransformerEncoderLayer(nn.Module):

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos):
        return tensor if pos is None else tensor + pos

    def forward_post(self,
                     src,
                     src_mask=None,
                     src_key_padding_mask=None,
                     pos=None):
        # q = k = self.with_pos_embed(src, pos)
        src2 = self.self_attn(src, src, value=src, attn_mask=src_mask,
                              key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src

    def forward_pre(self, src,
                    src_mask=None,
                    src_key_padding_mask=None,
                    pos=None):
        src2 = self.norm1(src)
        # q = k = self.with_pos_embed(src2, pos)
        src2 = self.self_attn(src2, src2, value=src2, attn_mask=src_mask,
                              key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = src + self.dropout2(src2)
        return src

    def forward(self, src,
                src_mask=None,
                src_key_padding_mask=None,
                pos=None):
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)


class TransformerDecoderLayer(nn.Module):

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos):
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory,
                     tgt_mask=None,
                     memory_mask=None,
                     tgt_key_padding_mask=None,
                     memory_key_padding_mask=None,
                     pos=None,
                     query_pos=None):
        # q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(tgt, tgt, value=tgt, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        tgt2 = self.multihead_attn(query=tgt,
                                   key=memory,
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(self, tgt, memory,
                    tgt_mask=None,
                    memory_mask=None,
                    tgt_key_padding_mask=None,
                    memory_key_padding_mask=None,
                    pos=None,
                    query_pos=None):
        tgt2 = self.norm1(tgt)
        # q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(tgt2, tgt2, value=tgt2, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(query=tgt2,
                                   key=memory,
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(self, tgt, memory,
                tgt_mask=None,
                memory_mask=None,
                tgt_key_padding_mask=None,
                memory_key_padding_mask=None,
                pos=None,
                query_pos=None):
        if self.normalize_before:
            return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
                                    tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, tgt_mask, memory_mask,
                                 tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)


class Audio2kpTransformer(nn.Module):
    def __init__(self, opt):
        super(Audio2kpTransformer, self).__init__()
        self.opt = opt

        self.embedding = nn.Embedding(41, opt.embedding_dim)
        self.pos_enc = PositionalEncoding(512, 20)
        self.down_pose = AntiAliasInterpolation2d(1, 0.25)
        input_dim = 2
        self.feature_extract = nn.Sequential(mydownres2Dblock(input_dim, 32),
                                             mydownres2Dblock(32, 64),
                                             mydownres2Dblock(64, 128),
                                             mydownres2Dblock(128, 256),
                                             mydownres2Dblock(256, 512),
                                             nn.AvgPool2d(2))

        self.decode_dim = 70
        self.audio_embedding = nn.Sequential(nn.ConvTranspose2d(1, 8, (29, 14), stride=(1, 1), padding=(0, 11)),
                                             BatchNorm2d(8),
                                             nn.ReLU(inplace=True),
                                             nn.Conv2d(8, 35, (13, 13), stride=(1, 1), padding=(6, 6)))
        self.decodefeature_extract = nn.Sequential(mydownres2Dblock(self.decode_dim, 32),
                                                   mydownres2Dblock(32, 64),
                                                   mydownres2Dblock(64, 128),
                                                   mydownres2Dblock(128, 256),
                                                   mydownres2Dblock(256, 512),
                                                   nn.AvgPool2d(2))

        self.transformer = Transformer()
        self.kp = nn.Linear(512, opt.num_kp * 2)
        self.jacobian = nn.Linear(512, opt.num_kp * 4)
        self.jacobian.weight.data.zero_()
        self.jacobian.bias.data.copy_(torch.tensor([1, 0, 0, 1] * self.opt.num_kp, dtype=torch.float))
        self.criterion = nn.L1Loss()

    def create_sparse_motions(self, source_image, kp_source):
        """
        Eq 4. in the paper T_{s<-d}(z)
        """
        bs, _, h, w = source_image.shape
        identity_grid = make_coordinate_grid((h, w), type=kp_source['value'].type())
        identity_grid = identity_grid.view(1, 1, h, w, 2)
        coordinate_grid = identity_grid
        if 'jacobian' in kp_source:
            jacobian = kp_source['jacobian']
            jacobian = jacobian.unsqueeze(-3).unsqueeze(-3)
            jacobian = jacobian.repeat(1, 1, h, w, 1, 1)
            coordinate_grid = torch.matmul(jacobian, coordinate_grid.unsqueeze(-1))
            coordinate_grid = coordinate_grid.squeeze(-1)

        driving_to_source = coordinate_grid + kp_source['value'].view(bs, self.opt.num_kp, 1, 1, 2)

        # adding background feature
        identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1)
        sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1)

        return sparse_motions.permute(0, 1, 4, 2, 3).reshape(bs, (self.opt.num_kp + 1) * 2, 64, 64)

    def forward(self, x, initial_kp=None):
        bs, seqlen = x["ph"].shape
        ph = x["ph"].reshape(bs * seqlen, 1)
        pose = x["pose"].reshape(bs * seqlen, 1, 256, 256)
        input_feature = self.down_pose(pose)

        phoneme_embedding = self.embedding(ph.long())
        phoneme_embedding = phoneme_embedding.reshape(bs * seqlen, 1, 16, 16)
        phoneme_embedding = F.interpolate(phoneme_embedding, scale_factor=4)
        input_feature = torch.cat((input_feature, phoneme_embedding), dim=1)

        input_feature = self.feature_extract(input_feature).unsqueeze(-1).reshape(bs, seqlen, 512)

        audio = x["audio"].reshape(bs * seqlen, 1, 4, 41)
        decoder_feature = self.audio_embedding(audio)
        decoder_feature = F.interpolate(decoder_feature, scale_factor=2)
        decoder_feature = self.decodefeature_extract(torch.cat(
            (decoder_feature,
             initial_kp["feature_map"].unsqueeze(1).repeat(1, seqlen, 1, 1, 1).reshape(bs * seqlen, 35, 64, 64)),
            dim=1)).unsqueeze(-1).reshape(bs, seqlen, 512)

        posi_em = self.pos_enc(self.opt.num_w * 2 + 1)

        out = {}

        output_feature = self.transformer(self.opt, input_feature, decoder_feature, posi_em)[-1, self.opt.num_w]

        out["value"] = self.kp(output_feature).reshape(bs, self.opt.num_kp, 2)
        out["jacobian"] = self.jacobian(output_feature).reshape(bs, self.opt.num_kp, 2, 2)

        return out
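Not part of the commit: a sketch driving Audio2kpTransformer. From the code itself, opt.embedding_dim must be 256 (the phoneme embedding is reshaped to 1x16x16), the window length is 2*num_w+1 and must not exceed the positional table length of 20, phoneme ids range over the 41-entry embedding table, audio is expected as 4x41 feature maps per frame, and initial_kp["feature_map"] must be a 35-channel 64x64 map (KPDetector called with with_feature=True). The option values below are otherwise illustrative, and the sketch assumes mydownres2Dblock (from models/layers/residual, which is not in this commit) halves the spatial resolution as the 512-D reshape requires.

import torch
from types import SimpleNamespace
from models.transformer import Audio2kpTransformer

# Illustrative options; only embedding_dim=256 and 2*num_w+1 <= 20 are forced by the code.
opt = SimpleNamespace(embedding_dim=256, num_kp=10, num_w=3)
model = Audio2kpTransformer(opt)

bs, seqlen = 2, 2 * opt.num_w + 1
batch = {
    "ph": torch.randint(0, 41, (bs, seqlen)),       # phoneme ids per frame
    "pose": torch.rand(bs, seqlen, 1, 256, 256),    # head-pose maps, downsampled to 64x64 internally
    "audio": torch.randn(bs, seqlen, 4, 41),        # per-frame audio feature maps
}
# 35-channel, 64x64 feature map of the source image from KPDetector(..., with_feature=True)
initial_kp = {"feature_map": torch.randn(bs, 35, 64, 64)}

kp = model(batch, initial_kp=initial_kp)
print(kp["value"].shape)     # (2, 10, 2) keypoints for the window's centre frame
print(kp["jacobian"].shape)  # (2, 10, 2, 2)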
models/util.py
ADDED
@@ -0,0 +1,354 @@
from torch import nn

import torch.nn.functional as F
import torch
import cv2
import numpy as np

from models.resnet import resnet34
from models.layers.residual import Res2dBlock, Res1dBlock, DownRes2dBlock

from sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d


def myres2Dblock(indim, outdim, k_size=3, padding=1, normalize="batch", nonlinearity="relu", order="NACNAC"):
    return Res2dBlock(indim, outdim, k_size, padding, activation_norm_type=normalize, nonlinearity=nonlinearity,
                      inplace_nonlinearity=True, order=order)


def myres1Dblock(indim, outdim, k_size=3, padding=1, normalize="batch", nonlinearity="relu", order="NACNAC"):
    return Res1dBlock(indim, outdim, k_size, padding, activation_norm_type=normalize, nonlinearity=nonlinearity,
                      inplace_nonlinearity=True, order=order)


def mydownres2Dblock(indim, outdim, k_size=3, padding=1, normalize="batch", nonlinearity="leakyrelu", order="NACNAC"):
    return DownRes2dBlock(indim, outdim, k_size, padding=padding, activation_norm_type=normalize,
                          nonlinearity=nonlinearity, inplace_nonlinearity=True, order=order)


def gaussian2kp(heatmap):
    """
    Extract the mean from a heatmap
    """
    shape = heatmap.shape
    heatmap = heatmap.unsqueeze(-1)
    grid = make_coordinate_grid(shape[2:], heatmap.type()).unsqueeze_(0).unsqueeze_(0)
    value = (heatmap * grid).sum(dim=(2, 3))
    kp = {'value': value}

    return kp


def kp2gaussian(kp, spatial_size, kp_variance):
    """
    Transform a keypoint into a gaussian-like representation
    """
    mean = kp['value']  # bs*num_kp*2

    coordinate_grid = make_coordinate_grid(spatial_size, mean.type())  # h*w*2
    number_of_leading_dimensions = len(mean.shape) - 1
    shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape  # 1*1*h*w*2
    coordinate_grid = coordinate_grid.view(*shape)
    repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1)
    coordinate_grid = coordinate_grid.repeat(*repeats)  # bs*num_kp*h*w*2

    # Preprocess kp shape
    shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 2)
    mean = mean.view(*shape)

    mean_sub = (coordinate_grid - mean)

    out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)

    return out


def make_coordinate_grid(spatial_size, type):
    """
    Create a meshgrid [-1,1] x [-1,1] of given spatial_size.
    """
    h, w = spatial_size
    x = torch.arange(w).type(type)
    y = torch.arange(h).type(type)

    x = (2 * (x / (w - 1)) - 1)
    y = (2 * (y / (h - 1)) - 1)

    yy = y.view(-1, 1).repeat(1, w)
    xx = x.view(1, -1).repeat(h, 1)

    meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)

    return meshed


class ResBlock2d(nn.Module):
    """
    Res block, preserve spatial resolution.
    """

    def __init__(self, in_features, kernel_size, padding):
        super(ResBlock2d, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
                               padding=padding)
        self.conv2 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
                               padding=padding)
        self.norm1 = BatchNorm2d(in_features, affine=True)
        self.norm2 = BatchNorm2d(in_features, affine=True)

    def forward(self, x):
        out = self.norm1(x)
        out = F.relu(out, inplace=True)
        out = self.conv1(out)
        out = self.norm2(out)
        out = F.relu(out, inplace=True)
        out = self.conv2(out)
        out += x
        return out


class UpBlock2d(nn.Module):
    """
    Upsampling block for use in decoder.
    """

    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
        super(UpBlock2d, self).__init__()

        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
                              padding=padding, groups=groups)
        self.norm = BatchNorm2d(out_features, affine=True)

    def forward(self, x):
        out = F.interpolate(x, scale_factor=2)
        del x
        out = self.conv(out)
        out = self.norm(out)
        out = F.relu(out, inplace=True)
        return out


class DownBlock2d(nn.Module):
    """
    Downsampling block for use in encoder.
    """

    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
        super(DownBlock2d, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
                              padding=padding, groups=groups)
        self.norm = BatchNorm2d(out_features, affine=True)
        self.pool = nn.AvgPool2d(kernel_size=(2, 2))

    def forward(self, x):
        out = self.conv(x)
        del x
        out = self.norm(out)
        out = F.relu(out, inplace=True)
        out = self.pool(out)
        return out


class SameBlock2d(nn.Module):
    """
    Simple block, preserve spatial resolution.
    """

    def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1):
        super(SameBlock2d, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
                              kernel_size=kernel_size, padding=padding, groups=groups)
        self.norm = BatchNorm2d(out_features, affine=True)

    def forward(self, x):
        out = self.conv(x)
        out = self.norm(out)
        out = F.relu(out, inplace=True)
        return out


class Encoder(nn.Module):
    """
    Hourglass Encoder
    """

    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Encoder, self).__init__()

        down_blocks = []
        for i in range(num_blocks):
            down_blocks.append(DownBlock2d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)),
                                           min(max_features, block_expansion * (2 ** (i + 1))),
                                           kernel_size=3, padding=1))
        self.down_blocks = nn.ModuleList(down_blocks)

    def forward(self, x):
        outs = [x]
        for down_block in self.down_blocks:
            outs.append(down_block(outs[-1]))
        return outs


class Decoder(nn.Module):
    """
    Hourglass Decoder
    """

    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Decoder, self).__init__()

        up_blocks = []

        for i in range(num_blocks)[::-1]:
            in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))
            out_filters = min(max_features, block_expansion * (2 ** i))
            up_blocks.append(UpBlock2d(in_filters, out_filters, kernel_size=3, padding=1))

        self.up_blocks = nn.ModuleList(up_blocks)
        self.out_filters = block_expansion + in_features

    def forward(self, x):
        out = x.pop()
        for up_block in self.up_blocks:
            out = up_block(out)
            skip = x.pop()
            out = torch.cat([out, skip], dim=1)
        return out


class Hourglass(nn.Module):
    """
    Hourglass architecture.
    """

    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Hourglass, self).__init__()
        self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
        self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
        self.out_filters = self.decoder.out_filters

    def forward(self, x):
        return self.decoder(self.encoder(x))


class AntiAliasInterpolation2d(nn.Module):
    """
    Band-limited downsampling, for better preservation of the input signal.
    """

    def __init__(self, channels, scale):
        super(AntiAliasInterpolation2d, self).__init__()
        sigma = (1 / scale - 1) / 2
        kernel_size = 2 * round(sigma * 4) + 1
        self.ka = kernel_size // 2
        self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka

        kernel_size = [kernel_size, kernel_size]
        sigma = [sigma, sigma]
        # The gaussian kernel is the product of the
        # gaussian function of each dimension.
        kernel = 1
        meshgrids = torch.meshgrid(
            [
                torch.arange(size, dtype=torch.float32)
                for size in kernel_size
            ]
        )
        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2
            kernel *= torch.exp(-(mgrid - mean) ** 2 / (2 * std ** 2))

        # Make sure sum of values in gaussian kernel equals 1.
        kernel = kernel / torch.sum(kernel)
        # Reshape to depthwise convolutional weight
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))

        self.register_buffer('weight', kernel)
        self.groups = channels
        self.scale = scale

    def forward(self, input):
        if self.scale == 1.0:
            return input

        out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))
        out = F.conv2d(out, weight=self.weight, groups=self.groups)
        out = F.interpolate(out, scale_factor=(self.scale, self.scale))

        return out


def draw_annotation_box(image, rotation_vector, translation_vector, color=(255, 255, 255), line_width=2):
    """Draw a 3D box as annotation of pose"""

    camera_matrix = np.array(
        [[233.333, 0, 128],
         [0, 233.333, 128],
         [0, 0, 1]], dtype="double")

    dist_coeefs = np.zeros((4, 1))

    point_3d = []
    rear_size = 75
    rear_depth = 0
    point_3d.append((-rear_size, -rear_size, rear_depth))
    point_3d.append((-rear_size, rear_size, rear_depth))
    point_3d.append((rear_size, rear_size, rear_depth))
    point_3d.append((rear_size, -rear_size, rear_depth))
    point_3d.append((-rear_size, -rear_size, rear_depth))

    front_size = 100
    front_depth = 100
    point_3d.append((-front_size, -front_size, front_depth))
    point_3d.append((-front_size, front_size, front_depth))
    point_3d.append((front_size, front_size, front_depth))
    point_3d.append((front_size, -front_size, front_depth))
    point_3d.append((-front_size, -front_size, front_depth))
    point_3d = np.array(point_3d, dtype=np.float64).reshape(-1, 3)

    # Map to 2d image points
    (point_2d, _) = cv2.projectPoints(point_3d,
                                      rotation_vector,
                                      translation_vector,
                                      camera_matrix,
                                      dist_coeefs)
    point_2d = np.int32(point_2d.reshape(-1, 2))

    # Draw all the lines
    cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
    cv2.line(image, tuple(point_2d[1]), tuple(
        point_2d[6]), color, line_width, cv2.LINE_AA)
    cv2.line(image, tuple(point_2d[2]), tuple(
        point_2d[7]), color, line_width, cv2.LINE_AA)
    cv2.line(image, tuple(point_2d[3]), tuple(
        point_2d[8]), color, line_width, cv2.LINE_AA)


class up_sample(nn.Module):
    def __init__(self, scale_factor):
        super(up_sample, self).__init__()
        self.interp = nn.functional.interpolate
        self.scale_factor = scale_factor

    def forward(self, x):
        x = self.interp(x, scale_factor=self.scale_factor, mode='linear', align_corners=True)
        return x


class MyResNet34(nn.Module):
    def __init__(self, embedding_dim, input_channel=3):
        super(MyResNet34, self).__init__()
        self.resnet = resnet34(norm_layer=BatchNorm2d, num_classes=embedding_dim, input_channel=input_channel)

    def forward(self, x):
        return self.resnet(x)


class ImagePyramide(torch.nn.Module):
    """
    Create an image pyramid for computing the pyramid perceptual loss. See Sec 3.3
    """

    def __init__(self, scales, num_channels):
        super(ImagePyramide, self).__init__()
        downs = {}
        for scale in scales:
            downs[str(scale).replace('.', '-')] = AntiAliasInterpolation2d(num_channels, scale)
        self.downs = nn.ModuleDict(downs)

    def forward(self, x):
        out_dict = {}
        for scale, down_module in self.downs.items():
            out_dict['prediction_' + str(scale).replace('-', '.')] = down_module(x)
        return out_dict
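Not part of the commit: note that models/util.py imports models.layers.residual and sync_batchnorm, which are not among the 7 uploaded files, so the module only loads once the rest of the repository is present. Two of the small helpers are easy to sanity-check in isolation; a sketch (shapes illustrative):

import torch
from models.util import kp2gaussian, make_coordinate_grid, AntiAliasInterpolation2d

# Keypoints live on the [-1, 1] x [-1, 1] grid produced by make_coordinate_grid.
kp = {"value": torch.tensor([[[0.0, 0.0], [0.5, -0.5]]])}  # (bs=1, num_kp=2, 2)
heatmaps = kp2gaussian(kp, spatial_size=(64, 64), kp_variance=0.01)
print(heatmaps.shape)  # (1, 2, 64, 64): one gaussian bump per keypoint

# Band-limited downsampling: gaussian blur followed by resampling.
down = AntiAliasInterpolation2d(channels=3, scale=0.25)
img = torch.rand(1, 3, 256, 256)
print(down(img).shape)  # (1, 3, 64, 64)

grid = make_coordinate_grid((4, 4), type='torch.FloatTensor')
print(grid[0, 0], grid[-1, -1])  # corners map to (-1, -1) and (1, 1)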