maze committed on
Commit
f71ae19
1 Parent(s): 92e5aa4

Update README.md

Files changed (1): README.md (+106 -0)
README.md CHANGED
---
license: mit
---

Model weights for Fast Style Transfer. The architecture the weights correspond to:

```python
import torch
import torch.nn as nn


class TransformerNetwork(nn.Module):
    """Feedforward Transformation Network without Tanh
    reference: https://arxiv.org/abs/1603.08155
    exact architecture: https://cs.stanford.edu/people/jcjohns/papers/fast-style/fast-style-supp.pdf
    """
    def __init__(self, tanh_multiplier=None):
        super(TransformerNetwork, self).__init__()
        # Downsampling: one 9x9 conv, then two stride-2 convs
        self.ConvBlock = nn.Sequential(
            ConvLayer(3, 32, 9, 1),
            nn.ReLU(),
            ConvLayer(32, 64, 3, 2),
            nn.ReLU(),
            ConvLayer(64, 128, 3, 2),
            nn.ReLU()
        )
        # Five residual blocks at the 128-channel bottleneck resolution
        self.ResidualBlock = nn.Sequential(
            ResidualLayer(128, 3),
            ResidualLayer(128, 3),
            ResidualLayer(128, 3),
            ResidualLayer(128, 3),
            ResidualLayer(128, 3)
        )
        # Upsampling: two stride-2 transposed convs, then a 9x9 conv back to RGB
        self.DeconvBlock = nn.Sequential(
            DeconvLayer(128, 64, 3, 2, 1),
            nn.ReLU(),
            DeconvLayer(64, 32, 3, 2, 1),
            nn.ReLU(),
            ConvLayer(32, 3, 9, 1, norm="None")
        )
        self.tanh_multiplier = tanh_multiplier

    def forward(self, x):
        x = self.ConvBlock(x)
        x = self.ResidualBlock(x)
        x = self.DeconvBlock(x)
        # Optionally map the output through a scaled tanh; the None check
        # admits float as well as int multipliers
        if self.tanh_multiplier is not None:
            x = self.tanh_multiplier * torch.tanh(x)
        return x


class ConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride, norm="instance"):
        super(ConvLayer, self).__init__()
        # Reflection padding avoids the border artifacts of zero padding
        padding_size = kernel_size // 2
        self.pad = nn.ReflectionPad2d(padding_size)

        # Convolution layer
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)

        # Normalization layer
        if norm == "instance":
            self.norm = nn.InstanceNorm2d(out_channels, affine=True)
        elif norm == "batch":
            self.norm = nn.BatchNorm2d(out_channels, affine=True)
        else:
            self.norm = nn.Identity()

    def forward(self, x):
        x = self.pad(x)
        x = self.conv(x)
        x = self.norm(x)
        return x


class ResidualLayer(nn.Module):
    """
    Deep Residual Learning for Image Recognition
    https://arxiv.org/abs/1512.03385
    """
    def __init__(self, channels=128, kernel_size=3):
        super(ResidualLayer, self).__init__()
        self.conv1 = ConvLayer(channels, channels, kernel_size, stride=1)
        self.relu = nn.ReLU()
        self.conv2 = ConvLayer(channels, channels, kernel_size, stride=1)

    def forward(self, x):
        identity = x                    # preserve residual
        out = self.relu(self.conv1(x))  # 1st conv layer + activation
        out = self.conv2(out)           # 2nd conv layer
        out = out + identity            # add residual
        return out


class DeconvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride, output_padding, norm="instance"):
        super(DeconvLayer, self).__init__()

        # Transposed convolution; output_padding resolves the output-size
        # ambiguity of stride-2 upsampling
        padding_size = kernel_size // 2
        self.conv_transpose = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding_size, output_padding)

        # Normalization layer
        if norm == "instance":
            self.norm = nn.InstanceNorm2d(out_channels, affine=True)
        elif norm == "batch":
            self.norm = nn.BatchNorm2d(out_channels, affine=True)
        else:
            self.norm = nn.Identity()

    def forward(self, x):
        x = self.conv_transpose(x)
        out = self.norm(x)
        return out
```
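
For reference, a minimal sketch of loading the weights and stylizing an image. The checkpoint filename (`transformer_weights.pth`), the image filenames, and the `[0, 1]` input scaling are assumptions for illustration, not a documented interface of this repo; adjust them to match the actual `.pth` file and the scaling the weights were trained with:

```python
import torch
from PIL import Image
from torchvision import transforms

device = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical checkpoint name; substitute the actual weight file from this repo
net = TransformerNetwork().to(device)
net.load_state_dict(torch.load("transformer_weights.pth", map_location=device))
net.eval()

# Preprocess: PIL image -> float tensor in [0, 1] with a batch dimension
content = transforms.ToTensor()(Image.open("content.jpg").convert("RGB"))
content = content.unsqueeze(0).to(device)

with torch.no_grad():
    stylized = net(content)

# Clamp to a valid image range and save
stylized = stylized.squeeze(0).clamp(0, 1).cpu()
transforms.ToPILImage()(stylized).save("stylized.jpg")
```

Because the network downsamples twice with stride 2 and upsamples back with `output_padding=1`, inputs whose height and width are multiples of 4 come back at exactly their original size; other sizes are rounded up to the next multiple of 4.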