Delete rrdbnet_arch.py
Browse files- rrdbnet_arch.py +0 -121
rrdbnet_arch.py
DELETED
@@ -1,121 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch import nn as nn
|
3 |
-
from torch.nn import functional as F
|
4 |
-
|
5 |
-
from arch_util import default_init_weights, make_layer, pixel_unshuffle
|
6 |
-
|
7 |
-
|
8 |
-
class ResidualDenseBlock(nn.Module):
    """Residual Dense Block.

    Used in RRDB block in ESRGAN.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat=64, num_grow_ch=32):
        super().__init__()
        # Dense connectivity: the k-th conv sees the input plus the outputs of
        # all previous convs, i.e. num_feat + k * num_grow_ch input channels.
        self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1)
        self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1)
        # Final conv fuses everything back down to num_feat channels.
        self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # Small init scale (0.1) — standard for residual branches in ESRGAN.
        default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)

    def forward(self, x):
        # Accumulate the input and each activated conv output, feeding the
        # concatenation of all of them into the next conv (dense connections).
        feats = [x]
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            feats.append(self.lrelu(conv(torch.cat(feats, 1))))
        fused = self.conv5(torch.cat(feats, 1))
        # Empirically, scaling the residual by 0.2 gives better performance.
        return fused * 0.2 + x
|
39 |
-
|
40 |
-
|
41 |
-
class RRDB(nn.Module):
    """Residual in Residual Dense Block.

    Used in RRDB-Net in ESRGAN.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat, num_grow_ch=32):
        super().__init__()
        # Three stacked dense blocks form one RRDB unit.
        self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch)

    def forward(self, x):
        out = x
        for dense_block in (self.rdb1, self.rdb2, self.rdb3):
            out = dense_block(out)
        # Empirically, scaling the residual by 0.2 gives better performance.
        return out * 0.2 + x
|
63 |
-
|
64 |
-
|
65 |
-
class RRDBNet(nn.Module):
    """Networks consisting of Residual in Residual Dense Block, which is used
    in ESRGAN.

    ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.

    We extend ESRGAN for scale x2 and scale x1.
    Note: This is one option for scale 1, scale 2 in RRDBNet.
    We first employ the pixel-unshuffle (an inverse operation of pixelshuffle to reduce the spatial size
    and enlarge the channel size before feeding inputs into the main ESRGAN architecture.

    Args:
        num_in_ch (int): Channel number of inputs.
        num_out_ch (int): Channel number of outputs.
        scale (int): Upsampling scale. Supported values: 1, 2, 4, 8.
            Default: 4. (Any other value follows the x4 path in forward.)
        num_feat (int): Channel number of intermediate features.
            Default: 64
        num_block (int): Block number in the trunk network. Defaults: 23
        num_grow_ch (int): Channels for each growth. Default: 32.
    """

    def __init__(self, num_in_ch, num_out_ch, scale=4, num_feat=64, num_block=23, num_grow_ch=32):
        super(RRDBNet, self).__init__()
        self.scale = scale
        # For scale 1 and 2, pixel-unshuffle (in forward) packs spatial pixels
        # into channels, so the first conv must accept the enlarged channel count.
        if scale == 2:
            num_in_ch = num_in_ch * 4
        elif scale == 1:
            num_in_ch = num_in_ch * 16
        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)
        self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        # upsample
        self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        if scale == 8:
            # A third x2 stage is only required for x8 output.
            self.conv_up3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        # Reduce spatial size (and enlarge channels) so the fixed x4 trunk below
        # yields a net x2 / x1 scale when requested.
        if self.scale == 2:
            feat = pixel_unshuffle(x, scale=2)
        elif self.scale == 1:
            feat = pixel_unshuffle(x, scale=4)
        else:
            feat = x
        feat = self.conv_first(feat)
        body_feat = self.conv_body(self.body(feat))
        # Long skip connection around the RRDB trunk.
        feat = feat + body_feat
        # upsample: two (or three, for x8) nearest-neighbor x2 stages, each
        # followed by a conv + LeakyReLU.
        feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest')))
        feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode='nearest')))
        if self.scale == 8:
            feat = self.lrelu(self.conv_up3(F.interpolate(feat, scale_factor=2, mode='nearest')))
        out = self.conv_last(self.lrelu(self.conv_hr(feat)))
        return out
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|