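"""LambdaNet: an EDSR-style residual network with a LambdaLayer from the
lambda_networks package inserted in the middle of the trunk. The model is
applied recurrently: each pass predicts a residual correction to the
previous output, and all intermediate outputs are returned."""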
from model import common
import torch.nn as nn
from lambda_networks import LambdaLayer
import torch.cuda.amp as amp

def make_model(args, parent=False):
    return LAMBDANET(args)

class LAMBDANET(nn.Module):
    def __init__(self, args, conv=common.default_conv):
        super(LAMBDANET, self).__init__()

        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        scale = args.scale[0]  # only used by the (currently disabled) upsampling tail

        rgb_mean = (0.4488, 0.4371, 0.4040)  # DIV2K RGB mean
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        # Lambda layer: a linear-attention-style module that captures
        # long-range interactions at the midpoint of the trunk
        layer = LambdaLayer(
            dim=n_feats,
            dim_out=n_feats,    # must be divisible by heads
            r=23,               # receptive field of the positional lambdas (23 x 23)
            dim_k=16,           # query/key depth
            heads=4,
            dim_u=4             # intra-depth dimension
        )
        # msa = attention.PyramidAttention()  # alternative attention module (disabled)

        # define head module
        m_head = [conv(args.n_colors, n_feats, kernel_size)]
        
        # define body module: residual blocks with the lambda layer in the middle
        m_body = [
            common.ResBlock(
                conv, n_feats, kernel_size, nn.PReLU(), res_scale=args.res_scale
            ) for _ in range(n_resblocks // 2)
        ]
        # m_body.append(msa)
        m_body.append(layer)
        m_body += [
            common.ResBlock(
                conv, n_feats, kernel_size, nn.PReLU(), res_scale=args.res_scale
            ) for _ in range(n_resblocks // 2)
        ]
        m_body.append(conv(n_feats, n_feats, kernel_size))

        # define tail module; the upsampler is disabled, so the network
        # operates at the input resolution
        #m_tail = [
        #    common.Upsampler(conv, scale, n_feats, act=False),
        #    conv(n_feats, args.n_colors, kernel_size)
        #]
        m_tail = [
            conv(n_feats, args.n_colors, kernel_size)
        ]

        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)

        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)

        self.recurrence = args.recurrence  # number of refinement passes
        self.detach = args.detach          # detach gradients between passes
        # self.step_detach = args.step_detach
        self.amp = args.amp                # enable mixed-precision autocast

    def forward(self, x):
        # x = self.sub_mean(x)
        with amp.autocast(enabled=self.amp):
            last_output = x
            output_lst = [None] * self.recurrence
            for i in range(self.recurrence):
                if self.detach:
                    # cut the gradient between recurrence steps
                    last_output = last_output.detach()
                out = self.head(last_output)
                res = self.body(out)
                res += out  # long skip connection around the body
                # global residual: refine the previous output
                output = self.tail(res) + last_output
                output_lst[i] = output
                last_output = output
            # x = self.add_mean(x)
            return output_lst

    def load_state_dict(self, state_dict, strict=True):
        # Tolerant loading: size mismatches and unexpected keys are only
        # fatal outside the 'tail' module, so a checkpoint with a different
        # tail can still be reused.
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name in own_state:
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    if name.find('tail') == -1:
                        raise RuntimeError('error copying parameter {}: the model '
                                           'expects size {}, but the checkpoint '
                                           'provides size {}.'
                                           .format(name, own_state[name].size(), param.size()))
            elif strict:
                if name.find('tail') == -1:
                    raise KeyError('unexpected key "{}" in state_dict'
                                   .format(name))
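

# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustration, not part of the original module).
# It assumes an argparse-style `args` namespace carrying exactly the fields
# read above, with placeholder values, and that `model/common.py` and the
# `lambda_networks` package are importable.
if __name__ == '__main__':
    import argparse
    import torch

    args = argparse.Namespace(
        n_resblocks=16, n_feats=64, scale=[1], rgb_range=255, n_colors=3,
        res_scale=1.0, recurrence=3, detach=False, amp=False,
    )
    net = make_model(args)
    x = torch.randn(1, args.n_colors, 48, 48)   # dummy input batch
    outputs = net(x)  # one refined output per recurrence step
    print([tuple(o.shape) for o in outputs])    # three (1, 3, 48, 48) tensors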