'''
Author: lhw
Date: 2020-11-06 14:50:26
LastEditTime: 2020-11-06 17:29:01
LastEditors: Please set LastEditors
Description: ResNet-vd backbone implementation
FilePath: /mtl-text-recognition/modules/resnet_vd.py
'''
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
from torchsummary import summary

class ConvBNLayer(nn.Module):
    """Conv2d + BatchNorm2d (+ optional ReLU), with an optional 2x2 average
    pool in front (the ResNet-vd downsampling trick for shortcut branches).

    Args:
        in_channels / out_channels / kernel_size / stride / groups / dilation:
            forwarded to nn.Conv2d. When dilation == 1 the padding is chosen
            to preserve spatial size; otherwise padding is 0 and callers pad
            explicitly (see BottleneckBlock.forward).
        act: 'relu' to append a ReLU; any other value means no activation.
        is_vd_mode: if True, apply a 2x2 stride-2 average pool before the conv.
        name: kept for interface compatibility with the Paddle original; unused.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, groups=1, act=None, is_vd_mode=False, dilation=1, name=None):
        super().__init__()
        self.is_vd_mode = is_vd_mode
        # Bug fix: this pool was commented out but is referenced in forward()
        # when is_vd_mode=True, which raised AttributeError. ceil_mode=True
        # matches the ResNet-vd reference implementation.
        self._pool2d_avg = nn.AvgPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True)
        # Bug fix: `dilation` was used to select the padding but never passed
        # to the conv itself, so dilated blocks silently ran undilated convs.
        self._conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                               stride=stride, padding=(kernel_size - 1) // 2 if dilation == 1 else 0,
                               dilation=dilation, groups=groups, bias=False)
        self._batch_norm = nn.BatchNorm2d(out_channels)
        self._act_op = nn.ReLU() if act == 'relu' else None

    def forward(self, x):
        if self.is_vd_mode:
            x = self._pool2d_avg(x)
        x = self._conv(x)
        x = self._batch_norm(x)
        if self._act_op is not None:
            x = self._act_op(x)
        return x

        
class BottleneckBlock(nn.Module):
    """ResNet bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

    The projection shortcut (used when ``shortcut`` is False) applies the
    "vd" average-pool trick except on the very first block or when stride == 1.

    Args:
        in_channels: channels of the block input.
        out_channels: bottleneck width; the block output has out_channels * 4.
        stride: stride of the 3x3 conv (spatial downsampling).
        shortcut: True when the input can be added directly (identity path).
        if_first: True for the first block of the first stage.
        dilation: dilation of the 3x3 conv.
        name: used only to build sub-layer names (Paddle compatibility).
    """

    def __init__(self, in_channels, out_channels, stride, shortcut=True, if_first=False, dilation=1, name=None):
        super(BottleneckBlock, self).__init__()
        self.dilation = dilation
        self.conv0 = ConvBNLayer(in_channels=in_channels, out_channels=out_channels, kernel_size=1, act='relu', name=name + '_branch2a')
        # Bug fixes vs. the original: conv1 consumes conv0's output
        # (out_channels, not in_channels) and carries the block stride and
        # dilation; conv2 consumes conv1's output and has no activation
        # (act=None — the original passed the string 'None', which is truthy
        # but still meant "no activation" only by accident of the act=='relu'
        # check).
        self.conv1 = ConvBNLayer(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=stride, act='relu', dilation=dilation, name=name + '_branch2b')
        self.conv2 = ConvBNLayer(in_channels=out_channels, out_channels=out_channels * 4, kernel_size=1, act=None, name=name + '_branch2c')
        if not shortcut:
            self.shot = ConvBNLayer(in_channels=in_channels, out_channels=out_channels * 4, kernel_size=1, is_vd_mode=False if if_first or stride == 1 else True, name=name + '_branch1')
        self.shortcut = shortcut

    def forward(self, x):
        inputs = x
        y = self.conv0(x)
        ####################################################################
        # If given dilation rate > 1, pad explicitly: the dilated 3x3 conv
        # uses padding 0, so this keeps the spatial size unchanged.
        # The performance drops without this padding.
        if self.dilation > 1:
            padding = self.dilation
            y = F.pad(y, [padding, padding, padding, padding])
        #####################################################################
        y = self.conv1(y)
        y = self.conv2(y)

        # Bug fixes vs. the original: the identity path is taken when
        # shortcut=True (self.shot only exists otherwise), torch.add needs
        # both operands, and ReLU is applied functionally rather than
        # instantiating an nn.ReLU module with a tensor argument.
        if self.shortcut:
            shot = inputs
        else:
            shot = self.shot(inputs)
        return F.relu(torch.add(y, shot))

class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs + identity/projection add.

    Args:
        in_channels: channels of the block input.
        out_channels: channels of both convs and of the block output.
        stride: stride of the first 3x3 conv (spatial downsampling).
        shortcut: True when the input can be added directly (identity path).
        if_first: True for the first block of the first stage.
        name: used only to build sub-layer names (Paddle compatibility).
    """

    def __init__(self, in_channels, out_channels, stride, shortcut=True, if_first=False, name=None):
        super(BasicBlock, self).__init__()
        self.stride = stride
        # Bug fixes vs. the original: conv0 carries the block stride, conv1
        # consumes conv0's output (out_channels, not in_channels) and has no
        # activation (ReLU is applied after the residual add), and the
        # projection shortcut outputs out_channels — a basic block has no
        # 4x expansion, so out_channels * 4 broke the residual add.
        self.conv0 = ConvBNLayer(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, act='relu', name=name + '_branch2a')
        self.conv1 = ConvBNLayer(in_channels=out_channels, out_channels=out_channels, kernel_size=3, act=None, name=name + '_branch2b')
        if not shortcut:
            self.shot = ConvBNLayer(in_channels=in_channels, out_channels=out_channels, kernel_size=1, is_vd_mode=False if if_first or stride == 1 else True, name=name + '_branch1')
        self.shortcut = shortcut

    def forward(self, x):
        inputs = x
        y = self.conv0(x)
        y = self.conv1(y)

        # Bug fixes vs. the original: when shortcut=True the identity path is
        # used (self.shot does not exist in that case), and ReLU is applied
        # functionally rather than instantiating nn.ReLU with a tensor.
        if self.shortcut:
            shot = inputs
        else:
            shot = self.shot(inputs)
        return F.relu(y + shot)


class ResNet_vd(nn.Module):
    """ResNet-vd backbone built from BasicBlock stages.

    The stem replaces ResNet's single 7x7 conv with three 3x3 convs (the
    "vd" variant). forward() returns the output feature map of each of the
    four stages as a list.

    Args:
        layers: depth configuration; 18, 34 and 50 are accepted. NOTE(review):
            layers=50 currently reuses the 34-layer BasicBlock configuration,
            as in the original code — confirm whether BottleneckBlock was
            intended there.
        output_stride: 8 or 16 selects a dilation schedule. NOTE(review): the
            schedule is computed but not wired into the blocks, matching the
            original code.
        lr_mult_list: per-stage learning-rate multipliers; stored but unused
            here (kept for interface compatibility).
    """

    def __init__(self, layers=34, output_stride=None, lr_mult_list=(0.1, 0.1, 0.2, 0.2)):
        super(ResNet_vd, self).__init__()
        self.layers = layers
        self.lr_mult_list = lr_mult_list
        supported_layers = [18, 34, 50]
        # Bug fix: an unsupported depth previously fell through and crashed
        # later with an unbound `depth`; fail fast with a clear error instead.
        if layers not in supported_layers:
            raise ValueError('layers must be one of {}, got {}'.format(supported_layers, layers))
        if layers == 18:
            depth = [2, 2, 2, 2]
        else:  # 34 or 50
            depth = [3, 4, 6, 3]

        num_channels = [64, 64, 128, 256]
        num_filters = [64, 128, 256, 512]

        # Channels of the four returned stages. NOTE(review): the *4 branch
        # assumes bottleneck blocks for layers >= 50, but this class builds
        # BasicBlocks for every depth (output = num_filters) — verify before
        # relying on feat_channels for layers=50.
        self.feat_channels = [c * 4 for c in num_filters] if layers >= 50 else num_filters

        dilation_dict = None
        if output_stride == 8:
            dilation_dict = {2: 2, 3: 4}
        elif output_stride == 16:
            dilation_dict = {3: 2}

        # Deep stem: three 3x3 convs instead of one 7x7 conv.
        self.conv1_1 = ConvBNLayer(in_channels=3, out_channels=32, kernel_size=3, stride=2, act='relu', name='conv1_1')
        self.conv1_2 = ConvBNLayer(in_channels=32, out_channels=32, kernel_size=3, stride=1, act='relu', name='conv1_2')
        self.conv1_3 = ConvBNLayer(in_channels=32, out_channels=64, kernel_size=3, stride=1, act='relu', name='conv1_3')
        self.pool2d_max = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.stage_list = []
        for block in range(len(depth)):
            shortcut = False
            block_list = []
            for i in range(depth[block]):
                conv_name = "res" + str(block + 2) + chr(97 + i)
                basic_block = BasicBlock(
                    in_channels=num_channels[block] if i == 0 else num_filters[block],
                    out_channels=num_filters[block],
                    # First block of each stage (except stage 0) downsamples.
                    stride=2 if i == 0 and block != 0 else 1,
                    shortcut=shortcut,
                    if_first=block == i == 0,
                    name=conv_name)
                # Bug fix: add_module() returns None, so the original appended
                # None to block_list and forward() crashed calling it.
                # Register the module for parameter tracking AND keep the
                # module object itself.
                self.add_module('bb_%d_%d' % (block, i), basic_block)
                block_list.append(basic_block)
                shortcut = True
            self.stage_list.append(block_list)

    def forward(self, inputs):
        """Return a list with the output feature map of each of the 4 stages."""
        y = self.conv1_1(inputs)
        y = self.conv1_2(y)
        y = self.conv1_3(y)
        y = self.pool2d_max(y)

        # Collect the output feature map of each stage.
        # (Debug print() calls removed from the original.)
        feat_list = []
        for stage in self.stage_list:
            for block in stage:
                y = block(y)
            feat_list.append(y)
        return feat_list




def numel(model):
    """Return the total number of parameters in *model*."""
    total = 0
    for param in model.parameters():
        total += param.numel()
    return total


if __name__ == "__main__":
    # Smoke test: build the backbone, print its structure and a layer summary.
    sample = torch.randn(3, 3, 32, 512)
    # sample = torch.cuda.FloatTensor(sample)
    model = ResNet_vd(layers=34, output_stride=8)
    print(model)
    summary(model, (3, 32, 512))

    # encoder_feat = model(sample)
    # print(encoder_feat.shape)
    # num_params = numel(encoder_feat)
    # print('Model params: {:4f}M'.format(num_params * 4 / 1024 / 1024))
