'''
Author: SlytherinGe
LastEditTime: 2021-04-01 15:37:37
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import VGG, constant_init, kaiming_init, normal_init, xavier_init
from mmcv.runner import load_checkpoint

from mmdet.utils import get_root_logger
if __name__ == '__main__':
    from mmdet.models.builder import BACKBONES
else:
    from ..builder import BACKBONES


def conv3x3(in_planes, out_planes, dilation=1):
    """Return a size-preserving 3x3 convolution.

    Padding is set equal to ``dilation`` so the spatial size is unchanged
    for stride-1 input.
    """
    conv_kwargs = dict(kernel_size=3, padding=dilation, dilation=dilation)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)


def make_vgg_layer(inplanes,
                   planes,
                   num_blocks,
                   dilation=1,
                   with_bn=False,
                   ceil_mode=False):
    """Build one VGG stage as a flat list of modules.

    The stage is ``num_blocks`` repetitions of conv3x3 (optionally followed
    by BatchNorm) and ReLU, terminated by a 2x2 stride-2 max-pool.
    """
    stage = []
    in_ch = inplanes
    for _ in range(num_blocks):
        stage.append(conv3x3(in_ch, planes, dilation))
        if with_bn:
            stage.append(nn.BatchNorm2d(planes))
        stage.append(nn.ReLU(inplace=True))
        in_ch = planes
    stage.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))
    return stage


@BACKBONES.register_module()
class TwoWay_SSDVGG(nn.Module):
    """Two-stream SSD-VGG backbone for 6-channel (paired image) input.

    The forward input is split channel-wise into two 3-channel images.
    Each half runs through its own VGG stream (``features1``/``features2``)
    and its own SSD extra layers (``extra1``/``extra2``). Corresponding
    feature maps of the two streams are concatenated and fused back to
    single-stream width by 1x1-conv + BN + ReLU triples (``fusion``).

    Args:
        input_size (int): Width and height of input, from {300, 512}.
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        in_channels (int): Kept for interface compatibility; each stream is
            hard-wired to 3 input channels.
        with_last_pool (bool): Whether to keep the last max-pool of the VGG
            body. Defaults to False.
        ceil_mode (bool): ``ceil_mode`` of the VGG max-pool layers.
        out_indices (Sequence[int]): Output from which stages (stored for
            interface compatibility; not read in this file).
        out_feature_indices (Sequence[int]): Indices inside ``features*``
            whose outputs are collected (in addition to the extra layers).
            The default (22, 34) matches depth 16 without BN.
        frozen_stages (int): Stages to be frozen (-1 means none).
        l2_norm_scale (float): Initial scale for the L2Norm layers.

    Example:
        >>> self = TwoWay_SSDVGG(input_size=300, depth=16)
        >>> self.eval()
        >>> inputs = torch.rand(1, 6, 300, 300)
        >>> level_outputs = self.forward(inputs)
    """
    extra_setting = {
        300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
        512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
    }
    arch_settings = {
        11: (1, 1, 2, 2, 2),
        13: (2, 2, 2, 2, 2),
        16: (2, 2, 3, 3, 3),
        19: (2, 2, 4, 4, 4)
    }
    # Per-level channel widths of the fused outputs (one fusion triple each).
    fus_settings = {
        300: (512, 1024, 512, 256, 256, 256),
        512: (512, 1024, 512, 256, 256, 256, 256)
    }

    def __init__(self,
                 input_size,
                 depth,
                 in_channels=3,
                 with_last_pool=False,
                 ceil_mode=True,
                 out_indices=(3, 4),
                 out_feature_indices=(22, 34),
                 frozen_stages=-1,
                 l2_norm_scale=20.):
        # TODO: in_channels for mmcv.VGG
        super(TwoWay_SSDVGG, self).__init__()
        assert input_size in (300, 512)
        self.input_size = input_size
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for vgg')
        self.stage_blocks = self.arch_settings[depth]
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.fus_layer_channels = self.fus_settings[input_size]
        self.bn_eval = True
        self.bn_frozen = False

        dilations = (1, 1, 1, 1, 1)

        # Per-stage [start, end) module-index ranges inside features*.
        # Each stage contributes num_blocks * (conv + relu) + 1 pool.
        self.range_sub_modules = []
        start_idx = 0
        for num_blocks in self.stage_blocks:
            end_idx = start_idx + num_blocks * 2 + 1
            self.range_sub_modules.append([start_idx, end_idx])
            start_idx = end_idx
        if not with_last_pool:
            self.range_sub_modules[-1][1] -= 1

        def build_vgg_stream():
            """Build one complete VGG body (list of modules) for 3-ch input."""
            layers = []
            inplanes = 3
            for i, num_blocks in enumerate(self.stage_blocks):
                planes = 64 * 2**i if i < 4 else 512
                layers.extend(
                    make_vgg_layer(
                        inplanes,
                        planes,
                        num_blocks,
                        dilation=dilations[i],
                        with_bn=False,
                        ceil_mode=ceil_mode))
                inplanes = planes
            if not with_last_pool:
                layers.pop(-1)
            # SSD head: extra pool + dilated fc6/fc7-style convolutions.
            layers.extend([
                nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
                nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
                nn.ReLU(inplace=True),
                nn.Conv2d(1024, 1024, kernel_size=1),
                nn.ReLU(inplace=True),
            ])
            return layers

        # BUGFIX: each stream must get its OWN module instances. The previous
        # revision built both nn.Sequential containers from a single shared
        # layer list, which silently tied the weights of the two streams
        # together. state_dict keys are unchanged by this fix.
        self.add_module('features1', nn.Sequential(*build_vgg_stream()))
        self.add_module('features2', nn.Sequential(*build_vgg_stream()))

        self.out_feature_indices = out_feature_indices

        # SSD extra layers, one independent set per stream.
        # BUGFIX: _make_extra_layers mutates self.inplanes while building, so
        # it must be reset before the second call; previously extra2 was
        # built from the stale value and had wrong input channels.
        self.inplanes = 1024
        self.extra1 = self._make_extra_layers(self.extra_setting[input_size])
        self.inplanes = 1024
        self.extra2 = self._make_extra_layers(self.extra_setting[input_size])

        # L2Norm on the earliest collected feature map of each stream;
        # channel count taken from the conv just before that index.
        self.l2_norm1 = L2Norm(
            self.features1[out_feature_indices[0] - 1].out_channels,
            l2_norm_scale)
        self.l2_norm2 = L2Norm(
            self.features2[out_feature_indices[0] - 1].out_channels,
            l2_norm_scale)

        # Fusion: one (1x1 conv, BN, ReLU) triple per output level, merging
        # the concatenated two-stream features (2*ch -> ch).
        fus_layers = []
        for ch in self.fus_layer_channels:
            fus_layers.extend([
                nn.Conv2d(ch * 2, ch, 1),
                nn.BatchNorm2d(ch),
                nn.ReLU(inplace=True),
            ])
        self.add_module('fusion', nn.Sequential(*fus_layers))

    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            # Both VGG streams and the fusion block share one init scheme.
            for container in (self.features1, self.features2, self.fusion):
                for m in container.modules():
                    if isinstance(m, nn.Conv2d):
                        kaiming_init(m)
                    elif isinstance(m, nn.BatchNorm2d):
                        constant_init(m, 1)
                    elif isinstance(m, nn.Linear):
                        normal_init(m, std=0.01)
        else:
            raise TypeError('pretrained must be a str or None')

        # Extra layers and L2Norm scales are always freshly initialized
        # (they are not covered by VGG pretrained checkpoints).
        for extra in (self.extra1, self.extra2):
            for m in extra.modules():
                if isinstance(m, nn.Conv2d):
                    xavier_init(m, distribution='uniform')

        constant_init(self.l2_norm1, self.l2_norm1.scale)
        constant_init(self.l2_norm2, self.l2_norm2.scale)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input of shape (N, 6, H, W); channels [0:3] feed
                stream 1 and channels [3:6] feed stream 2.

        Returns:
            tuple[Tensor]: Fused multi-scale feature maps (a bare Tensor in
            the degenerate single-level case, matching the original
            convention).
        """
        x1 = x[:, :3, :, :]
        x2 = x[:, 3:, :, :]
        outs = []
        # The two feature bodies have identical length by construction.
        for i in range(len(self.features1)):
            x1 = self.features1[i](x1)
            x2 = self.features2[i](x2)
            if i in self.out_feature_indices:
                outs.append((x1, x2))
        for i in range(len(self.extra1)):
            x1 = F.relu(self.extra1[i](x1), inplace=True)
            # BUGFIX: stream 2 previously ran through extra1 as well,
            # leaving extra2 unused.
            x2 = F.relu(self.extra2[i](x2), inplace=True)
            if i % 2 == 1:
                outs.append((x1, x2))
        # L2-normalize the earliest (largest) feature map of each stream.
        outs[0] = (self.l2_norm1(outs[0][0]), self.l2_norm2(outs[0][1]))
        # Fuse each level: concat the two streams, then conv + BN + ReLU.
        real_outs = []
        for level, (f1, f2) in enumerate(outs):
            out = self.fusion[level * 3](torch.cat((f1, f2), dim=1))  # conv
            out = self.fusion[level * 3 + 1](out)                     # bn
            out = self.fusion[level * 3 + 2](out)                     # relu
            real_outs.append(out)
        if len(real_outs) == 1:
            return real_outs[0]
        return tuple(real_outs)

    def _make_extra_layers(self, outplanes):
        """Build the SSD extra layers from a channel-spec tuple.

        An ``'S'`` entry marks a stride-2 conv whose output channels are the
        *next* entry; kernel sizes alternate between 1 and 3. NOTE: this
        method mutates ``self.inplanes`` while building — callers must set
        it before each invocation.
        """
        layers = []
        kernel_sizes = (1, 3)
        num_layers = 0
        outplane = None
        for i in range(len(outplanes)):
            if self.inplanes == 'S':
                # The entry following 'S' was already consumed as the
                # stride-2 conv's output width; just fix up inplanes.
                self.inplanes = outplane
                continue
            k = kernel_sizes[num_layers % 2]
            if outplanes[i] == 'S':
                outplane = outplanes[i + 1]
                conv = nn.Conv2d(
                    self.inplanes, outplane, k, stride=2, padding=1)
            else:
                outplane = outplanes[i]
                conv = nn.Conv2d(
                    self.inplanes, outplane, k, stride=1, padding=0)
            layers.append(conv)
            self.inplanes = outplanes[i]
            num_layers += 1
        if self.input_size == 512:
            # Final conv shrinking 2x2 -> 1x1 for the 512 configuration.
            layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1))

        return nn.Sequential(*layers)


class L2Norm(nn.Module):
    """Channel-wise L2 normalization with a learnable per-channel scale.

    Args:
        n_dims (int): Number of channels to normalize.
        scale (float, optional): Initial scale value; applied externally
            (e.g. via ``constant_init``) to the weight. Defaults to 20.
        eps (float, optional): Small constant to avoid division by zero.
            Defaults to 1e-10.
    """

    def __init__(self, n_dims, scale=20., eps=1e-10):
        super(L2Norm, self).__init__()
        self.n_dims = n_dims
        self.weight = nn.Parameter(torch.Tensor(self.n_dims))
        self.eps = eps
        self.scale = scale

    def forward(self, x):
        """Normalize ``x`` along dim 1 and rescale by the learned weight."""
        # Work in FP32 so the normalization stays stable in FP16 training,
        # then cast back to the input dtype.
        xf = x.float()
        denom = xf.pow(2).sum(1, keepdim=True).sqrt() + self.eps
        scale = self.weight[None, :, None, None].float()
        return (scale.expand_as(xf) * xf / denom).type_as(x)


if __name__ == '__main__':
    # Smoke test: build the backbone, randomly initialize it and dump the
    # resulting weights to disk.
    net = TwoWay_SSDVGG(input_size=512, depth=16)
    net.init_weights()
    state_dict = net.state_dict()
    torch.save(state_dict, '/media/gejunyao/Disk/Gejunyao/develop/temp/temp.pth')
    # Reference per-level (x1, x2) shapes for a (1, 6, 512, 512) input,
    # collected before fusion:
    #   (1, 512, 64, 64), (1, 1024, 32, 32), (1, 512, 16, 16),
    #   (1, 256, 8, 8), (1, 256, 4, 4), (1, 256, 2, 2), (1, 256, 1, 1)