from typing import List

import torch.nn as nn
from mmdet.models import NECKS
from mmcv.ops import DeformConv2dPack


@NECKS.register_module()
class DeconvUP(nn.Module):
    """Upsampling neck built from stacked conv + deconv stages.

    Each stage is ``Conv2d(3x3) -> BN -> ReLU -> ConvTranspose2d(4x4, s2)
    -> BN -> ReLU`` and doubles the spatial resolution, so the overall
    upsampling factor is ``2 ** len(out_channels_list)``.

    Args:
        in_channels (int): Number of channels of the input feature map.
        out_channels_list (List[int]): Output channels of each stage; its
            length determines the number of 2x upsampling stages.
    """

    def __init__(self, in_channels: int, out_channels_list: List[int]):
        super().__init__()
        self.deconv_layers = self._make_deconv_layer(in_channels,
                                                     out_channels_list)

    def _make_deconv_layer(self, in_channels: int,
                           out_channels_list: List[int]) -> nn.Sequential:
        """Build all upsampling stages as a single ``nn.Sequential``."""
        layers: List[nn.Module] = []
        for out_channels in out_channels_list:
            layers.extend([
                nn.Conv2d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=3,
                          stride=1,
                          padding=1,
                          bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
                # kernel 4 / stride 2 / padding 1 gives an exact 2x upsample
                nn.ConvTranspose2d(in_channels=out_channels,
                                   out_channels=out_channels,
                                   kernel_size=4,
                                   stride=2,
                                   padding=1,
                                   bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ])
            # The next stage consumes this stage's output channels.
            in_channels = out_channels
        return nn.Sequential(*layers)

    def init_weights(self):
        """Initialize the weights of the upsampling stages."""
        # nn.ConvTranspose2d is not a subclass of nn.Conv2d, so the branch
        # order is not load-bearing; elif just makes the dispatch explicit.
        for m in self.modules():
            if isinstance(m, nn.ConvTranspose2d):
                # Small-std normal keeps the deconv output small at start.
                nn.init.normal_(m.weight, std=0.001)
            elif isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, inputs):
        """Run ``inputs`` (a single feature map) through all stages."""
        return self.deconv_layers(inputs)


@NECKS.register_module()
class DeconvUPDeform(nn.Module):
    """Upsampling neck using deformable convs before each deconv stage.

    Each stage is ``DeformConv2dPack(3x3) -> BN -> ReLU ->
    ConvTranspose2d(4x4, s2) -> BN -> ReLU`` and doubles the spatial
    resolution, so the overall upsampling factor is
    ``2 ** len(out_channels_list)``.

    Args:
        in_channels (int): Number of channels of the input feature map.
        out_channels_list (List[int]): Output channels of each stage; its
            length determines the number of 2x upsampling stages.
    """

    def __init__(self, in_channels: int, out_channels_list: List[int]):
        super().__init__()
        self.deconv_layers = self._make_deconv_layer(in_channels,
                                                     out_channels_list)

    def _make_deconv_layer(self, in_channels: int,
                           out_channels_list: List[int]) -> nn.Sequential:
        """Build all upsampling stages as a single ``nn.Sequential``."""
        layers: List[nn.Module] = []
        for out_channels in out_channels_list:
            layers.extend([
                DeformConv2dPack(in_channels=in_channels,
                                 out_channels=out_channels,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1,
                                 bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
                # kernel 4 / stride 2 / padding 1 gives an exact 2x upsample
                nn.ConvTranspose2d(in_channels=out_channels,
                                   out_channels=out_channels,
                                   kernel_size=4,
                                   stride=2,
                                   padding=1,
                                   bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ])
            # The next stage consumes this stage's output channels.
            in_channels = out_channels
        return nn.Sequential(*layers)

    def init_weights(self):
        """Initialize weights, preserving zero-init of deformable offsets.

        ``DeformConv2dPack`` owns an internal ``conv_offset`` layer (a plain
        ``nn.Conv2d``) that mmcv zero-initializes so the deformable conv
        starts out behaving like a regular conv.  A generic
        ``isinstance(m, nn.Conv2d)`` branch would clobber that with Kaiming
        noise, so offset convs are identified first and re-zeroed instead.
        """
        offset_convs = {
            m.conv_offset
            for m in self.modules() if isinstance(m, DeformConv2dPack)
        }
        for m in self.modules():
            if m in offset_convs:
                # Keep offsets at zero so training starts from a plain conv.
                nn.init.constant_(m.weight, 0)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                # Small-std normal keeps the deconv output small at start.
                nn.init.normal_(m.weight, std=0.001)
            elif isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, inputs):
        """Run ``inputs`` (a single feature map) through all stages."""
        return self.deconv_layers(inputs)
