import torch
import torch.nn as nn
import torch.nn.functional as F

from models.osmnet import L2NormDense, input_norm


class BN_Conv2d_Leaky(nn.Module):
    """Conv2d -> BatchNorm2d -> LeakyReLU building block.

    The convolution defaults to ``bias=False`` because the following
    BatchNorm2d provides its own learnable shift, which makes a conv
    bias redundant.
    """

    def __init__(self, in_channels: int, out_channels: int,
                 kernel_size: int, stride: int, padding: int,
                 dilation: int = 1, groups: int = 1,
                 bias: bool = False) -> None:
        """
        Param
        ----
        in_channels :   number of input feature channels
        out_channels :  number of output feature channels
        kernel_size, stride, padding, dilation, groups, bias :
            forwarded verbatim to nn.Conv2d
        """
        super(BN_Conv2d_Leaky, self).__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(in_channels, out_channels,
                      kernel_size=kernel_size, stride=stride,
                      padding=padding, dilation=dilation,
                      groups=groups, bias=bias),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, x):
        # Activation applied outside the Sequential; default
        # negative_slope (0.01) of F.leaky_relu is used.
        return F.leaky_relu(self.seq(x))


class ResidualBlock(nn.Module):
    """Grouped-convolution residual unit used inside the CSP stages.

    Main path: 3x3 grouped conv (BN + LeakyReLU) followed by a plain
    3x3 grouped conv + BN; activation is applied only after the merge
    with the shortcut.  The shortcut uses a 1x1 projection when the
    channel count changes and average pooling when the main path
    downsamples.

    NOTE(review): conv2 keeps nn.Conv2d's default ``bias=True`` even
    though a BatchNorm2d follows; harmless but redundant.  Left as-is to
    keep state_dict keys/shapes compatible with existing checkpoints.
    """

    def __init__(self, in_channels, cardinality, group_width, stride=1):
        super(ResidualBlock, self).__init__()
        self.out_channels = cardinality * group_width
        self.conv1 = BN_Conv2d_Leaky(in_channels, self.out_channels,
                                     3, stride, 1, groups=cardinality)
        self.conv2 = nn.Conv2d(self.out_channels, self.out_channels,
                               3, 1, 1, groups=cardinality)
        self.bn = nn.BatchNorm2d(self.out_channels)

        # Assemble the shortcut from the pieces the input/stride require.
        shortcut_layers = []
        if in_channels != self.out_channels:
            shortcut_layers += [
                nn.Conv2d(in_channels, self.out_channels, 1, 1, 0),
                nn.BatchNorm2d(self.out_channels),
            ]
        if stride != 1:
            shortcut_layers.append(nn.AvgPool2d(stride))
        self.shortcut = nn.Sequential(*shortcut_layers)

    def forward(self, x):
        residual = self.shortcut(x)
        main = self.bn(self.conv2(self.conv1(x)))
        return F.leaky_relu(main + residual)


class CSPResBlock(nn.Module):
    """Cross-Stage-Partial residual stage.

    The incoming feature map is widened to ``in_channels`` by a 1x1
    transition, split channel-wise into two partial streams (c0 / c1),
    processed separately (a single 1x1 conv vs. a chain of
    ResidualBlocks), then concatenated and fused back to
    ``in_channels`` by a final 1x1 conv — so the stage preserves its
    nominal channel count.

    NOTE(review): ``group_with`` is presumably a typo for
    ``group_width``; kept as-is because renaming a parameter would
    break keyword callers.
    """

    def __init__(self, in_channels, num_blocks,        
        cardinality, group_with, stride=1):

        super(CSPResBlock, self).__init__()
        # Sizes of the two channel partitions (c0 + c1 == in_channels).
        self.c0 = in_channels // 2
        self.c1 = in_channels - in_channels // 2
        # Working width of both partial streams.
        self.hidden_channels = cardinality * group_with
        # The stage's output width equals its nominal input width.
        self.out_channels = in_channels

        # NOTE(review): declared as hidden_channels -> in_channels but
        # applied to the raw stage input in forward(); the actual input
        # must therefore carry hidden_channels channels.  That holds in
        # CSPResNet below (each stage's output width equals the next
        # stage's hidden width) — confirm for any other caller.
        self.transition = BN_Conv2d_Leaky(
            self.hidden_channels, in_channels, 1, 1, 0
        )
        # Stream 0: single 1x1 lift, c0 -> hidden_channels.
        self.trans_part0 = nn.Sequential(
            BN_Conv2d_Leaky(self.c0, self.hidden_channels,
                1, 1, 0)
            )
        # Stream 1: num_blocks residual units, c1 -> hidden_channels.
        self.block = self.__make_block(num_blocks, self.c1, 
            cardinality, group_with, stride
        )
        # 1x1 mix after the residual chain (hidden -> hidden).
        self.trans_part1 = BN_Conv2d_Leaky(
            self.hidden_channels, self.hidden_channels, 
            1, 1, 0
        )
        # Fuse the concatenated streams back to out_channels.
        self.trans = BN_Conv2d_Leaky(
            self.hidden_channels*2, self.out_channels, 
            1, 1, 0
        )

    def __make_block(self, num_blocks, in_channels, 
        cardinality, group_with, stride):
        """Build the residual chain for stream 1.

        Only the first unit may downsample (``stride``) and changes the
        width from ``in_channels`` (= c1) to ``hidden_channels``; the
        remaining units run at hidden width with stride 1.
        """
        strides = [stride] + [1] * (num_blocks - 1)
        channels = [in_channels] + [self.hidden_channels] * (num_blocks - 1)
        return nn.Sequential(
            *[ResidualBlock(c, cardinality, group_with, s)
            for c, s in zip(channels, strides)]
        )

    def forward(self, x):
        x = self.transition(x)
        # Channel-wise split into the two partial streams.
        x0 = x[:, :self.c0, :, :]
        x1 = x[:, self.c0:, :, :]
        out0 = self.trans_part0(x0)
        out1 = self.trans_part1(self.block(x1))
        # Concatenate along channels and fuse with the final 1x1 conv.
        out = torch.cat((out0, out1), 1)
        return self.trans(out)


class CSPResNet(nn.Module):
    """CSP-ResNeXt backbone producing a dense local descriptor map.

    Pipeline: input normalisation -> 3x3 stem (``extend``) -> three CSP
    residual stages -> dropout + 3x3 reduction to ``out_ch`` channels.
    ``forward`` returns both the raw descriptor map and its
    L2-normalised version.
    """

    def __init__(self, in_channels, channels, num_blocks, 
        cadinality, out_ch):
        """
        init

        Param
        ----
        in_channels :   number of channels of the input image
        channels :      nominal width of each of the three CSP stages
        num_blocks :    number of residual units per stage
        cadinality :    cardinality of the grouped convolutions (sic:
                        presumably "cardinality"); each stage's group
                        width is the previous width divided by it
        out_ch :    channel count of the output descriptor
        """
        super(CSPResNet, self).__init__()

        # Stem: lift the input to 32 channels at full resolution.
        self.extend = BN_Conv2d_Leaky(in_channels, 32, 
            3, 1, 1)

        # Each stage's group width is chosen so its hidden width
        # (cadinality * group_width) equals the previous stage's output
        # width — CSPResBlock.transition relies on this invariant.
        self.block1 = CSPResBlock(
            channels[0], num_blocks[0], cadinality, 
            32//cadinality
        )
        self.block2 = CSPResBlock(
            channels[1], num_blocks[1], cadinality, 
            channels[0]//cadinality
        )
        self.block3 = CSPResBlock(
            channels[2], num_blocks[2], cadinality, 
            channels[1]//cadinality
        )

        # Project the final stage down to the descriptor width.
        self.reduce = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(channels[2], out_ch, 
                kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch)
        )

    def forward(self, x):
        # Normalise the raw input (helper imported from models.osmnet).
        x = input_norm(x)

        out = self.extend(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.reduce(out)

        # NOTE(review): L2NormDense is instantiated on every forward
        # call; if it is stateless (it appears to be a plain
        # normalisation layer) this is only a minor inefficiency —
        # consider creating it once in __init__.
        L2norm_feature = L2NormDense()(out)
        return out, L2norm_feature


class CSPResNet128(CSPResNet):
    """CSPResNet preset for 128x128 single-channel input patches.

    Three CSP stages of widths 32/64/128, two residual units per stage,
    cardinality 8; the descriptor defaults to 9 channels.
    """

    def __init__(self, in_channels=1, out_ch=9):
        stage_widths = [32, 64, 128]
        units_per_stage = [2, 2, 2]
        super().__init__(in_channels, stage_widths, units_per_stage, 8, out_ch)


if __name__ == "__main__":

    # Smoke test: build the network, push a dummy batch through it and
    # print the descriptor shape.  Pick the device at runtime so the
    # script also works on machines without a GPU (the original
    # hard-coded "cuda" and crashed on CPU-only hosts).
    device = "cuda" if torch.cuda.is_available() else "cpu"

    net = CSPResNet128().to(device)
    print(net)

    in_ = torch.zeros((2, 1, 128, 128)).to(device)

    _, output = net(in_)
    print(output.shape)

