import math
import torch.nn as nn


class CBA_M_Block(nn.Module):
    """Conv -> BatchNorm -> LeakyReLU block with optional ZeroPad and MaxPool.

    The convolution step is either a plain ``Conv2d`` or, when ``enable_moblile``
    is set, a depthwise-separable pair (depthwise conv + 1x1 pointwise conv).

    Args:
        in_channels: channels of the input feature map.
        out_channels: channels produced by the conv step.
        kernel_size: spatial size of the (depthwise or plain) conv kernel.
        conv_padding: padding applied by the (depthwise or plain) conv.
        enable_maxpool: append a 2x2 max-pool after the activation.
        maxpool_stride: stride of that max-pool (1 keeps spatial size
            when combined with the right/bottom zero padding).
        enable_zeropadding_rb: pad one zero column on the right and one zero
            row on the bottom before pooling.
        enable_moblile: use the depthwise-separable convolution instead of a
            plain one (name kept as-is for caller compatibility).
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, conv_padding=1, enable_maxpool=True, maxpool_stride=2,
                 enable_zeropadding_rb=False, enable_moblile=False):
        super(CBA_M_Block, self).__init__()

        # Normalization and activation are shared by both conv variants.
        self.bn = nn.BatchNorm2d(num_features=out_channels)
        self.activate = nn.LeakyReLU(negative_slope=0.1)

        self.enable_maxpool = enable_maxpool
        if enable_maxpool:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=maxpool_stride)

        self.enable_zeropadding_rb = enable_zeropadding_rb
        if enable_zeropadding_rb:
            # (left, right, top, bottom): pad only right and bottom edges.
            self.zeropadding_rb = nn.ZeroPad2d(padding=(0, 1, 0, 1))

        self.enable_moblile = enable_moblile
        if enable_moblile:
            # Depthwise conv (one filter per input channel, bias-free) ...
            self.depth_conv = nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, padding=conv_padding,
                                        bias=False, groups=in_channels)
            # ... followed by a 1x1 pointwise conv that mixes channels.
            self.point_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        else:
            self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                                  padding=conv_padding, bias=False)

    def forward(self, x):
        """Return ``(pooled_activation, raw_conv_output)``.

        The second element is the conv result *before* BN/activation/pooling,
        so callers can tap the feature map at full resolution.
        """
        # Conv step: depthwise-separable pair or a single plain conv.
        conv_out = (self.point_conv(self.depth_conv(x))
                    if self.enable_moblile else self.conv(x))

        y = self.activate(self.bn(conv_out))
        if self.enable_zeropadding_rb:
            y = self.zeropadding_rb(y)
        if self.enable_maxpool:
            y = self.pool(y)
        return y, conv_out


class Backbone(nn.Module):
    """Tiny-YOLO-style feature extractor built from ``CBA_M_Block`` stages.

    Produces two feature maps per forward pass:
      * ``out1`` — the deepest 1024-channel map (after all stages),
      * ``out2`` — the raw 256-channel conv output tapped from stage 5
        before its BN/activation/pooling (twice the spatial size of ``out1``'s
        input).

    Args:
        enable_moblile: forwarded to every ``CBA_M_Block``; switches all conv
            steps to depthwise-separable convolutions.
    """

    def __init__(self, enable_moblile):
        super(Backbone, self).__init__()
        # cbam = Conv2d + BatchNorm2d + Activate + MaxPool2d
        # cbazm = Conv2d + BatchNorm2d + Activate + ZeroPad2d + MaxPool2d
        # cba = Conv2d + BatchNorm2d + Activate
        #
        # Five downsampling stages, channel ladder 3->16->32->64->128->256.
        widths = (3, 16, 32, 64, 128, 256)
        for idx, (c_in, c_out) in enumerate(zip(widths[:-1], widths[1:]), start=1):
            setattr(self, 'cbam%d' % idx,
                    CBA_M_Block(in_channels=c_in, out_channels=c_out, enable_moblile=enable_moblile))

        # Stride-1 pool preceded by right/bottom zero padding keeps spatial size.
        self.cbazm = CBA_M_Block(in_channels=256, out_channels=512, maxpool_stride=1, enable_zeropadding_rb=True,
                                 enable_moblile=enable_moblile)

        self.cba1 = CBA_M_Block(in_channels=512, out_channels=1024, enable_maxpool=False, enable_moblile=enable_moblile)

        # Weight initialization: He-style normal (fan_out) for convs,
        # identity affine (weight=1, bias=0) for batch norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, x):
        """Run the backbone; return ``(out1, out2)`` as described on the class."""
        for stage in ('cbam1', 'cbam2', 'cbam3', 'cbam4'):
            x, _ = getattr(self, stage)(x)
        x, out2 = self.cbam5(x)   # tap the pre-BN 256-channel feature map
        x, _ = self.cbazm(x)
        out1, _ = self.cba1(x)

        return out1, out2
