import torch.nn.functional as F
import torch.nn.init as init

from utils.util import *


def make_divisible(v, divisor):
    """Round *v* up to the nearest multiple of *divisor*.

    Keeps layer channel counts divisible by the given divisor (typically 8).
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    """
    n_units = math.ceil(v / divisor)
    return n_units * divisor


# Torch operation functions below --------------------------------------------------------------------------------------
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension.

    Typically placed after nn.AdaptiveAvgPool2d(1) to drop the trailing
    spatial dimensions, turning (N, C, 1, 1) into (N, C).
    """

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)


class Concat(nn.Module):
    """Concatenate a list of tensors along a configurable dimension."""

    def __init__(self, dimension=1):
        super(Concat, self).__init__()
        # dimension along which inputs are joined (1 = channels for NCHW)
        self.d = dimension

    def forward(self, x):
        return torch.cat(x, dim=self.d)


class FeatureConcat(nn.Module):
    """Gather cached layer outputs and concatenate them along channels.

    With a single index this is a pass-through of that layer's output;
    with several indices the outputs are concatenated on dim 1.
    """

    def __init__(self, layers):
        super(FeatureConcat, self).__init__()
        self.layers = layers  # indices into the per-layer outputs list
        self.multiple = len(layers) > 1  # True when concatenation is needed

    def forward(self, x, outputs):
        if self.multiple:
            return torch.cat([outputs[idx] for idx in self.layers], 1)
        return outputs[self.layers[0]]


class WeightedFeatureFusion(nn.Module):  # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
    """Element-wise sum of the current tensor with earlier layer outputs.

    When ``weight=True`` each summand (the direct input plus every fused
    layer) is scaled by a learnable sigmoid weight, EfficientDet-style.
    """

    def __init__(self, layers, weight=False):
        super(WeightedFeatureFusion, self).__init__()
        self.layers = layers  # layer indices
        self.weight = weight  # apply weights boolean
        self.n = len(layers) + 1  # number of layers (fused layers + the direct input x)
        if weight:
            self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)  # layer weights

    def forward(self, x, outputs):
        # Weights
        if self.weight:
            w = torch.sigmoid(self.w) * (2 / self.n)  # sigmoid weights (0-1)
            x = x * w[0]

        # Fusion
        nx = x.shape[1]  # input channels
        for i in range(self.n - 1):
            a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]]  # feature to add
            na = a.shape[1]  # feature channels

            # Adjust channels: when the channel counts differ, only the
            # overlapping leading channels are summed.
            if nx == na:  # same shape
                x = x + a
            elif nx > na:  # slice input; NOTE(review): updates x's leading channels in place
                x[:, :na] = x[:, :na] + a  # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a
            else:  # slice feature
                x = x + a[:, :nx]

        return x


class MixConv2d(nn.Module):  # MixConv: Mixed Depthwise Convolutional Kernels https://arxiv.org/abs/1907.09595
    """Parallel convolutions with mixed kernel sizes, outputs concatenated.

    The ``out_ch`` output channels are divided among the kernel sizes either
    equally ('equal_ch') or so that each kernel group holds roughly the same
    number of parameters ('equal_params', the default).
    """

    def __init__(self, in_ch, out_ch, k=(3, 5, 7), stride=1, dilation=1, bias=True, method='equal_params'):
        super(MixConv2d, self).__init__()

        groups = len(k)
        if method == 'equal_ch':  # equal channels per group
            i = torch.linspace(0, groups - 1E-6, out_ch).floor()  # out_ch indices
            # cast the 0-dim count tensors to plain ints so nn.Conv2d gets a
            # proper out_channels value (previously Tensors leaked through)
            ch = [int((i == g).sum()) for g in range(groups)]
        else:  # 'equal_params': equal parameter count per group
            # Solve a @ ch = b in the least-squares sense: the first row forces
            # sum(ch) == out_ch, the remaining rows balance k**2 * ch
            # (parameter count) across neighbouring groups.
            b = [out_ch] + [0] * groups
            a = np.eye(groups + 1, groups, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            ch = np.linalg.lstsq(a, b, rcond=None)[0].round().astype(int)  # solve for equal weight indices, ax = b

        self.m = nn.ModuleList([nn.Conv2d(in_channels=in_ch,
                                          out_channels=int(ch[g]),
                                          kernel_size=k[g],
                                          stride=stride,
                                          padding=k[g] // 2,  # 'same' pad
                                          dilation=dilation,
                                          bias=bias) for g in range(groups)])

    def forward(self, x):
        # Run every kernel-size branch and concatenate along channels.
        return torch.cat([m(x) for m in self.m], 1)


class RouteGroup(nn.Module):
    """Select one channel group (of ``groups`` equal splits) per routed layer.

    Each referenced layer output is split into ``groups`` chunks along the
    channel dimension and chunk ``group_id`` is kept; with several layers the
    selected chunks are concatenated along channels.
    """

    def __init__(self, layers, groups, group_id):
        super(RouteGroup, self).__init__()
        self.layers = layers          # indices into the cached layer outputs
        self.multi = len(layers) > 1  # concatenate when routing several layers
        self.groups = groups          # number of equal channel splits
        self.group_id = group_id      # which split to keep

    def forward(self, x, outputs):
        if not self.multi:
            chunks = torch.chunk(outputs[self.layers[0]], self.groups, dim=1)
            return chunks[self.group_id]
        selected = [torch.chunk(outputs[idx], self.groups, dim=1)[self.group_id]
                    for idx in self.layers]
        return torch.cat(selected, dim=1)


# Activation functions below -------------------------------------------------------------------------------------------
class SwishImplementation(torch.autograd.Function):
    """Memory-efficient Swish: stores only the input and recomputes the
    sigmoid during the backward pass instead of caching intermediates."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return torch.sigmoid(x) * x

    @staticmethod
    def backward(ctx, grad_output):
        (inp,) = ctx.saved_tensors
        sig = torch.sigmoid(inp)
        # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
        return grad_output * sig * (1 + inp * (1 - sig))


class MishImplementation(torch.autograd.Function):
    """Memory-efficient Mish: stores only the input and recomputes the
    activation terms during the backward pass."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        # mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x)))
        return torch.tanh(F.softplus(x)).mul(x)

    @staticmethod
    def backward(ctx, grad_output):
        (inp,) = ctx.saved_tensors
        sig = torch.sigmoid(inp)
        tanh_sp = torch.tanh(F.softplus(inp))
        # d/dx mish(x) = tanh(sp) + x * sigmoid(x) * sech^2(sp)
        return grad_output * (tanh_sp + inp * sig * (1 - tanh_sp * tanh_sp))


class MemoryEfficientSwish(nn.Module):
    # nn.Module wrapper around the custom autograd Function SwishImplementation,
    # which recomputes the sigmoid in backward instead of caching activations.
    def forward(self, x):
        return SwishImplementation.apply(x)


class MemoryEfficientMish(nn.Module):
    # nn.Module wrapper around the custom autograd Function MishImplementation,
    # which recomputes activation terms in backward instead of caching them.
    def forward(self, x):
        return MishImplementation.apply(x)


class Swish(nn.Module):
    """Swish activation: x * sigmoid(x)."""

    def forward(self, x):
        return torch.sigmoid(x).mul(x)


class HardSwish(nn.Module):  # https://arxiv.org/pdf/1905.02244.pdf
    """Hard-Swish: x * hardtanh(x + 3, 0, 6) / 6, a piecewise-linear Swish."""

    def forward(self, x):
        gate = F.hardtanh(x + 3, 0., 6., True)  # in-place on the temporary x + 3
        return x * gate / 6.


class Mish(nn.Module):  # https://github.com/digantamisra98/Mish
    """Mish activation: x * tanh(softplus(x))."""

    def forward(self, x):
        return x * torch.tanh(F.softplus(x))


# @Author AIWalker https://zhuanlan.zhihu.com/p/142650829
class DyReLUA(nn.Module):
    """Dynamic ReLU, variant A.

    Predicts, per input sample, 2*k coefficients (k slopes followed by k
    intercepts) from globally pooled features and applies
    max_j(slope_j * x + intercept_j) element-wise. All channels of a sample
    share the same coefficients.
    """

    def __init__(self,
                 channels,      # number of input channels C
                 reduction=4,   # bottleneck reduction of the coefficient head
                 k=2):          # number of linear pieces in the max-out
        super().__init__()
        self.channels = channels
        self.reduction = reduction
        self.k = k

        # coefficient head: GAP -> 1x1 bottleneck conv -> 1x1 conv -> sigmoid,
        # producing 2*k values per sample
        self.coef = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels // reduction, 1),
            nn.ReLU(),
            nn.Conv2d(channels // reduction, 2 * k, 1),
            nn.Sigmoid()
        )

        # default parameter setting
        # lambdaA = 1.0, lambdaB = 0.5;
        # alphaA1 = 1, alphaA2=alphaB1=alphaB2=0
        self.register_buffer('lambdas', torch.Tensor([1.] * k + [0.5] * k).float())
        self.register_buffer('bias', torch.Tensor([1.] + [0.] * (2 * k - 1)).float())

    def forward(self, x):
        coef = self.coef(x)
        coef = 2 * coef - 1  # map sigmoid output from (0, 1) to (-1, 1)
        # (N, 2k): scale by lambdas and shift by the identity-like bias
        coef = coef.view(-1, 2 * self.k) * self.lambdas + self.bias

        # activations
        # NCHW --> CHWN1 so the (N, k) coefficients broadcast over C, H, W
        x_perm = x.permute(1, 2, 3, 0).unsqueeze(-1)
        # (C,H,W,N,1) * (N,k) + (N,k) --> (C,H,W,N,k): slopes then intercepts
        output = x_perm * coef[:, :self.k] + coef[:, self.k:]
        # max-out over the k pieces, then CHWN --> NCHW
        result = torch.max(output, dim=-1)[0].permute(3, 0, 1, 2)
        return result


class DyReLUB(nn.Module):
    """Dynamic ReLU, variant B.

    Like DyReLUA but with per-channel coefficients: 2*k values (k slopes,
    k intercepts) are predicted for every channel, and each channel gets its
    own element-wise max-out over the k linear pieces.
    """

    def __init__(self,
                 channels,      # number of input channels C
                 reduction=4,   # bottleneck reduction of the coefficient head
                 k=2):          # number of linear pieces in the max-out
        super().__init__()
        self.channels = channels
        self.reduction = reduction
        self.k = k

        # coefficient head: GAP -> 1x1 bottleneck conv -> 1x1 conv -> sigmoid,
        # producing 2*k values per channel per sample
        self.coef = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels // reduction, 1),
            nn.ReLU(),
            nn.Conv2d(channels // reduction, 2 * k * channels, 1),
            nn.Sigmoid()
        )

        # default parameter setting
        # lambdaA = 1.0, lambdaB = 0.5;
        # alphaA1 = 1, alphaA2=alphaB1=alphaB2=0
        self.register_buffer('lambdas', torch.Tensor([1.] * k + [0.5] * k).float())
        self.register_buffer('bias', torch.Tensor([1.] + [0.] * (2 * k - 1)).float())

    def forward(self, x):
        coef = self.coef(x)
        coef = 2 * coef - 1  # map sigmoid output from (0, 1) to (-1, 1)

        # coefficient update: (N, C, 2k), scaled by lambdas, shifted by bias
        coef = coef.view(-1, self.channels, 2 * self.k) * self.lambdas + self.bias

        # activations
        # NCHW --> HWNC1 so the (N, C, k) coefficients broadcast over H, W
        x_perm = x.permute(2, 3, 0, 1).unsqueeze(-1)
        # (H,W,N,C,1) * (N,C,k) + (N,C,k) --> (H,W,N,C,k)
        output = x_perm * coef[:, :, :self.k] + coef[:, :, self.k:]
        # maxout and HWNC --> NCHW
        result = torch.max(output, dim=-1)[0].permute(2, 3, 0, 1)
        return result


class DyReLUC(nn.Module):
    """Dynamic ReLU, variant C.

    Per-channel piecewise-linear coefficients as in DyReLUB, additionally
    modulated by a spatial attention map: a 1x1 conv produces one logit per
    position, which is softmax-normalized over H*W, scaled by gamma*H*W and
    clamped to [0, 1].
    """

    def __init__(self,
                 channels,
                 reduction=4,
                 k=2,
                 tau=10,
                 gamma=1 / 3):
        super().__init__()
        self.channels = channels
        self.reduction = reduction
        self.k = k
        self.tau = tau      # temperature of the spatial softmax
        self.gamma = gamma  # spatial attention scale factor

        self.coef = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels // reduction, 1),
            nn.ReLU(),
            nn.Conv2d(channels // reduction, 2 * k * channels, 1),
            nn.Sigmoid()
        )
        # single-channel spatial logit head (misspelled attribute name kept
        # for state_dict backward compatibility)
        self.sptial = nn.Conv2d(channels, 1, 1)

        # default parameter setting
        # lambdaA = 1.0, lambdaB = 0.5;
        # alphaA1 = 1, alphaA2=alphaB1=alphaB2=0
        self.register_buffer('lambdas', torch.Tensor([1.] * k + [0.5] * k).float())
        self.register_buffer('bias', torch.Tensor([1.] + [0.] * (2 * k - 1)).float())

    def forward(self, x):
        N, C, H, W = x.size()
        coef = self.coef(x)
        coef = 2 * coef - 1  # map sigmoid output from (0, 1) to (-1, 1)

        # coefficient update: (N, C, 2k)
        coef = coef.view(-1, self.channels, 2 * self.k) * self.lambdas + self.bias

        # spatial attention
        gamma = self.gamma * H * W
        spatial = self.sptial(x)  # (N, 1, H, W)
        # BUGFIX: the head emits a single channel, so flatten to (N, 1, H*W)
        # (the old view(N, channels, -1) crashed for channels > 1), and use
        # true division by tau — floor division ('//') truncated the logits.
        spatial = spatial.view(N, 1, -1) / self.tau
        spatial = torch.softmax(spatial, dim=-1) * gamma
        spatial = torch.clamp(spatial, 0, 1).view(N, 1, H, W)

        # activations
        # NCHW --> HWNC1
        x_perm = x.permute(2, 3, 0, 1).unsqueeze(-1)
        # (H,W,N,C,1) * (N,C,k) + (N,C,k) --> (H,W,N,C,k)
        output = x_perm * coef[:, :, :self.k] + coef[:, :, self.k:]

        # permute spatial from NCHW to HWNC1 and modulate the pieces
        spatial = spatial.permute(2, 3, 0, 1).unsqueeze(-1)
        output = spatial * output

        # maxout and HWNC --> NCHW
        result = torch.max(output, dim=-1)[0].permute(2, 3, 0, 1)
        return result


# Conv blocks bellow ---------------------------------------------------------------------------------------------------

class SELayer(nn.Module):
    """Squeeze-and-Excitation: re-weight channels by globally pooled statistics.

    ``forward`` uses the linear branch ``fc1``; ``fc2`` is an equivalent
    1x1-conv formulation that is currently unused by ``forward``.
    """

    def __init__(self, in_plane, reduction=16):
        super(SELayer, self).__init__()
        # squeeze: global pooling
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # excitation: channel reduction with ratio r; sigmoid(W2(relu(W1)))
        self.fc1 = nn.Sequential(
            nn.Linear(in_plane, in_plane // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(in_plane // reduction, in_plane),
            nn.Sigmoid())
        # conv variant of the excitation; BUGFIX: the second conv must map the
        # reduced channels back to in_plane (it previously expected in_plane
        # inputs, which crashed whenever this branch was used)
        self.fc2 = nn.Sequential(
            nn.Conv2d(in_plane, in_plane // reduction, 1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_plane // reduction, in_plane, 1, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc1(y).view(b, c, 1, 1)
        return x * y


class ChannelAttentionBlock(nn.Module):
    """CBAM channel attention.

    Average- and max-pooled channel descriptors are passed through a shared
    bottleneck MLP (1x1 convs) and summed. Returns both the re-weighted input
    and the raw (pre-sigmoid) attention logits.
    """

    #  Dimension reduction ratio, default=16
    def __init__(self, in_planes, ratio=16):
        super(ChannelAttentionBlock, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # shared MLP implemented with 1x1 convolutions
        self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)

        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        def excite(pooled):
            # shared excitation applied to a pooled (N, C, 1, 1) descriptor
            return self.fc2(self.relu1(self.fc1(pooled)))

        out = excite(self.avg_pool(x)) + excite(self.max_pool(x))
        return x * self.sigmoid(out).expand_as(x), out


class SpatialAttentionBlock(nn.Module):
    """CBAM spatial attention.

    Channel-wise mean and max maps are stacked (N, 2, H, W) and convolved down
    to one channel. Returns both the re-weighted input and the raw
    (pre-sigmoid) attention map.

    BUGFIX: the ``acblock`` flag was inverted — ``acblock=True`` previously
    built the plain conv and ``acblock=False`` built ``ACCovnBlock``. Now the
    flag selects the asymmetric-conv block as its name implies, and the
    default (False) yields the standard CBAM plain convolution.
    """

    def __init__(self, kernel_size=7, acblock=False):
        super(SpatialAttentionBlock, self).__init__()

        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1  # 'same' padding for stride 1
        if acblock:
            self.conv1 = ACCovnBlock(2, 1, kernel_size, padding=padding)
        else:
            self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)    # (N, 1, H, W)
        max_out, _ = torch.max(x, dim=1, keepdim=True)  # (N, 1, H, W)
        y = torch.cat([avg_out, max_out], dim=1)        # (N, 2, H, W)
        y = self.conv1(y)
        y1 = self.sigmoid(y)
        return x * y1.expand_as(x), y


class CBAM(nn.Module):
    """Convolutional Block Attention Module with selectable branch ordering.

    order=1: channel attention then spatial attention;
    order=2: spatial attention then channel attention;
    order=3: both branches in parallel — their raw attention maps are summed,
             batch-normalized and turned into a single sigmoid gate.
    Any other value leaves the input unchanged.
    """

    def __init__(self, order, in_plane, kernel_size=7, ratio=16):
        super().__init__()
        # fix of the old 'oder' typo; the misspelled attribute is kept as an
        # alias so external code reading it keeps working
        self.order = self.oder = order
        self.ca = ChannelAttentionBlock(in_plane, ratio)
        self.sa = SpatialAttentionBlock(kernel_size)
        self.sigmoid = nn.Sigmoid()
        self.bn = nn.BatchNorm2d(num_features=in_plane)

    def forward(self, x):
        if self.order == 1:
            x, _ = self.ca(x)
            x, _ = self.sa(x)
        elif self.order == 2:
            x, _ = self.sa(x)
            x, _ = self.ca(x)
        # parallel CBAM: fuse the raw maps into one gate
        elif self.order == 3:
            _, sa = self.sa(x)
            _, ca = self.ca(x)
            w = torch.add(sa, ca)  # (N,1,H,W) + (N,C,1,1) broadcasts to (N,C,H,W)
            w = self.bn(w)
            w = self.sigmoid(w)
            x = x * w.expand_as(x)
        return x


class ACCovnBlock(nn.Module):
    """Asymmetric convolution block (ACNet-style).

    Runs a square (k x k) convolution plus parallel vertical (k x 1) and
    horizontal (1 x k) convolutions, each followed by its own BatchNorm, and
    sums the three results. With ``deploy=True`` a single pre-fused conv is
    used instead; the branch-fusion step itself is not implemented in this
    file — presumably done offline, verify before relying on it.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1,
                 padding_mode='zeros', deploy=False,
                 use_affine=True, reduce_gamma=False, use_last_bn=False, gamma_init=None):
        super(ACCovnBlock, self).__init__()
        self.deploy = deploy  # whether to run the single fused conv (deploy mode)
        if deploy:
            # deploy mode: one biased conv stands in for all three branches
            self.fused_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                                        kernel_size=(kernel_size, kernel_size), stride=stride,
                                        padding=padding, dilation=dilation, groups=groups, bias=True,
                                        padding_mode=padding_mode)
        else:
            self.square_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                                         kernel_size=(kernel_size, kernel_size), stride=stride,
                                         padding=padding, dilation=dilation, groups=groups, bias=False,
                                         padding_mode=padding_mode)
            self.square_bn = nn.BatchNorm2d(num_features=out_channels, affine=use_affine)

            # Offset of the strip-conv center from the square conv's padded
            # border; it decides whether the 1-D branches pad or crop.
            center_offset_from_origin_border = padding - kernel_size // 2
            ver_pad_or_crop = (padding, center_offset_from_origin_border)
            hor_pad_or_crop = (center_offset_from_origin_border, padding)
            if center_offset_from_origin_border >= 0:  # non-negative offset: padding inside the convs suffices
                self.ver_conv_crop_layer = nn.Identity()
                ver_conv_padding = ver_pad_or_crop
                self.hor_conv_crop_layer = nn.Identity()
                hor_conv_padding = hor_pad_or_crop
            else:  # negative offset: crop the input instead of padding in the conv
                self.ver_conv_crop_layer = CropLayer(crop_set=ver_pad_or_crop)
                ver_conv_padding = (0, 0)
                self.hor_conv_crop_layer = CropLayer(crop_set=hor_pad_or_crop)
                hor_conv_padding = (0, 0)
            # vertical (kernel_size x 1) branch
            self.ver_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(kernel_size, 1),
                                      stride=stride,
                                      padding=ver_conv_padding, dilation=dilation, groups=groups, bias=False,
                                      padding_mode=padding_mode)

            # horizontal (1 x kernel_size) branch
            self.hor_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, kernel_size),
                                      stride=stride,
                                      padding=hor_conv_padding, dilation=dilation, groups=groups, bias=False,
                                      padding_mode=padding_mode)
            self.ver_bn = nn.BatchNorm2d(num_features=out_channels, affine=use_affine)
            self.hor_bn = nn.BatchNorm2d(num_features=out_channels, affine=use_affine)

            if reduce_gamma:
                assert not use_last_bn
                self.init_gamma(1.0 / 3)

            if use_last_bn:
                assert not reduce_gamma
                self.last_bn = nn.BatchNorm2d(num_features=out_channels, affine=True)

            if gamma_init is not None:
                assert not reduce_gamma
                self.init_gamma(gamma_init)

    def init_gamma(self, gamma_value):
        # Set the BN scale (gamma) of all three branches to a fixed value.
        # NOTE(review): requires use_affine=True, otherwise bn.weight is None.
        init.constant_(self.square_bn.weight, gamma_value)
        init.constant_(self.ver_bn.weight, gamma_value)
        init.constant_(self.hor_bn.weight, gamma_value)
        print('init gamma of square, ver and hor as ', gamma_value)

    def single_init(self):
        # Initialize so that only the square branch contributes at the start.
        init.constant_(self.square_bn.weight, 1.0)
        init.constant_(self.ver_bn.weight, 0.0)
        init.constant_(self.hor_bn.weight, 0.0)
        print('init gamma of square as 1, ver and hor as 0')

    def forward(self, x):
        if self.deploy:
            return self.fused_conv(x)
        else:
            # sum of the three batch-normalized branches
            square_outputs = self.square_conv(x)
            square_outputs = self.square_bn(square_outputs)
            vertical_outputs = self.ver_conv_crop_layer(x)
            vertical_outputs = self.ver_conv(vertical_outputs)
            vertical_outputs = self.ver_bn(vertical_outputs)
            horizontal_outputs = self.hor_conv_crop_layer(x)
            horizontal_outputs = self.hor_conv(horizontal_outputs)
            horizontal_outputs = self.hor_bn(horizontal_outputs)
            result = square_outputs + vertical_outputs + horizontal_outputs
            if hasattr(self, 'last_bn'):
                return self.last_bn(result)
            return result


class CropLayer(nn.Module):
    """Symmetrically crop rows and/or columns from an NCHW feature map.

    ``crop_set`` holds non-positive (row, col) values: e.g. (-1, 0) removes
    the first and last rows, (0, -1) the first and last columns.
    """

    def __init__(self, crop_set):
        super(CropLayer, self).__init__()
        self.rows_to_crop = - crop_set[0]
        self.cols_to_crop = - crop_set[1]
        assert self.rows_to_crop >= 0
        assert self.cols_to_crop >= 0

    def forward(self, x):
        rows, cols = self.rows_to_crop, self.cols_to_crop
        if rows == 0 and cols == 0:
            return x
        if rows == 0:
            return x[:, :, :, cols:-cols]
        if cols == 0:
            return x[:, :, rows:-rows, :]
        return x[:, :, rows:-rows, cols:-cols]


class SCConvBlock(nn.Module):
    """Self-calibrated convolution block (SCConv-style).

    The input is split channel-wise into halves x1 and x2; forward computes
        y2 = k1(x2)
        y1 = k4( k3(x1) * sigmoid(x1 + upsample(k2(avgpool(x1)))) )
    and returns cat([y1, y2], dim=1), restoring the input channel count.

    NOTE(review): compared with the SCNet reference, the branch assignment
    here (k1 on x2, calibration path k2-k4 on x1) should be verified against
    the paper before reuse. With the default pooling_r=1 the AvgPool is a
    kernel-1/stride-1 no-op, so no real down-sampling calibration happens —
    confirm the intended pooling_r at call sites.
    """

    def __init__(self, inplanes,
                 customk2=None,
                 padding=1, dilation=1, groups=1, stride=1, norm_layer=nn.BatchNorm2d, pooling_r=1):

        super(SCConvBlock, self).__init__()
        # k1
        # bottleneck_width = 8
        cardinality = 1
        # group_width = int(outplanes * (bottleneck_width / 64.)) * cardinality
        # inplane same as outplane
        # group_plane = int(outplanes / 64.) * cardinality  # uniformly split x into two Xs
        # each branch operates on one half of the input channels
        inplanes = int(inplanes / 2)
        outplanes = inplanes
        self.k1 = nn.Sequential(
            nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=stride,
                      padding=padding, dilation=dilation,
                      groups=groups, bias=False),
            norm_layer(outplanes),
        )
        # k2: an optional custom module (e.g. an attention block) may replace
        # the default 3x3 convolution
        if customk2:
            convk2 = customk2
        else:
            convk2 = nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=1,
                               padding=padding, dilation=dilation,
                               groups=groups, bias=False)
        self.k2 = nn.Sequential(
            nn.AvgPool2d(kernel_size=pooling_r, stride=pooling_r),
            convk2,
            norm_layer(outplanes),
        )
        self.k3 = nn.Sequential(
            nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=1,
                      padding=padding, dilation=dilation,
                      groups=groups, bias=False),
            norm_layer(outplanes),
        )
        self.k4 = nn.Sequential(
            nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=stride,
                      padding=padding, dilation=dilation,
                      groups=groups, bias=False),
            norm_layer(outplanes),
        )

    def forward(self, x):
        # split into two channel halves: x1 drives the calibrated path,
        # x2 the plain k1 path
        x = torch.chunk(x, 2, 1)
        x1 = x[0]
        identity = x1
        x2 = x[1]
        y2 = self.k1(x2)  # k1
        y1 = torch.sigmoid(
            torch.add(identity, F.interpolate(self.k2(x1), identity.size()[2:])))  # sigmoid(identity + k2)
        y1 = torch.mul(self.k3(x1), y1)  # k3 * sigmoid(identity + k2)
        y1 = self.k4(y1)  # k4

        return torch.cat([y1, y2], dim=1)


'''
The numbers in `shape` must be odd (stride-1 max pooling with
padding=(k-1)//2 only preserves the spatial size for odd kernels).
[spatialmaxpool]
# 52x52 26x26 13x13
from=75, 70, 62
shape=13, 13, 13
out_plane = 128
'''


class SpatialMaxpool(nn.Module):
    """Fuse three feature maps of decreasing resolution into one map.

    Each input is max-pooled with stride 1 (size-preserving, odd kernel),
    then brought to the middle scale: the largest map via a stride-2 3x3
    conv, the middle map via a 1x1 conv, and the smallest map via a 1x1 conv
    followed by 2x nearest-neighbor upsampling. The three results are summed.
    """

    def __init__(self, shapes, filters, out_plane=128):
        # shapes: list of stride-1 max-pool kernel sizes (must be odd so that
        #         padding=(k-1)//2 preserves the spatial size)
        # filters: list of input channel counts, one per feature map
        super(SpatialMaxpool, self).__init__()

        self.spp1 = nn.MaxPool2d(  # largest map (e.g. 52x52)
            kernel_size=shapes[0],
            stride=1,
            padding=int((shapes[0] - 1) // 2))
        # NOTE(review): despite the name, this is a 3x3 stride-2 conv — it
        # downsamples the largest map to the middle scale
        self.conv1x1_1 = nn.Conv2d(filters[0], out_plane, kernel_size=3,
                                   stride=2,
                                   padding=1)

        self.spp2 = nn.MaxPool2d(  # middle map (e.g. 26x26)
            kernel_size=shapes[1],
            stride=1,
            padding=int((shapes[1] - 1) // 2))
        self.conv1x1_2 = nn.Conv2d(filters[1], out_plane, kernel_size=1,
                                   stride=1,
                                   padding=0)

        self.spp3 = nn.MaxPool2d(  # smallest map (e.g. 13x13)
            kernel_size=shapes[2],
            stride=1,
            padding=int((shapes[2] - 1) // 2))
        self.conv1x1_3 = nn.Conv2d(filters[2], out_plane, kernel_size=1,
                                   stride=1,
                                   padding=0)

        # 2x nearest-neighbor upsample for the smallest map
        self.us_spp3 = nn.Upsample(scale_factor=2, mode='nearest')

    def forward(self, x1, x2, x3):
        # x1, x2, x3: feature maps from largest to smallest (e.g. 52, 26, 13)
        out1 = self.conv1x1_1(self.spp1(x1))  # downsampled to the middle scale
        out2 = self.conv1x1_2(self.spp2(x2))  # already at the middle scale
        out3 = self.us_spp3(self.conv1x1_3(self.spp3(x3)))  # upsampled to the middle scale
        return out1 + out2 + out3


'''
Not a regular SE block but a special variant (SSE): the channel attention
is extracted from one feature map and applied to a different one.
# layer=80
[se]
# attention feature
from=62, -1
reduction=4
out_plane=256  # this value must match the corresponding one above
'''


class SpecialSE(nn.Module):
    """SE-style gate computed from one tensor and applied to another.

    A channel descriptor is pooled from ``attention`` and mapped through a
    bottleneck MLP to ``out_plane`` sigmoid weights, which then scale ``y``.
    """

    def __init__(self, in_plane, out_plane, reduction=4):
        super(SpecialSE, self).__init__()
        self.out_plane = out_plane
        self.gap = nn.AdaptiveAvgPool2d(1)
        # squeeze-excite MLP: in_plane -> in_plane/reduction -> out_plane
        self.fc = nn.Sequential(
            nn.Linear(in_plane, in_plane // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(in_plane // reduction, out_plane, bias=False),
            nn.Sigmoid()
        )

    def forward(self, attention, y):
        # derive the gate from `attention`, apply it to `y`
        batch, chans, _, _ = attention.size()
        squeezed = self.gap(attention).view(batch, chans)
        gate = self.fc(squeezed).view(batch, self.out_plane, 1, 1)
        return gate * y


class SAM(nn.Module):
    """Build a sigmoid attention map by fusing three branches.

    Branches: the raw (pre-sigmoid) channel-attention logits (N, C, 1, 1),
    a self-calibrated conv block and an asymmetric conv block (both
    (N, C, H, W)); the channel logits broadcast over H and W when added.

    NOTE(review): forward returns the attention map itself, not the input
    re-weighted by it — confirm callers multiply it in. The single BatchNorm
    module is applied twice, so both normalizations share one set of affine
    parameters and running statistics — verify this is intentional. Also,
    ``self.ac`` hard-codes kernel_size=7 even when kernel_size=3 is requested
    (padding then no longer preserves the spatial size) — presumably it
    should be kernel_size=kernel_size; confirm before use.
    """

    def __init__(self, in_plane, ca_ratio, kernel_size):
        super().__init__()
        self.ca = ChannelAttentionBlock(in_planes=in_plane, ratio=ca_ratio)
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1
        self.sc = SCConvBlock(inplanes=in_plane)
        self.ac = ACCovnBlock(in_plane, in_plane, kernel_size=7, padding=padding)
        self.bn = nn.BatchNorm2d(num_features=in_plane)
        self.sigmod = nn.Sigmoid()  # misspelled attribute name kept (state_dict compat)

    def forward(self, x):
        _, x1 = self.ca(x)  # channel-attention logits, (N, C, 1, 1)
        x2 = self.sc(x)
        x3 = self.ac(x)
        # BN each output
        x = torch.add(x1, x2)
        x = self.bn(x)
        x = torch.add(x, x3)
        x = self.bn(x)
        x = self.sigmod(x)
        return x


class SCAM(nn.Module):
    """Self-calibrated convolution whose k2 branch is replaced by a parallel
    CBAM attention block."""

    def __init__(self, in_plane, kernel_size=3, ratio=4):
        super().__init__()
        # parallel-variant CBAM (order=3) on half the channels, injected as
        # the custom k2 module of SCConvBlock
        attention = CBAM(order=3, in_plane=int(in_plane / 2), kernel_size=kernel_size, ratio=ratio)
        self.scam = SCConvBlock(in_plane, attention)

    def forward(self, x):
        return self.scam(x)
