'''
 * @Author: Benjay·Shaw
 * @Date: 2024-10-31 17:07:50
 * @LastEditors: Benjay·Shaw
 * @LastEditTime: 2024-10-31 17:17:04
 * @Description: Attention mechanism modules (SKConv, SELayer)
'''
import paddle
from functools import reduce


class SKConv(paddle.nn.Layer):
    """Selective Kernel convolution (SKNet-style attention).

    Runs the input through M parallel 3x3 conv branches with increasing
    dilation (so each branch has a different effective receptive field),
    fuses them by element-wise sum, and learns per-branch, per-channel
    soft-attention weights to recombine the branch outputs.
    """

    def __init__(self, in_channels, out_channels, stride=1, M=2, r=16, L=32,
                 groups=32):
        """
        :param in_channels:  number of input channels
        :param out_channels: number of output channels (the original paper
                             uses in_channels == out_channels)
        :param stride: convolution stride of every branch, default 1
        :param M: number of parallel branches
        :param r: reduction ratio used to size the squeezed feature Z
                  (the paper compresses S -> Z, so a lower bound is needed)
        :param L: lower bound on the length of feature Z, default 32
        :param groups: group count of the branch convolutions, default 32
                       (both in_channels and out_channels must be divisible
                       by it) — previously hard-coded, now configurable.
        """
        super(SKConv, self).__init__()
        # d = length of the squeezed descriptor Z, bounded below by L.
        d = max(in_channels // r, L)
        self.M = M
        self.out_channels = out_channels
        # M branches: identical 3x3 convs whose padding/dilation grow
        # together, so spatial size is preserved while the receptive
        # field differs per branch.
        self.conv = paddle.nn.LayerList()
        for i in range(M):
            self.conv.append(paddle.nn.Sequential(
                paddle.nn.Conv2D(
                    in_channels=in_channels, out_channels=out_channels,
                    kernel_size=3, stride=stride, padding=1 + i,
                    dilation=1 + i, groups=groups, bias_attr=False),
                paddle.nn.BatchNorm2D(num_features=out_channels),
                paddle.nn.ReLU()))
        self.global_pool = paddle.nn.AdaptiveAvgPool2D(output_size=1)
        # fc1: squeeze S -> Z (1x1 conv acting as a fully-connected layer).
        self.fc1 = paddle.nn.Sequential(
            paddle.nn.Conv2D(in_channels=out_channels, out_channels=d,
                             kernel_size=1, bias_attr=False),
            paddle.nn.BatchNorm2D(num_features=d),
            paddle.nn.ReLU())
        # fc2: expand Z to M sets of per-channel attention logits.
        self.fc2 = paddle.nn.Conv2D(in_channels=d,
                                    out_channels=out_channels * M,
                                    kernel_size=1, stride=1, bias_attr=False)
        # Softmax over the branch axis (axis=1 after the reshape below).
        self.softmax = paddle.nn.Softmax(axis=1)

    def forward(self, input):
        """Apply selective-kernel attention.

        :param input: 4-D tensor of shape (N, in_channels, H, W)
        :return: 4-D tensor of shape (N, out_channels, H', W')
        """
        batch_size = input.shape[0]
        # Run every branch on the same input.
        output = [conv(input) for conv in self.conv]
        # U: element-wise sum of all branch outputs (feature fusion).
        U = reduce(lambda x, y: x + y, output)
        s = self.global_pool(U)
        z = self.fc1(s)
        a_b = self.fc2(z)
        # BUG FIX: Paddle's Tensor.reshape signature is reshape(shape, name=None)
        # with `shape` a list/tuple — passing the dims as separate positional
        # ints (as the original code did) binds the second dim to `name` and
        # raises a TypeError. Pass the shape as a list instead.
        a_b = a_b.reshape([batch_size, self.M, self.out_channels, -1])
        # Softmax across the M branches so per-channel weights sum to 1.
        a_b = self.softmax(a_b)
        # Split into M per-branch weight tensors and restore broadcastable
        # shape (N, C, 1, 1).
        a_b = list(a_b.chunk(chunks=self.M, axis=1))
        a_b = [w.reshape([batch_size, self.out_channels, 1, 1]) for w in a_b]
        # Weight each branch output and sum them into the final feature map.
        V = [feat * w for feat, w in zip(output, a_b)]
        V = reduce(lambda x, y: x + y, V)
        return V


class SELayer(paddle.nn.Layer):
    """Squeeze-and-Excitation channel attention.

    Globally average-pools the feature map to a per-channel descriptor,
    passes it through a bottleneck (channel -> channel//reduction ->
    channel) implemented with 1x1 convolutions, and rescales the input
    by the resulting sigmoid gate.
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        # Squeeze: spatial dims -> 1x1.
        self.avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=1)
        # Excitation bottleneck: two 1x1 convs acting as FC layers.
        self.conv1 = paddle.nn.Conv2D(
            in_channels=channel,
            out_channels=channel // reduction,
            kernel_size=1,
            padding=0)
        self.relu = paddle.nn.ReLU()
        self.conv2 = paddle.nn.Conv2D(
            in_channels=channel // reduction,
            out_channels=channel,
            kernel_size=1,
            padding=0)
        self.sigmoid = paddle.nn.Sigmoid()

    def forward(self, x):
        """Return ``x`` rescaled channel-wise by its attention gate."""
        # Squeeze -> bottleneck -> sigmoid gate, then broadcast-multiply
        # back onto the original input.
        gate = self.sigmoid(
            self.conv2(self.relu(self.conv1(self.avg_pool(x)))))
        return x * gate
