import torch
import torch.nn as nn

class SpatialAttention(nn.Module):
    """Spatial attention module (CBAM-style).

    Computes a per-location attention map from the channel-wise max and
    mean of the input feature map: the two 1-channel maps are concatenated,
    passed through a 3x3 convolution plus a learnable scalar bias, then
    ReLU and sigmoid.

    NOTE(review): applying ReLU before sigmoid restricts the attention
    values to [0.5, 1.0); the canonical CBAM formulation applies sigmoid
    directly. Kept as-is to preserve the original behavior.

    Args:
        in_channels: Unused; kept only for backward compatibility with
            existing callers (the module operates on channel statistics,
            so it is channel-count agnostic).
    """

    def __init__(self, in_channels):
        super(SpatialAttention, self).__init__()

        # 3x3 conv (qs) mapping the 2-channel [max, mean] stack to 1 channel.
        # bias=False: the explicit learnable bias `bs` below is the intended
        # bias term — the conv's default bias would be redundant (two
        # additive biases collapse into one effective bias).
        self.qs = nn.Conv2d(2, 1, kernel_size=3, padding=1, bias=False)
        self.bs = nn.Parameter(torch.zeros(1))  # scalar bias, zero-initialized

        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

    def forward(self, x):
        """Return the spatial attention map.

        Args:
            x: Feature tensor of shape (N, C, H, W).

        Returns:
            Attention map of shape (N, 1, H, W), values in [0.5, 1.0).
        """
        # Channel-wise max and mean, each of shape (N, 1, H, W).
        smax = torch.max(x, dim=1, keepdim=True)[0]
        savg = torch.mean(x, dim=1, keepdim=True)

        # Stack the two statistics -> (N, 2, H, W).
        S_concat = torch.cat((smax, savg), dim=1)

        # Convolution + explicit bias, then ReLU -> s'' of shape (N, 1, H, W).
        s_double_prime = self.relu(self.qs(S_concat) + self.bs)

        # Squash to the attention map A_s.
        A_s = self.sigmoid(s_double_prime)

        return A_s


def main():
    """Demo: run SpatialAttention on a random feature map and print the
    resulting attention-map shape."""
    N = 2             # batch size
    in_channels = 64  # number of input channels
    width = 32        # feature-map width
    height = 32       # feature-map height

    # Random input feature map of shape (N, C, H, W).
    x = torch.randn(N, in_channels, height, width)

    # Instantiate the spatial attention module.
    spatial_attention = SpatialAttention(in_channels)

    # Forward pass.
    A_s = spatial_attention(x)

    # Expected shape: (N, 1, H, W) -> torch.Size([2, 1, 32, 32])
    print(A_s.shape)


# Guarded entry point: the demo no longer runs as a side effect of
# importing this module.
if __name__ == "__main__":
    main()