import torch
import torch.nn as nn

class ChannelAttention(nn.Module):
    """CBAM-style channel-attention module.

    Global average- and max-pooled channel descriptors are concatenated,
    squeezed through a 1x1-conv bottleneck, expanded back, split into the
    avg/max halves, summed, and passed through a sigmoid to yield one
    attention weight per input channel.

    Args:
        in_channels: number of channels C of the input feature map.
        reduction_ratio: bottleneck reduction factor for the squeeze conv.

    Shape:
        Input:  (B, C, H, W)
        Output: (B, C, 1, 1), values in (0, 1).
    """

    def __init__(self, in_channels, reduction_ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # Squeeze (down-projection) weights and bias.
        # bias=False: the explicit parameters bd/bu below play the bias role;
        # Conv2d's default bias=True would silently add a second, redundant bias.
        self.qd = nn.Conv2d(in_channels * 2, in_channels * 2 // reduction_ratio,
                            kernel_size=1, bias=False)
        self.bd = nn.Parameter(torch.zeros(1, in_channels * 2 // reduction_ratio, 1, 1))

        # Excite (up-projection) weights and bias.
        self.qu = nn.Conv2d(in_channels * 2 // reduction_ratio, in_channels * 2,
                            kernel_size=1, bias=False)
        self.bu = nn.Parameter(torch.zeros(1, in_channels * 2, 1, 1))

        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

    def forward(self, x):
        cavg = self.avg_pool(x)  # (B, C, 1, 1)
        cmax = self.max_pool(x)  # (B, C, 1, 1)

        combined = torch.cat([cavg, cmax], dim=1)  # (B, 2C, 1, 1)

        # Squeeze, with ReLU non-linearity in the bottleneck.
        cd = self.relu(self.qd(combined) + self.bd)

        # Excite back to 2C channels. NOTE: no ReLU here — a ReLU directly
        # before the sigmoid would clamp every attention weight into
        # [0.5, 1), making it impossible to suppress a channel below 0.5.
        c_prime = self.qu(cd) + self.bu

        # Split back into the avg- and max-derived halves and fuse them.
        c_avg, c_max = c_prime.split(cavg.size(1), dim=1)

        # Per-channel attention weights A_c in (0, 1).
        return self.sigmoid(c_avg + c_max)

# --- Example usage ---
def _demo():
    """Run ChannelAttention on a random feature map and print the result shape."""
    batch_size = 1
    in_channels = 64   # number of input channels
    height, width = 32, 32  # spatial size of the feature map

    # Random input feature map of shape (N, C, H, W).
    x = torch.randn(batch_size, in_channels, height, width)

    # Instantiate the module and compute the attention weights.
    channel_attention_module = ChannelAttention(in_channels=in_channels)
    A_c = channel_attention_module(x)

    # Expected shape: torch.Size([1, 64, 1, 1]) — one weight per channel.
    print(A_c.shape)


# Guarded so importing this file does not execute the demo.
if __name__ == "__main__":
    _demo()
