import torch
import torch.nn as nn
import torch.nn.functional as F


class ChannelAttention(nn.Module):
    """Squeeze-and-Excitation style channel attention.

    Globally average-pools the input to a per-channel descriptor, passes it
    through a two-layer 1x1-convolution bottleneck (ReLU in between), and
    rescales the input feature map by the resulting sigmoid gate.

    Args:
        in_channels: number of channels C of the (N, C, H, W) input.
        reduction_ratio: bottleneck reduction factor (default 16).
    """

    def __init__(self, in_channels, reduction_ratio=16):
        super(ChannelAttention, self).__init__()

        # Guard against a zero-width bottleneck when in_channels < reduction_ratio
        # (integer division would otherwise yield 0 hidden channels).
        hidden_channels = max(1, in_channels // reduction_ratio)

        # Global average pooling squeezes each channel map to a single value.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # 1x1 convolutions act as per-channel fully connected layers.
        self.fc1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(hidden_channels, in_channels, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return ``x`` rescaled by its learned per-channel attention weights."""
        # Squeeze: (N, C, H, W) -> (N, C, 1, 1) channel descriptor.
        avg_weight = self.avg_pool(x)

        # Excite: bottleneck MLP, then a sigmoid gate in (0, 1).
        avg_weight = self.fc1(avg_weight)
        avg_weight = self.relu(avg_weight)
        avg_weight = self.fc2(avg_weight)
        avg_weight = self.sigmoid(avg_weight)

        # Scale: broadcast the (N, C, 1, 1) gate over the spatial dimensions.
        out = x * avg_weight

        return out


# ---------------------------------------------------------------------------
# Demo / smoke test for ChannelAttention.
# (Removed a redundant mid-file `import torch`; torch is already imported at
# the top of the file.)
# ---------------------------------------------------------------------------

def main():
    """Run ChannelAttention on a random feature map and print statistics."""
    # Random input feature map of shape [1, 64, 16, 16].
    x = torch.randn(1, 64, 16, 16)

    # Channel-attention module matching the input's channel count.
    ca = ChannelAttention(64)

    # Statistics of the input feature map.
    print('Input feature map mean:', x.mean().item())
    print('Input feature map std:', x.std().item())

    # Feed the input through the ChannelAttention module.
    out = ca(x)

    # Statistics of the attended output feature map.
    print('Output feature map mean:', out.mean().item())
    print('Output feature map std:', out.std().item())


# Guard so importing this module no longer runs the demo as a side effect;
# `python <file>.py` behaves as before.
if __name__ == '__main__':
    main()
