import torch
import torch.nn as nn

class ChannelAttention(nn.Module):
    """Channel attention from CBAM (Woo et al., 2018).

    Squeezes the spatial dims with both average and max pooling, runs each
    descriptor through a shared two-layer bottleneck MLP (1x1 convs), sums
    the two results, and returns a per-channel gate in (0, 1).

    Args:
        in_planes: number of input channels.
        ratio: bottleneck reduction ratio for the shared MLP.
    """

    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # Clamp the bottleneck width to at least one channel: with the old
        # `in_planes // ratio`, any in_planes < ratio produced a 0-channel
        # conv, and (with bias=False) the gate silently became a constant 0.5.
        hidden = max(1, in_planes // ratio)
        self.fc1 = nn.Conv2d(in_planes, hidden, 1, bias=False)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Conv2d(hidden, in_planes, 1, bias=False)

        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return a (B, C, 1, 1) attention map for input x of shape (B, C, H, W)."""
        avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
        max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
        return self.sigmoid(avg_out + max_out)


class SpatialAttention(nn.Module):
    """Spatial attention from CBAM.

    Pools the channel dim with mean and max, stacks the two 1-channel maps,
    and convolves them into a single spatial gate in (0, 1).

    Args:
        kernel_size: conv kernel size; generalized from the original {3, 7}
            to any positive odd integer, with "same" padding (k - 1) // 2
            (identical padding for 3 and 7, so existing callers see no change).

    Raises:
        ValueError: if kernel_size is not a positive odd integer.
    """

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()

        # Explicit raise instead of `assert`: asserts vanish under `python -O`.
        if kernel_size < 1 or kernel_size % 2 == 0:
            raise ValueError('kernel size must be a positive odd integer')
        padding = (kernel_size - 1) // 2  # keeps H and W unchanged

        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return a (B, 1, H, W) attention map for input x of shape (B, C, H, W)."""
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        x = torch.cat([avg_out, max_out], dim=1)
        x = self.conv1(x)
        return self.sigmoid(x)


class CBAM(nn.Module):
    """Convolutional Block Attention Module.

    Applies channel attention first, then spatial attention, each as a
    multiplicative gate on the feature map.
    """

    def __init__(self, in_planes, ratio=16, kernel_size=7):
        super(CBAM, self).__init__()
        self.ca = ChannelAttention(in_planes, ratio)
        self.sa = SpatialAttention(kernel_size)

    def forward(self, x):
        # Channel gate on the input, then spatial gate on the refined map.
        refined = x * self.ca(x)
        refined = refined * self.sa(refined)
        return refined
class FusionModule(nn.Module):
    """Fuse high-frequency features with decoder features.

    Concatenates the two `in_channels`-wide inputs, re-weights the doubled
    channel stack with CBAM, then projects back to `in_channels` via a 1x1
    conv followed by BatchNorm and ReLU.
    """

    def __init__(self, in_channels=32):
        super(FusionModule, self).__init__()
        # HF and DF each carry `in_channels`; concatenation doubles the width.
        self.cbam = CBAM(in_planes=in_channels * 2)
        self.conv1x1 = nn.Conv2d(in_channels * 2, in_channels, kernel_size=1)
        self.bn = nn.BatchNorm2d(in_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, hf, df):
        """hf: high-frequency features (B, C, H, W); df: decoder features of
        the same shape. Returns the fused (B, C, H, W) tensor."""
        stacked = torch.cat([hf, df], dim=1)               # (B, 2C, H, W)
        weighted = self.cbam(stacked)                      # attention-reweighted
        return self.relu(self.bn(self.conv1x1(weighted)))  # back to (B, C, H, W)
# Usage example
if __name__ == '__main__':
    high_freq = torch.randn(1, 32, 128, 160)
    decoder_feat = torch.randn(1, 32, 128, 160)
    module = FusionModule(in_channels=32)
    fused_features = module(high_freq, decoder_feat)
    print(fused_features.shape)  # expected: torch.Size([1, 32, 128, 160])