import torch
import torch.nn as nn


class ChannelAttention(nn.Module):
    """Channel-attention branch of CBAM.

    Squeezes the spatial dimensions with both average and max pooling,
    pushes each descriptor through a shared bottleneck MLP (implemented
    as 1x1 convolutions), and merges them into per-channel gates in (0, 1).
    """

    def __init__(self, in_channels: int, reduction: int = 16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # Shared bottleneck MLP: in -> in/reduction -> in, no biases.
        reduced = in_channels // reduction
        bottleneck = [
            nn.Conv2d(in_channels, reduced, 1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(reduced, in_channels, 1, bias=False),
        ]
        self.fc = nn.Sequential(*bottleneck)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Both pooled descriptors go through the same MLP before fusion.
        pooled_avg = self.avg_pool(x)
        pooled_max = self.max_pool(x)
        fused = self.fc(pooled_avg) + self.fc(pooled_max)
        # Returns gates of shape (B, C, 1, 1); caller multiplies them in.
        return self.sigmoid(fused)


class SpatialAttention(nn.Module):
    """Spatial-attention branch of CBAM.

    Collapses the channel dimension into two maps (channel-wise mean and
    max), convolves the 2-channel stack, and squashes the result to a
    single-channel gate map in (0, 1) with the input's spatial size.
    """

    def __init__(self, kernel_size: int = 7):
        super().__init__()
        # padding = kernel_size // 2 keeps H and W unchanged (odd kernels).
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Channel-wise statistics, each of shape (B, 1, H, W).
        mean_map = x.mean(dim=1, keepdim=True)
        max_map = x.max(dim=1, keepdim=True).values
        stacked = torch.cat((mean_map, max_map), dim=1)
        # Gate map of shape (B, 1, H, W); caller multiplies it in.
        return self.sigmoid(self.conv(stacked))


class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel then spatial gating.

    Applies channel attention first, rescales the features, then applies
    spatial attention to the rescaled features. Dropout is applied to the
    attention *weights* (not the features) as a regularizer.
    """

    def __init__(self, in_channels: int, reduction: int = 16, kernel_size: int = 7, dropout_rate: float = 0.1):
        super().__init__()
        self.channel_attention = ChannelAttention(in_channels, reduction)
        self.spatial_attention = SpatialAttention(kernel_size)
        # Regularizes the attention maps themselves.
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Stage 1: per-channel gates, dropped out, then applied.
        x = x * self.dropout(self.channel_attention(x))
        # Stage 2: per-location gates computed on the gated features.
        x = x * self.dropout(self.spatial_attention(x))
        return x


class SelfAttention2D(nn.Module):
    """Lightweight SAGAN-style self-attention over 2D feature maps.

    Projects the input to reduced-dimension queries/keys and full-dimension
    values via 1x1 convolutions, attends over all H*W positions, and adds
    the attended features back to the input scaled by a learned ``gamma``
    that starts at zero (so the module is initially an identity mapping).
    """

    def __init__(self, in_channels: int, reduction: int = 8, dropout_rate: float = 0.1):
        super().__init__()
        self.in_channels = in_channels
        # Never reduce below one channel.
        self.hidden_channels = max(in_channels // reduction, 1)

        self.query_conv = nn.Conv2d(in_channels, self.hidden_channels, 1)
        self.key_conv = nn.Conv2d(in_channels, self.hidden_channels, 1)
        self.value_conv = nn.Conv2d(in_channels, in_channels, 1)

        # gamma == 0 at init: the residual branch dominates until trained.
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)
        # Applied to the attention weights, not the features.
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        batch, channels, height, width = x.shape
        positions = height * width

        # Flatten spatial dims: q/k are (B, C', N), v is (B, C, N).
        q = self.query_conv(x).view(batch, -1, positions)
        k = self.key_conv(x).view(batch, -1, positions)
        v = self.value_conv(x).view(batch, -1, positions)

        # (B, N, C') x (B, C', N) -> (B, N, N) similarity, softmax over keys.
        scores = torch.bmm(q.transpose(1, 2), k)
        weights = self.dropout(self.softmax(scores))

        # (B, C, N) x (B, N, N) -> (B, C, N), back to the input's shape.
        attended = torch.bmm(v, weights.transpose(1, 2))
        attended = attended.view(batch, channels, height, width)

        # Learned residual mix; identity when gamma == 0.
        return self.gamma * attended + x
