"""
Dynamic Attention Fusion Module

实现三种注意力机制的联合：
1. Channel Attention - 学习通道重要性
2. Spatial Attention - 学习空间位置重要性
3. Cross-Attention - 实现特征流之间的交互增强

参考:
- CBAM (Convolutional Block Attention Module)
- Cross-Attention from Transformer
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.config import IMPROVED_MODEL_CONFIG


class ChannelAttention(nn.Module):
    """CBAM-style channel attention.

    Learns a per-channel importance weight from global context by combining
    average-pooled and max-pooled channel descriptors through a shared MLP.
    """

    def __init__(self, in_channels, reduction_ratio=16):
        """
        Args:
            in_channels: number of input feature channels
            reduction_ratio: bottleneck ratio of the shared MLP
        """
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # Shared bottleneck MLP applied to both pooled descriptors.
        hidden = in_channels // reduction_ratio
        self.fc = nn.Sequential(
            nn.Linear(in_channels, hidden, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, in_channels, bias=False)
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """
        Args:
            x: [B, C, H, W]
        Returns:
            [B, C, 1, 1] channel attention weights in (0, 1)
        """
        batch, channels = x.shape[0], x.shape[1]

        # Global channel descriptors from both pooling branches, each run
        # through the shared MLP.
        avg_branch = self.fc(self.avg_pool(x).view(batch, channels))
        max_branch = self.fc(self.max_pool(x).view(batch, channels))

        # Sum the branches and squash into per-channel gates.
        weights = self.sigmoid(avg_branch + max_branch)
        return weights.view(batch, channels, 1, 1)


class SpatialAttention(nn.Module):
    """CBAM-style spatial attention.

    Learns a per-location importance map from channel-pooled statistics,
    helping the model focus on salient regions.
    """

    def __init__(self, kernel_size=7):
        """
        Args:
            kernel_size: conv kernel size; 7 is recommended for a larger
                receptive field
        """
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'

        # Same-size padding: (k - 1) // 2 gives 3 for k=7 and 1 for k=3.
        self.conv = nn.Conv2d(2, 1, kernel_size,
                              padding=(kernel_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """
        Args:
            x: [B, C, H, W]
        Returns:
            [B, 1, H, W] spatial attention weights in (0, 1)
        """
        # Pool along the channel axis, keeping the spatial layout.
        mean_map = x.mean(dim=1, keepdim=True)   # [B, 1, H, W]
        max_map = x.amax(dim=1, keepdim=True)    # [B, 1, H, W]

        # Stack both maps and learn spatial relations with one conv.
        pooled = torch.cat((mean_map, max_map), dim=1)  # [B, 2, H, W]
        return self.sigmoid(self.conv(pooled))


class CrossAttentionFusion(nn.Module):
    """
    Cross-attention fusion between two feature streams.

    The query comes from one stream while key/value come from the other,
    letting the query stream attend to — and be enhanced by — the other.
    """

    def __init__(self, query_channels, kv_channels, out_channels, num_heads=8, dropout=0.1):
        """
        Args:
            query_channels: channels of the query feature map
            kv_channels: channels of the key/value feature map
            out_channels: output channels (must be divisible by num_heads)
            num_heads: number of attention heads
            dropout: dropout rate on attention weights and output projection
        """
        super(CrossAttentionFusion, self).__init__()
        self.num_heads = num_heads
        self.out_channels = out_channels
        self.head_dim = out_channels // num_heads

        assert out_channels % num_heads == 0, "out_channels must be divisible by num_heads"

        # Q, K, V projection layers
        self.q_proj = nn.Linear(query_channels, out_channels)
        self.k_proj = nn.Linear(kv_channels, out_channels)
        self.v_proj = nn.Linear(kv_channels, out_channels)

        # Output projection
        self.out_proj = nn.Linear(out_channels, out_channels)
        self.dropout = nn.Dropout(dropout)

        # Layer Normalization on the attended output
        self.norm = nn.LayerNorm(out_channels)

    def forward(self, query_feat, kv_feat):
        """
        Args:
            query_feat: [B, C_q, H, W] query features (e.g. OverLoCK)
            kv_feat: [B, C_kv, H_kv, W_kv] key/value features (e.g. DINOv2);
                the spatial size may differ from the query's — cross-attention
                handles unequal sequence lengths.
        Returns:
            out: [B, out_channels, H, W] fused features on the query's grid
        """
        B, _, H, W = query_feat.shape
        n_q = H * W
        n_kv = kv_feat.shape[2] * kv_feat.shape[3]

        # Flatten spatial dims for attention: [B, C, H, W] -> [B, N, C]
        query = query_feat.flatten(2).permute(0, 2, 1)  # [B, n_q, C_q]
        kv = kv_feat.flatten(2).permute(0, 2, 1)        # [B, n_kv, C_kv]

        # Project to Q, K, V
        Q = self.q_proj(query)  # [B, n_q, out_channels]
        K = self.k_proj(kv)     # [B, n_kv, out_channels]
        V = self.v_proj(kv)     # [B, n_kv, out_channels]

        # Split into heads: [B, N, out_channels] -> [B, num_heads, N, head_dim]
        Q = Q.view(B, n_q, self.num_heads, self.head_dim).transpose(1, 2)
        # FIX: K/V must be reshaped with the kv stream's own token count
        # (previously the query's H*W was used, which broke whenever the two
        # streams had different spatial sizes).
        K = K.view(B, n_kv, self.num_heads, self.head_dim).transpose(1, 2)
        V = V.view(B, n_kv, self.num_heads, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention scores: [B, num_heads, n_q, n_kv]
        scale = self.head_dim ** -0.5
        attn_scores = torch.matmul(Q, K.transpose(-2, -1)) * scale
        attn_weights = F.softmax(attn_scores, dim=-1)
        attn_weights = self.dropout(attn_weights)

        # Apply attention to values: [B, num_heads, n_q, head_dim]
        attn_output = torch.matmul(attn_weights, V)

        # Merge heads back: [B, n_q, out_channels]
        attn_output = attn_output.transpose(1, 2).contiguous().view(B, n_q, self.out_channels)

        # Output projection + dropout + LayerNorm
        out = self.out_proj(attn_output)
        out = self.dropout(out)
        out = self.norm(out)

        # Restore the query stream's spatial layout: [B, out_channels, H, W]
        out = out.permute(0, 2, 1).view(B, self.out_channels, H, W)

        return out


class DynamicAttentionFusion(nn.Module):
    """Content-adaptive fusion of two feature streams.

    Integrates three attention mechanisms:
    1. Channel Attention: which channels matter
    2. Spatial Attention: which locations matter
    3. Cross-Attention: bidirectional interaction between the two streams
    """

    def __init__(self,
                 overlock_channels=640,
                 dino_channels=384,
                 fusion_channels=512,
                 use_channel_attn=True,
                 use_spatial_attn=True,
                 use_cross_attn=True,
                 reduction_ratio=16,
                 spatial_kernel_size=7,
                 num_heads=8,
                 dropout=0.1):
        """
        Args:
            overlock_channels: OverLoCK feature channels
            dino_channels: DINOv2 feature channels
            fusion_channels: channels of the fused output
            use_channel_attn: enable channel attention
            use_spatial_attn: enable spatial attention
            use_cross_attn: enable cross-attention
            reduction_ratio: bottleneck ratio for channel attention
            spatial_kernel_size: kernel size for spatial attention
            num_heads: number of cross-attention heads
            dropout: dropout rate
        """
        super(DynamicAttentionFusion, self).__init__()

        self.use_channel_attn = use_channel_attn
        self.use_spatial_attn = use_spatial_attn
        self.use_cross_attn = use_cross_attn

        # 1. Channel attention, one instance per stream.
        if use_channel_attn:
            self.overlock_channel_attn = ChannelAttention(overlock_channels, reduction_ratio)
            self.dino_channel_attn = ChannelAttention(dino_channels, reduction_ratio)

        # 2. Spatial attention (applied after fusion).
        if use_spatial_attn:
            self.spatial_attn = SpatialAttention(spatial_kernel_size)

        # 3. Bidirectional cross-attention, or a plain 1x1-conv fusion fallback.
        if use_cross_attn:
            half = fusion_channels // 2
            # OverLoCK queries DINOv2 ...
            self.cross_attn_o2d = CrossAttentionFusion(
                query_channels=overlock_channels,
                kv_channels=dino_channels,
                out_channels=half,
                num_heads=num_heads,
                dropout=dropout
            )
            # ... and DINOv2 queries OverLoCK.
            self.cross_attn_d2o = CrossAttentionFusion(
                query_channels=dino_channels,
                kv_channels=overlock_channels,
                out_channels=half,
                num_heads=num_heads,
                dropout=dropout
            )
            # Final 3x3 conv to blend the concatenated bidirectional features.
            self.final_fusion = nn.Sequential(
                nn.Conv2d(fusion_channels, fusion_channels, 3, padding=1),
                nn.BatchNorm2d(fusion_channels),
                nn.ReLU(inplace=True)
            )
        else:
            self.simple_fusion = nn.Sequential(
                nn.Conv2d(overlock_channels + dino_channels, fusion_channels, 1),
                nn.BatchNorm2d(fusion_channels),
                nn.ReLU(inplace=True)
            )

    def forward(self, overlock_feat, dino_feat):
        """
        Args:
            overlock_feat: [B, 640, H, W] OverLoCK features
            dino_feat: [B, 384, H', W'] DINOv2 features (resized if needed)
        Returns:
            [B, fusion_channels, H, W] fused features
        """
        target_size = overlock_feat.shape[-2:]

        # Align the DINOv2 stream to the OverLoCK spatial resolution.
        if dino_feat.shape[-2:] != target_size:
            dino_feat = F.interpolate(dino_feat, size=target_size,
                                      mode='bilinear', align_corners=False)

        # ===== Step 1: channel attention =====
        if self.use_channel_attn:
            # Re-weight each stream by its learned channel gates.
            overlock_feat = overlock_feat * self.overlock_channel_attn(overlock_feat)
            dino_feat = dino_feat * self.dino_channel_attn(dino_feat)

        # ===== Step 2: cross-attention (or plain concat) fusion =====
        if self.use_cross_attn:
            # Bidirectional enhancement: each stream queries the other,
            # then the two halves are concatenated and blended.
            enhanced = torch.cat([
                self.cross_attn_o2d(overlock_feat, dino_feat),
                self.cross_attn_d2o(dino_feat, overlock_feat),
            ], dim=1)
            fused = self.final_fusion(enhanced)
        else:
            fused = self.simple_fusion(torch.cat([overlock_feat, dino_feat], dim=1))

        # ===== Step 3: spatial attention =====
        if self.use_spatial_attn:
            fused = fused * self.spatial_attn(fused)

        return fused


def test_dynamic_fusion():
    """Smoke-test DynamicAttentionFusion end to end on random inputs."""
    print("Testing DynamicAttentionFusion...")

    # Random stand-ins for the two backbone feature maps.
    B, H, W = 4, 16, 16
    overlock_feat = torch.randn(B, 640, H, W)
    dino_feat = torch.randn(B, 384, H, W)

    # All three attention mechanisms enabled.
    fusion = DynamicAttentionFusion(
        overlock_channels=640,
        dino_channels=384,
        fusion_channels=512,
        use_channel_attn=True,
        use_spatial_attn=True,
        use_cross_attn=True,
    )

    output = fusion(overlock_feat, dino_feat)

    print(f"Input shapes:")
    print(f"  OverLoCK: {overlock_feat.shape}")
    print(f"  DINOv2:   {dino_feat.shape}")
    print(f"Output shape: {output.shape}")
    print(f"Expected: [{B}, 512, {H}, {W}]")

    expected = (B, 512, H, W)
    assert output.shape == expected, "Output shape mismatch!"
    print("✅ Test passed!")


if __name__ == '__main__':
    test_dynamic_fusion()
