"""
Cross-ROI Self-Attention Module

实现ROI之间的空间自注意力，让ROI能够"看到"彼此：
1. 相对位置编码 - 编码ROI之间的空间关系
2. 多头自注意力 - 建模ROI间的交互
3. 残差连接 - 保留原始特征

应用场景：
- 连续裂纹被分割成多个ROI
- 相邻区域的上下文信息有助于分类
- 需要考虑空间布局的场景
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import math


class RelativePositionEncoding(nn.Module):
    """Encode pairwise spatial relationships between ROIs.

    For every ordered pair of ROIs (i, j) a 10-dimensional geometric
    descriptor is built:
    - center offsets (dx, dy) and Euclidean distance
    - width/height/area ratios in log space
    - IoU and a binary overlap flag
    - bearing angle of ROI_j relative to ROI_i (as cos/sin)

    The descriptor is then projected through a small MLP into a learnable
    embedding space.
    """

    def __init__(self, embed_dim=128):
        """
        Args:
            embed_dim: output dimension of the positional embedding.
        """
        super(RelativePositionEncoding, self).__init__()
        self.embed_dim = embed_dim

        # Maps the 10-d raw geometric descriptor to the embedding space.
        self.pos_mlp = nn.Sequential(
            nn.Linear(10, embed_dim),
            nn.ReLU(inplace=True),
            nn.Linear(embed_dim, embed_dim),
            nn.LayerNorm(embed_dim)
        )

    def compute_relative_position_features(self, roi_positions):
        """Build the pairwise geometry descriptor matrix.

        Args:
            roi_positions: [B, 4] normalized ROI boxes as [cx, cy, w, h].

        Returns:
            Tensor of shape [B, B, 10]; entry (i, j) describes ROI_j as
            seen from ROI_i.
        """
        cx, cy, w, h = roi_positions.unbind(dim=1)  # each [B]
        area = w * h

        # Row view (index i) and column view (index j); broadcasting the
        # [B, 1] and [1, B] tensors yields [B, B] pairwise quantities.
        ci_x, ci_y = cx.unsqueeze(1), cy.unsqueeze(1)
        wi, hi, ai = w.unsqueeze(1), h.unsqueeze(1), area.unsqueeze(1)
        cj_x, cj_y = cx.unsqueeze(0), cy.unsqueeze(0)
        wj, hj, aj = w.unsqueeze(0), h.unsqueeze(0), area.unsqueeze(0)

        # 1. Center offsets and distance (eps keeps sqrt differentiable at 0).
        dx = ci_x - cj_x
        dy = ci_y - cj_y
        dist = torch.sqrt(dx ** 2 + dy ** 2 + 1e-6)

        # 2./3. Scale and area ratios in log space for numerical stability.
        log_dw = torch.log(wi / (wj + 1e-6) + 1e-6)
        log_dh = torch.log(hi / (hj + 1e-6) + 1e-6)
        log_da = torch.log(ai / (aj + 1e-6) + 1e-6)

        # 4. Pairwise IoU from corner coordinates.
        ix1 = torch.max(ci_x - wi / 2, cj_x - wj / 2)
        iy1 = torch.max(ci_y - hi / 2, cj_y - hj / 2)
        ix2 = torch.min(ci_x + wi / 2, cj_x + wj / 2)
        iy2 = torch.min(ci_y + hi / 2, cj_y + hj / 2)
        inter = torch.clamp(ix2 - ix1, min=0) * torch.clamp(iy2 - iy1, min=0)
        iou = inter / (ai + aj - inter + 1e-6)

        # 5. Bearing of ROI_j relative to ROI_i, in [-pi, pi].
        angle = torch.atan2(dy, dx + 1e-6)

        return torch.stack([
            dx,                 # 0: center x offset
            dy,                 # 1: center y offset
            dist,               # 2: center distance
            log_dw,             # 3: log width ratio
            log_dh,             # 4: log height ratio
            log_da,             # 5: log area ratio
            iou,                # 6: overlap amount
            torch.cos(angle),   # 7: bearing cos
            torch.sin(angle),   # 8: bearing sin
            (iou > 0).float(),  # 9: binary overlap flag
        ], dim=-1)  # [B, B, 10]

    def forward(self, roi_positions):
        """
        Args:
            roi_positions: [B, 4] normalized ROI boxes [cx, cy, w, h].

        Returns:
            pos_embeddings: [B, B, embed_dim] relative position embeddings.
        """
        raw = self.compute_relative_position_features(roi_positions)  # [B, B, 10]

        # Flatten pairs, run the MLP, restore the pairwise layout.
        num_rois = roi_positions.shape[0]
        flat = self.pos_mlp(raw.view(-1, 10))          # [B*B, embed_dim]
        return flat.view(num_rois, num_rois, self.embed_dim)


class MultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention over a set of ROI feature vectors.

    The "sequence" dimension here is the batch of ROIs itself: each ROI
    attends to every other ROI in the same batch.
    """

    def __init__(self, feature_dim, num_heads=8, dropout=0.1):
        """
        Args:
            feature_dim: dimensionality of the ROI feature vectors.
            num_heads: number of attention heads; must divide feature_dim.
            dropout: dropout rate applied to attention weights and output.
        """
        super(MultiHeadSelfAttention, self).__init__()
        assert feature_dim % num_heads == 0, "feature_dim must be divisible by num_heads"

        self.feature_dim = feature_dim
        self.num_heads = num_heads
        self.head_dim = feature_dim // num_heads

        # Q, K, V projections
        self.q_proj = nn.Linear(feature_dim, feature_dim)
        self.k_proj = nn.Linear(feature_dim, feature_dim)
        self.v_proj = nn.Linear(feature_dim, feature_dim)

        # Output projection and regularization
        self.out_proj = nn.Linear(feature_dim, feature_dim)
        self.dropout = nn.Dropout(dropout)

        # Layer Normalization
        self.norm = nn.LayerNorm(feature_dim)

        # 1/sqrt(head_dim) scaling for dot-product attention
        self.scale = self.head_dim ** -0.5

    def forward(self, features, pos_bias=None, attention_mask=None):
        """
        Args:
            features: [B, feature_dim] ROI features (B ROIs attend to each other).
            pos_bias: optional [B, B, num_heads] additive positional bias.
            attention_mask: optional [B, B]; positions equal to 0 are masked out.

        Returns:
            out: [B, feature_dim] attended features (LayerNorm'ed; NOTE no
                 residual is added here — the caller adds it).
            attn_weights: [B, num_heads, B] attention weights (for visualization;
                 dropout is applied to them in training mode).
        """
        B, D = features.shape

        # Project to Q, K, V and split heads: [B, D] -> [B, num_heads, head_dim]
        Q = self.q_proj(features).view(B, self.num_heads, self.head_dim)
        K = self.k_proj(features).view(B, self.num_heads, self.head_dim)
        V = self.v_proj(features).view(B, self.num_heads, self.head_dim)

        # Scaled dot-product scores between every pair of ROIs:
        # Q[b,h,d] . K[j,h,d] -> scores[b,h,j]  (b = query ROI, j = key ROI)
        attn_scores = torch.einsum('bhd,jhd->bhj', Q, K) * self.scale  # [B, num_heads, B]

        # Add positional bias if provided
        if pos_bias is not None:
            # [B, B, num_heads] -> [B, num_heads, B] to align with the scores
            attn_scores = attn_scores + pos_bias.permute(0, 2, 1)

        # Apply attention mask if provided
        if attention_mask is not None:
            # Use the dtype's finite minimum instead of -inf: with -inf a
            # fully-masked query row would turn into NaN after softmax.
            mask_value = torch.finfo(attn_scores.dtype).min
            attn_scores = attn_scores.masked_fill(
                attention_mask.unsqueeze(1) == 0, mask_value)  # mask: [B, 1, B]

        # Softmax normalization over key ROIs
        attn_weights = F.softmax(attn_scores, dim=-1)  # [B, num_heads, B]
        attn_weights = self.dropout(attn_weights)

        # Weighted sum of values: attn[b,h,j] * V[j,h,d] -> out[b,h,d]
        attn_output = torch.einsum('bhj,jhd->bhd', attn_weights, V)

        # Merge heads ([B, num_heads, head_dim] -> [B, D]; reshape handles
        # non-contiguous tensors), then project and regularize.
        out = self.out_proj(attn_output.reshape(B, self.feature_dim))
        out = self.dropout(out)
        out = self.norm(out)

        return out, attn_weights


class CrossROISelfAttention(nn.Module):
    """Cross-ROI self-attention block.

    Lets every ROI in a batch "see" all the others so spatial context can
    inform each ROI's features:
      1. relative position encoding describes pairwise ROI geometry,
      2. multi-head self-attention mixes features across ROIs,
      3. residual connections preserve the original signal.
    """

    def __init__(self,
                 feature_dim=512,
                 num_heads=8,
                 dropout=0.1,
                 position_embed_dim=128,
                 use_relative_pos=True):
        """
        Args:
            feature_dim: ROI feature dimensionality.
            num_heads: number of attention heads.
            dropout: dropout rate.
            position_embed_dim: width of the relative position embedding.
            use_relative_pos: whether to inject a relative-position bias.
        """
        super(CrossROISelfAttention, self).__init__()

        self.feature_dim = feature_dim
        self.use_relative_pos = use_relative_pos

        if use_relative_pos:
            # Pairwise geometry -> embedding -> per-head additive bias in [-1, 1].
            self.rel_pos_encoding = RelativePositionEncoding(embed_dim=position_embed_dim)
            self.pos_to_bias = nn.Sequential(
                nn.Linear(position_embed_dim, num_heads),
                nn.Tanh()
            )

        # Multi-head self-attention over the ROI set.
        self.self_attention = MultiHeadSelfAttention(
            feature_dim=feature_dim,
            num_heads=num_heads,
            dropout=dropout
        )

        # Transformer-style position-wise feed-forward network (4x expansion).
        self.ffn = nn.Sequential(
            nn.Linear(feature_dim, feature_dim * 4),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(feature_dim * 4, feature_dim),
            nn.Dropout(dropout)
        )
        self.ffn_norm = nn.LayerNorm(feature_dim)

    def forward(self, roi_features, roi_positions=None):
        """
        Args:
            roi_features: [B, feature_dim] ROI feature vectors.
            roi_positions: optional [B, 4] normalized boxes [cx, cy, w, h].

        Returns:
            enhanced_features: [B, feature_dim] context-enhanced features.
            attention_weights: [B, num_heads, B] weights for visualization.
        """
        # Positional attention bias (skipped when positions are unavailable).
        pos_bias = None
        if self.use_relative_pos and roi_positions is not None:
            pos_embeddings = self.rel_pos_encoding(roi_positions)  # [B, B, position_embed_dim]
            pos_bias = self.pos_to_bias(pos_embeddings)            # [B, B, num_heads]

        # Self-attention with residual connection.
        attn_out, attn_weights = self.self_attention(roi_features, pos_bias=pos_bias)
        features = roi_features + attn_out

        # Feed-forward with residual connection, then normalize.
        features = self.ffn_norm(features + self.ffn(features))

        return features, attn_weights


def test_cross_roi_attention():
    """Smoke test for the CrossROISelfAttention module."""
    print("Testing CrossROISelfAttention...")

    # Simulated batch of 8 ROIs.
    num_rois = 8
    feat_dim = 512

    feats = torch.randn(num_rois, feat_dim)

    # Hand-picked normalized boxes ([cx, cy, w, h] in [0, 1]).
    boxes = torch.tensor([
        [0.2, 0.3, 0.1, 0.1],    # ROI 1: small box, top-left
        [0.25, 0.35, 0.1, 0.1],  # ROI 2: right next to ROI 1
        [0.8, 0.8, 0.15, 0.15],  # ROI 3: bottom-right
        [0.5, 0.5, 0.2, 0.2],    # ROI 4: large central box
        [0.1, 0.9, 0.1, 0.1],    # ROI 5: bottom-left
        [0.9, 0.1, 0.1, 0.1],    # ROI 6: top-right
        [0.6, 0.3, 0.1, 0.1],    # ROI 7: upper-right region
        [0.3, 0.7, 0.1, 0.1],    # ROI 8: lower-left region
    ], dtype=torch.float32)

    # Build the module under test.
    module = CrossROISelfAttention(
        feature_dim=feat_dim,
        num_heads=8,
        dropout=0.1,
        position_embed_dim=128,
        use_relative_pos=True
    )

    # Forward pass.
    enhanced, attn = module(feats, boxes)

    print(f"Input shapes:")
    print(f"  ROI features:  {feats.shape}")
    print(f"  ROI positions: {boxes.shape}")
    print(f"Output shapes:")
    print(f"  Enhanced features: {enhanced.shape}")
    print(f"  Attention weights: {attn.shape}")

    # Show head-0 attention among the first five ROIs.
    print("\nAttention weights for head 0 (first 5 ROIs):")
    print(attn[:5, 0, :5].detach())
    print("\n注意: ROI 1和2相邻，应该有较高的注意力权重")

    assert enhanced.shape == (num_rois, feat_dim), "Output shape mismatch!"
    print("\n✅ Test passed!")


# Run the smoke test when this file is executed as a script.
if __name__ == '__main__':
    test_cross_roi_attention()
