"""
Attention Diversity Loss

用于多头注意力的互补性约束，防止"头塌陷"(head collapse)。

支持三种损失类型：
1. Cosine Similarity Loss - 惩罚头之间的余弦相似度
2. KL Divergence Loss - 惩罚注意力分布的KL散度
3. Orthogonality Loss - 鼓励注意力向量正交

参考文献：
- "Analyzing Multi-Head Self-Attention: Specialized Heads Do the Heavy Lifting" (ACL 2019)
- "Are Sixteen Heads Really Better than One?" (NeurIPS 2019)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F


class AttentionDiversityLoss(nn.Module):
    """Attention diversity loss for multi-head attention.

    Prevents "head collapse" by encouraging different heads to learn
    different attention patterns. Lower loss = more diverse heads.
    """

    def __init__(self, diversity_type='cosine', reduction='mean'):
        """
        Args:
            diversity_type: diversity metric to use
                - 'cosine': cosine similarity between heads (recommended)
                - 'kl': KL divergence between attention distributions
                - 'orthogonal': orthogonality constraint on head vectors
            reduction: 'mean', 'sum', or 'none'
        """
        super().__init__()
        self.diversity_type = diversity_type
        self.reduction = reduction

    def forward(self, attention_weights):
        """Compute the diversity loss over the attention heads.

        Args:
            attention_weights: [B, num_heads, N, N] attention weights,
                assumed already softmax-normalized along the last dim.
                - B: batch size (number of ROIs)
                - num_heads: number of attention heads
                - N: sequence length (typically N == B for ROI-to-ROI attention)

        Returns:
            loss: scalar (for 'mean'/'sum' reduction) or [B] vector ('none').

        Raises:
            ValueError: if diversity_type is not one of the supported values.
        """
        if self.diversity_type == 'cosine':
            return self._cosine_similarity_loss(attention_weights)
        elif self.diversity_type == 'kl':
            return self._kl_divergence_loss(attention_weights)
        elif self.diversity_type == 'orthogonal':
            return self._orthogonality_loss(attention_weights)
        else:
            raise ValueError(f"Unknown diversity type: {self.diversity_type}")

    def _reduce(self, loss):
        """Apply the configured reduction to a per-sample [B] loss vector."""
        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        return loss

    def _cosine_similarity_loss(self, attention_weights):
        """Mean pairwise cosine similarity between heads (recommended).

        Penalizes high cosine similarity between the flattened attention
        maps of different heads:

            L = 1 / (K * (K - 1)) * Σ_{i != j} cos_sim(head_i, head_j)
            where cos_sim(a, b) = (a · b) / (||a|| ||b||)
        """
        B, num_heads, N, _ = attention_weights.shape

        # Degenerate case: a single head has no pairs to compare.
        if num_heads < 2:
            return self._reduce(attention_weights.new_zeros(B))

        # Flatten each head's attention map: [B, num_heads, N*N].
        # reshape (not view): the input may be non-contiguous, e.g. when it
        # was produced via .expand().
        attn_flat = attention_weights.reshape(B, num_heads, -1)

        # L2-normalize so the dot product below is a cosine similarity.
        attn_norm = F.normalize(attn_flat, p=2, dim=-1)  # [B, num_heads, N*N]

        # Pairwise head similarity: [B, num_heads, num_heads].
        similarity_matrix = torch.bmm(attn_norm, attn_norm.transpose(1, 2))

        # Zero out the diagonal (a head's similarity with itself is always 1).
        mask = 1.0 - torch.eye(num_heads, device=attention_weights.device)
        similarity_matrix = similarity_matrix * mask.unsqueeze(0)

        # Average over the num_heads * (num_heads - 1) off-diagonal entries.
        num_pairs = num_heads * (num_heads - 1)
        loss = similarity_matrix.sum(dim=[1, 2]) / num_pairs  # [B]
        return self._reduce(loss)

    def _kl_divergence_loss(self, attention_weights):
        """Negative mean symmetric KL divergence between head distributions.

        We want the KL divergence between heads to be LARGE, so the loss is
        its negation:

            L = -1 / num_pairs * Σ_{i < j} (KL(P_i || P_j) + KL(P_j || P_i)) / 2
        """
        B, num_heads, N, _ = attention_weights.shape

        # Degenerate case: a single head has no pairs to compare.
        if num_heads < 2:
            return self._reduce(attention_weights.new_zeros(B))

        # Flatten each head; rows are already softmax distributions, so the
        # flattened sum is N per head — the per-row KL terms simply add up.
        # reshape (not view) tolerates non-contiguous inputs (e.g. .expand()).
        attn = attention_weights.reshape(B, num_heads, -1)  # [B, num_heads, N*N]

        # Small constant for numerical stability of the logarithm.
        attn = attn + 1e-8
        # Hoist the log out of the pair loop (it is head-pair invariant).
        log_attn = torch.log(attn)

        # Loop over the upper triangle only (symmetric KL, avoid duplicates).
        # The explicit loop keeps peak memory at O(B * N^2) instead of the
        # O(B * K^2 * N^2) a fully vectorized version would need.
        total_kl = 0.0
        num_pairs = 0
        for i in range(num_heads):
            for j in range(i + 1, num_heads):
                kl_ij = (attn[:, i] * (log_attn[:, i] - log_attn[:, j])).sum(dim=-1)
                kl_ji = (attn[:, j] * (log_attn[:, j] - log_attn[:, i])).sum(dim=-1)
                # Symmetric KL divergence.
                total_kl += (kl_ij + kl_ji) / 2
                num_pairs += 1

        # Negate: maximizing KL divergence == minimizing its negation.
        loss = -total_kl / num_pairs  # [B]
        return self._reduce(loss)

    def _orthogonality_loss(self, attention_weights):
        """Orthogonality constraint on the (normalized) head vectors.

        Pushes the Gram matrix of the heads toward the identity:

            L = || A A^T - I ||_F^2
            where ||.||_F is the Frobenius norm and the rows of A are the
            L2-normalized flattened attention maps.
        """
        B, num_heads, N, _ = attention_weights.shape

        # Flatten; reshape (not view) tolerates non-contiguous inputs.
        attn_flat = attention_weights.reshape(B, num_heads, -1)  # [B, num_heads, N*N]

        # Normalize so that ||head_i|| = 1 and the diagonal of the Gram
        # matrix is exactly 1.
        attn_norm = F.normalize(attn_flat, p=2, dim=-1)

        # Gram matrix of the heads: [B, num_heads, num_heads].
        gram_matrix = torch.bmm(attn_norm, attn_norm.transpose(1, 2))

        # Identity target (broadcast over the batch dimension).
        identity = torch.eye(num_heads, device=attention_weights.device).unsqueeze(0)

        # Squared Frobenius norm of the deviation from identity: [B].
        loss = torch.norm(gram_matrix - identity, p='fro', dim=[1, 2]) ** 2
        return self._reduce(loss)


def analyze_head_diversity(attention_weights):
    """Analyze the diversity of multi-head attention heads.

    Intended for visualization and debugging: reports how similar the
    heads' attention patterns are to each other.

    Args:
        attention_weights: [B, num_heads, N, N] attention weights.

    Returns:
        metrics: dict with
            - 'similarity_matrix': [num_heads, num_heads] numpy array of
              batch-averaged pairwise cosine similarities
            - 'avg_off_diagonal_similarity': float, mean off-diagonal similarity
            - 'max_similarity': float, largest off-diagonal entry
            - 'min_similarity': float, smallest off-diagonal entry
    """
    B, num_heads, N, _ = attention_weights.shape

    # detach: allow calling this on tensors that require grad (.numpy()
    # below would otherwise raise). reshape (not view): tolerate
    # non-contiguous inputs such as .expand() results.
    attn_flat = attention_weights.detach().reshape(B, num_heads, -1)
    attn_norm = F.normalize(attn_flat, p=2, dim=-1)

    # Pairwise head cosine similarity: [B, num_heads, num_heads].
    similarity_matrix = torch.bmm(attn_norm, attn_norm.transpose(1, 2))

    # Batch-averaged similarity matrix: [num_heads, num_heads].
    avg_similarity = similarity_matrix.mean(dim=0).cpu()

    # Off-diagonal mask (a head's similarity with itself is always 1).
    off_diag = ~torch.eye(num_heads, dtype=torch.bool)

    # Mean off-diagonal similarity per sample, then averaged over the batch.
    masked = similarity_matrix * off_diag.float().to(similarity_matrix.device)
    off_diagonal_sim = masked.sum(dim=[1, 2]) / (num_heads * (num_heads - 1))

    # Extremes taken over the batch-averaged off-diagonal entries; indexing
    # stays within torch (the original indexed a numpy array with a torch
    # bool tensor, which relies on fragile implicit conversion).
    off_values = avg_similarity[off_diag]

    metrics = {
        'similarity_matrix': avg_similarity.numpy(),
        'avg_off_diagonal_similarity': off_diagonal_sim.mean().item(),
        'max_similarity': off_values.max().item(),
        'min_similarity': off_values.min().item(),
    }

    return metrics


# ===== 集成到CrossROISelfAttention =====

class CrossROISelfAttentionWithDiversity(nn.Module):
    """Cross-ROI self-attention with an optional diversity loss.

    Wraps CrossROISelfAttention and, when enabled, computes an attention
    diversity penalty on the attention weights it returns.
    """

    def __init__(self,
                 feature_dim=512,
                 num_heads=8,
                 dropout=0.1,
                 position_embed_dim=128,
                 use_relative_pos=True,
                 use_diversity_loss=False,
                 diversity_type='cosine',
                 diversity_weight=0.01):
        """
        Args:
            ... (remaining parameters mirror CrossROISelfAttention)
            use_diversity_loss: whether to enable the diversity loss
            diversity_type: diversity loss type ('cosine', 'kl', 'orthogonal')
            diversity_weight: weight of the diversity term
        """
        super().__init__()

        # Deferred import: the base attention module lives elsewhere in
        # the project, so import it only when this wrapper is constructed.
        from src.models.cross_roi_attention import CrossROISelfAttention

        self.cross_roi_attn = CrossROISelfAttention(
            feature_dim=feature_dim,
            num_heads=num_heads,
            dropout=dropout,
            position_embed_dim=position_embed_dim,
            use_relative_pos=use_relative_pos,
        )

        self.use_diversity_loss = use_diversity_loss
        self.diversity_weight = diversity_weight
        if use_diversity_loss:
            self.diversity_loss_fn = AttentionDiversityLoss(
                diversity_type=diversity_type,
                reduction='mean',
            )

    def forward(self, roi_features, roi_positions=None, return_diversity_loss=False):
        """Forward pass through the wrapped attention plus optional penalty.

        Args:
            roi_features: [B, feature_dim]
            roi_positions: [B, 4]
            return_diversity_loss: set True during training to get the loss

        Returns:
            enhanced_features: [B, feature_dim]
            attention_weights: [B, num_heads, B, B] (useful for visualization)
            diversity_loss: scalar loss, or None unless both
                use_diversity_loss and return_diversity_loss are True
        """
        enhanced, attn = self.cross_roi_attn(roi_features, roi_positions)

        div_loss = None
        if return_diversity_loss and self.use_diversity_loss:
            div_loss = self.diversity_loss_fn(attn)

        return enhanced, attn, div_loss


# ===== 测试代码 =====

def test_diversity_loss():
    """Smoke-test the diversity losses on extreme and random head patterns."""
    print("Testing Attention Diversity Loss...")

    B, num_heads, N = 8, 8, 8

    # Scenario 1: all heads identical (worst case for diversity).
    print("\n1. Worst case: All heads identical")
    attn_identical = torch.softmax(torch.randn(B, 1, N, N), dim=-1)
    # .contiguous(): .expand() yields a non-contiguous tensor, which would
    # break any downstream .view() call on the flattened attention maps.
    attn_identical = attn_identical.expand(B, num_heads, N, N).contiguous()

    for loss_type in ['cosine', 'kl', 'orthogonal']:
        loss_fn = AttentionDiversityLoss(diversity_type=loss_type)
        loss = loss_fn(attn_identical)
        print(f"   {loss_type}: {loss.item():.4f}")

    # Scenario 2: fully random heads (close to the ideal case).
    print("\n2. Best case: All heads random")
    attn_random = torch.softmax(torch.randn(B, num_heads, N, N), dim=-1)

    for loss_type in ['cosine', 'kl', 'orthogonal']:
        loss_fn = AttentionDiversityLoss(diversity_type=loss_type)
        loss = loss_fn(attn_random)
        print(f"   {loss_type}: {loss.item():.4f}")

    # Scenario 3: diversity analysis metrics.
    print("\n3. Analyzing head diversity")
    metrics = analyze_head_diversity(attn_random)
    print(f"   Avg off-diagonal similarity: {metrics['avg_off_diagonal_similarity']:.4f}")
    print(f"   Max similarity: {metrics['max_similarity']:.4f}")
    print(f"   Min similarity: {metrics['min_similarity']:.4f}")

    print("\n✅ Diversity loss test passed!")


def test_integration():
    """Smoke-test the integration with CrossROISelfAttention."""
    print("\nTesting integration with CrossROISelfAttention...")

    batch_size, feat_dim = 8, 512

    features = torch.randn(batch_size, feat_dim)
    positions = torch.rand(batch_size, 4)

    # Build the wrapper with the cosine diversity loss enabled.
    model = CrossROISelfAttentionWithDiversity(
        feature_dim=feat_dim,
        num_heads=8,
        use_diversity_loss=True,
        diversity_type='cosine',
        diversity_weight=0.01,
    )

    # Forward pass, requesting the diversity loss as during training.
    enhanced, attn_weights, div_loss = model(
        features, positions, return_diversity_loss=True)

    print(f"Enhanced features: {enhanced.shape}")
    print(f"Attention weights: {attn_weights.shape}")
    print(f"Diversity loss: {div_loss.item():.4f}")

    print("\n✅ Integration test passed!")


# Run both smoke tests when executed as a script.
if __name__ == '__main__':
    test_diversity_loss()
    test_integration()
