"""
APPNP (Approximate Personalized Propagation of Neural Predictions) 层实现

参考论文: "Predict then Propagate: Graph Neural Networks meet Personalized PageRank"
核心思想: 将特征变换和图传播解耦，避免over-smoothing问题

作者: 改进版DiffPool项目
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from device_config import get_device


class APPNPConv(nn.Module):
    """
    APPNP propagation layer.

    Propagates features using the personalized PageRank (PPR) recursion:
        Z^(0) = H
        Z^(k+1) = (1 - alpha) * P @ Z^(k) + alpha * H

    where:
        - H: input features (typically the output of an MLP)
        - P: normalized propagation matrix D^(-1/2) @ A @ D^(-1/2)
        - alpha: PageRank "teleport" probability (typically 0.1-0.2)
        - K: number of propagation steps (typically 10-20)

    Args:
        K (int): number of propagation iterations, default 10
        alpha (float): PageRank teleport probability, default 0.1
        add_self_loops (bool): whether to add self-loops, default True
        cached (bool): whether to cache the propagation matrix, default False
    """

    def __init__(self, K=10, alpha=0.1, add_self_loops=True, cached=False):
        super(APPNPConv, self).__init__()
        self.K = K
        self.alpha = alpha
        self.add_self_loops = add_self_loops
        self.cached = cached
        self._cached_P = None
        # Reference copy of the adjacency the cache was built from; used to
        # detect whether the graph actually changed (not merely its size).
        self._cached_adj = None

    def forward(self, x, adj):
        """
        Forward pass.

        Args:
            x: Tensor [N, F] - node features
            adj: Tensor [N, N] - adjacency matrix (dense)

        Returns:
            Tensor [N, F] - propagated node features
        """
        # Compute (or fetch the cached) propagation matrix.
        P = self._get_propagation_matrix(adj)

        # APPNP propagation: repeatedly mix the propagated signal with the
        # original input, which keeps local information and avoids
        # over-smoothing even for large K.
        h = x
        for _ in range(self.K):
            # Z^(k+1) = (1 - alpha) * P @ Z^(k) + alpha * H
            h = (1 - self.alpha) * torch.matmul(P, h) + self.alpha * x

        return h

    def _get_propagation_matrix(self, adj):
        """
        Compute, or fetch from cache, P = D^(-1/2) @ A @ D^(-1/2).

        Args:
            adj: Tensor [N, N] - adjacency matrix

        Returns:
            Tensor [N, N] - normalized propagation matrix
        """
        # Cache hit only when the adjacency is genuinely unchanged.
        # BUG FIX: the previous implementation keyed the cache on
        # hash(adj.shape[0]) - the node count alone - which silently
        # returned a stale matrix for a *different* graph of the same size.
        if self.cached and self._cached_P is not None:
            if (self._cached_adj is not None
                    and adj.shape == self._cached_adj.shape
                    and torch.equal(adj, self._cached_adj)):
                return self._cached_P

        P = self._compute_propagation_matrix(adj)

        if self.cached:
            self._cached_P = P
            # Clone so later in-place edits to `adj` cannot corrupt the key.
            self._cached_adj = adj.detach().clone()

        return P

    def _compute_propagation_matrix(self, adj):
        """
        Compute the normalized propagation matrix P = D^(-1/2) @ (A + I) @ D^(-1/2).

        Args:
            adj: Tensor [N, N] - adjacency matrix

        Returns:
            Tensor [N, N] - normalized propagation matrix
        """
        device = adj.device
        N = adj.size(0)

        # Optionally add self-loops.
        if self.add_self_loops:
            adj_with_self = adj + torch.eye(N, device=device)
        else:
            adj_with_self = adj

        # Degree vector.
        deg = adj_with_self.sum(dim=1)  # [N]

        # Guard isolated nodes (degree 0). The clamp also guarantees that
        # deg.pow(-0.5) below is finite, so no extra isinf fix-up is needed.
        deg = torch.clamp(deg, min=1e-12)

        # D^(-1/2)
        deg_inv_sqrt = deg.pow(-0.5)

        # P = D^(-1/2) @ A @ D^(-1/2) via broadcasting, avoiding explicit
        # diagonal matrices.
        P = deg_inv_sqrt.view(-1, 1) * adj_with_self * deg_inv_sqrt.view(1, -1)

        return P

    def __repr__(self):
        return f'{self.__class__.__name__}(K={self.K}, alpha={self.alpha})'


class APPNPPowerIteration(nn.Module):
    """
    Power-iteration variant of APPNP for large graphs.

    Operates on an edge list with scatter-style aggregation instead of a
    dense [N, N] propagation matrix, so memory scales with the number of
    edges rather than N^2.
    """

    def __init__(self, K=10, alpha=0.1):
        super(APPNPPowerIteration, self).__init__()
        self.K = K          # number of propagation steps
        self.alpha = alpha  # PageRank teleport probability

    def forward(self, x, edge_index, edge_weight=None, num_nodes=None):
        """
        Forward pass (sparse version).

        Args:
            x: Tensor [N, F] - node features
            edge_index: LongTensor [2, E] - edge indices
            edge_weight: Tensor [E] - edge weights (optional)
            num_nodes: int - number of nodes (optional)

        Returns:
            Tensor [N, F] - propagated node features
        """
        if num_nodes is None:
            num_nodes = x.size(0)

        # Default to unit edge weights. BUG FIX: match x's dtype (the old
        # code always created float32, breaking float64 inputs downstream).
        if edge_weight is None:
            edge_weight = torch.ones(edge_index.size(1),
                                     device=edge_index.device, dtype=x.dtype)

        # Symmetric normalization: w_ij <- w_ij / sqrt(d_i * d_j).
        row, col = edge_index
        # Degree accumulator must share the edge weights' dtype, otherwise
        # scatter_add_ raises on non-float32 weights.
        deg = torch.zeros(num_nodes, device=x.device, dtype=edge_weight.dtype)
        deg.scatter_add_(0, row, edge_weight)
        deg_inv_sqrt = deg.pow(-0.5)
        # Isolated nodes have degree 0 -> inf after pow(-0.5); zero them out.
        deg_inv_sqrt[torch.isinf(deg_inv_sqrt)] = 0.0

        edge_weight_norm = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]

        # APPNP propagation: Z^(k+1) = (1-alpha)*P@Z^(k) + alpha*H.
        h = x
        for _ in range(self.K):
            h_prop = self._sparse_mm(edge_index, edge_weight_norm, h, num_nodes)
            h = (1 - self.alpha) * h_prop + self.alpha * x

        return h

    def _sparse_mm(self, edge_index, edge_weight, x, num_nodes):
        """Sparse matrix product: out[i] = sum over edges (i, j) of w_ij * x[j]."""
        row, col = edge_index
        # BUG FIX: allocate with x's dtype - a plain torch.zeros (float32
        # default) makes index_add_ fail for float64 features.
        out = torch.zeros(num_nodes, x.size(1), device=x.device, dtype=x.dtype)

        # Aggregate weighted neighbor features onto the source nodes.
        weighted_x = edge_weight.view(-1, 1) * x[col]
        out.index_add_(0, row, weighted_x)

        return out

    def __repr__(self):
        return f'{self.__class__.__name__}(K={self.K}, alpha={self.alpha})'


class PrecomputedAPPNP(nn.Module):
    """
    Precomputed APPNP for static graphs (fastest at inference time).

    Folds the whole K-step APPNP recursion into a single matrix at
    construction time:
        Z^(0) = I
        Z^(k+1) = (1 - alpha) * P @ Z^(k) + alpha * I
    so that forward() reduces to one matmul: P_final @ x.

    Pros: a single matrix multiplication per forward pass.
    Cons: stores the full dense [N, N] matrix - large memory footprint.
    """

    def __init__(self, adj, K=10, alpha=0.1, add_self_loops=True):
        super(PrecomputedAPPNP, self).__init__()
        self.K = K
        self.alpha = alpha

        # Precompute the propagation matrix once.
        P_final = self._precompute_propagation(adj, K, alpha, add_self_loops)

        # Register as a buffer: excluded from gradients, saved in state_dict.
        self.register_buffer('P_final', P_final)

    def forward(self, x, adj=None):
        """
        Forward pass (a single matrix multiplication!).

        Args:
            x: Tensor [N, F] - node features
            adj: ignored (already precomputed)

        Returns:
            Tensor [N, F] - propagated node features
        """
        return torch.matmul(self.P_final, x)

    def _precompute_propagation(self, adj, K, alpha, add_self_loops):
        """
        Precompute the final propagation matrix.

        Unrolls the matrix recursion Z^(k+1) = (1-alpha)*P@Z^(k) + alpha*I,
        which expands to
            P_final = [(1-alpha)*P]^K + alpha * sum_{k=0}^{K-1} [(1-alpha)*P]^k

        BUG FIX: the previous implementation computed only the truncated sum
        and dropped the [(1-alpha)*P]^K term, so its output disagreed with
        the iterative APPNPConv for the same K and alpha.
        """
        device = adj.device
        N = adj.size(0)

        # Optionally add self-loops.
        if add_self_loops:
            adj_with_self = adj + torch.eye(N, device=device)
        else:
            adj_with_self = adj

        # Symmetric normalization P = D^(-1/2) @ A @ D^(-1/2).
        deg = adj_with_self.sum(dim=1)
        deg = torch.clamp(deg, min=1e-12)  # guards isolated nodes; keeps pow finite
        deg_inv_sqrt = deg.pow(-0.5)
        P = deg_inv_sqrt.view(-1, 1) * adj_with_self * deg_inv_sqrt.view(1, -1)

        # Run the APPNP recursion on the identity matrix; applying the result
        # to x is exactly equivalent to K iterative propagation steps on x.
        I = torch.eye(N, device=device)
        P_final = I
        for _ in range(K):
            P_final = (1 - alpha) * torch.matmul(P, P_final) + alpha * I

        return P_final

    def __repr__(self):
        return f'{self.__class__.__name__}(K={self.K}, alpha={self.alpha}, precomputed=True)'


def test_appnp():
    """Smoke-test the APPNP layer variants (prints shapes and diagnostics)."""
    print("=" * 60)
    print("测试APPNP传播层")
    print("=" * 60)

    # Build test data.
    N = 10         # number of nodes
    # BUG FIX: this local was named `F`, shadowing the module-level
    # `torch.nn.functional as F` alias.
    num_feats = 5  # feature dimension

    # Random node features.
    x = torch.randn(N, num_feats)

    # Random symmetric 0/1 adjacency matrix.
    adj = torch.rand(N, N)
    adj = (adj + adj.t()) / 2
    adj = (adj > 0.5).float()

    print(f"\n输入特征: {x.shape}")
    print(f"邻接矩阵: {adj.shape}, 边数: {adj.sum().item()}")

    # 1. Standard (iterative) APPNP.
    print("\n1. 标准APPNP:")
    appnp = APPNPConv(K=10, alpha=0.1)
    out = appnp(x, adj)
    print(f"   输出: {out.shape}")
    print(f"   输出范围: [{out.min().item():.3f}, {out.max().item():.3f}]")

    # 2. Precomputed APPNP (should match the iterative version closely).
    print("\n2. 预计算APPNP:")
    appnp_pre = PrecomputedAPPNP(adj, K=10, alpha=0.1)
    out_pre = appnp_pre(x)
    print(f"   输出: {out_pre.shape}")
    print(f"   与标准版差异: {(out - out_pre).abs().max().item():.6f}")

    # 3. Effect of different K and alpha values.
    print("\n3. 不同超参数的影响:")
    for K in [5, 10, 20]:
        for alpha in [0.05, 0.1, 0.2]:
            appnp_test = APPNPConv(K=K, alpha=alpha)
            out_test = appnp_test(x, adj)
            # NOTE(review): this averages differences between consecutive
            # node *rows*, not along graph edges — confirm that is intended
            # as the "smoothness" proxy.
            smoothness = (out_test[1:] - out_test[:-1]).abs().mean()
            print(f"   K={K:2d}, α={alpha:.2f}: 平滑度={smoothness:.4f}")

    print("\n" + "=" * 60)
    print("测试完成!")
    print("=" * 60)


if __name__ == "__main__":
    test_appnp()

