import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from torch.nn import MSELoss, BCEWithLogitsLoss
class AEModel(nn.Module):
    """
    Graph auto-encoder for graph completion.

    - Encoder: a stack of GCN layers mapping each node to a hidden
      representation ``h_i``.
    - Node decoder: an MLP reconstructing the original node features
      from ``h_i``.
    - Edge decoder: an MLP that, for any given node pair ``(i, j)``,
      predicts a logit for whether the edge exists.
    """

    def __init__(self, in_dim, hidden_dim, num_layers, dropout):
        """
        Args:
            in_dim (int): dimensionality of the input node features.
            hidden_dim (int): dimensionality of the hidden node embeddings.
            num_layers (int): number of GCN layers; must be >= 1.
            dropout (float): dropout probability applied after each GCN layer.

        Raises:
            ValueError: if ``num_layers`` is less than 1.
        """
        super().__init__()
        if num_layers < 1:
            # With zero conv layers the encoder would output in_dim-sized
            # vectors while both decoders expect hidden_dim inputs, so the
            # model would be dimensionally inconsistent. Fail fast instead.
            raise ValueError(f"num_layers must be >= 1, got {num_layers}")
        self.dropout = dropout

        # -- Encoder: stack of GCNConv layers --
        # First layer: in_dim -> hidden_dim; remaining: hidden_dim -> hidden_dim.
        self.convs = nn.ModuleList()
        self.convs.append(GCNConv(in_dim, hidden_dim))
        for _ in range(num_layers - 1):
            self.convs.append(GCNConv(hidden_dim, hidden_dim))

        # -- Node feature decoder: hidden_dim -> hidden_dim -> in_dim --
        self.node_decoder = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, in_dim),
        )

        # -- Edge existence decoder --
        # Concatenates the hidden vectors of the two endpoint nodes and
        # predicts a single existence logit.
        self.edge_decoder = nn.Sequential(
            nn.Linear(2 * hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def loss_function(self, reconstructed_data, original_data, masked_data):
        """
        Total graph-completion loss, the sum of three terms:

        1. MSE reconstruction of the full node-feature matrix.
        2. MSE reconstruction of the masked nodes' original features.
        3. BCE on the existence logits of the masked edges.

        Args:
            reconstructed_data: ``(node_recon, edge_logits_fn)`` pair as
                returned by :meth:`forward`.
            original_data: graph object exposing the full (unmasked)
                feature matrix as ``.x``.
            masked_data: graph object carrying a ``masked_info`` dict with
                keys ``'masked_node_indices'``, ``'original_node_features'``
                and ``'masked_edge_index'``.

        Returns:
            Scalar tensor: sum of the three loss terms.
        """
        node_recon, edge_logits_fn = reconstructed_data

        # LOSS1: reconstruction of every node's features.
        loss1 = F.mse_loss(node_recon, original_data.x)

        # LOSS3: reconstruction restricted to the masked nodes, compared
        # against their pre-masking feature vectors.
        m_nodes = masked_data.masked_info['masked_node_indices']
        orig_x = masked_data.masked_info['original_node_features']
        loss3 = F.mse_loss(node_recon[m_nodes], orig_x)

        # LOSS2: existence prediction for the masked edges. The masked
        # edges were real edges, so every target label is 1.
        # NOTE(review): only positive edges are supervised here — without
        # negative sampling the edge decoder can trivially minimize this
        # term by predicting "exists" for every pair; consider adding
        # negative (non-edge) samples.
        m_edges = masked_data.masked_info['masked_edge_index']
        logits = edge_logits_fn(m_edges)
        loss2 = F.binary_cross_entropy_with_logits(
            logits, torch.ones_like(logits)
        )

        return loss1 + loss2 + loss3

    def encode(self, x, edge_index):
        """
        Compute the hidden representation of every node via the GCN stack.

        Args:
            x: Tensor[N, in_dim] of node features.
            edge_index: LongTensor[2, E] of graph edges.

        Returns:
            Tensor[N, hidden_dim] of node embeddings.
        """
        h = x
        # ReLU + dropout follow every layer, including the last one.
        for conv in self.convs:
            h = conv(h, edge_index)
            h = F.relu(h)
            h = F.dropout(h, p=self.dropout, training=self.training)
        return h

    def forward(self, data):
        """
        Args:
            data.x          Tensor[N, in_dim] : node features
            data.edge_index LongTensor[2, E]  : edge index of the (incomplete) graph

        Returns:
            node_recon      Tensor[N, in_dim] : reconstructed node features
            edge_logits_fn  Callable(edge_index_masked) -> Tensor[M] :
                predicts existence logits for a given list of edges
        """
        # 1) Encode nodes into hidden vectors h.
        h = self.encode(data.x, data.edge_index)

        # 2) Reconstruct node features.
        node_recon = self.node_decoder(h)

        # 3) Closure for binary edge-existence prediction over any edge
        #    list; closes over h so callers can score arbitrary pairs.
        def edge_logits_fn(edge_idx):
            # edge_idx: LongTensor[2, M]
            src, dst = edge_idx      # src: [M], dst: [M]
            h_src = h[src]           # [M, hidden_dim]
            h_dst = h[dst]           # [M, hidden_dim]
            h_cat = torch.cat([h_src, h_dst], dim=1)  # [M, 2*hidden_dim]
            logits = self.edge_decoder(h_cat).squeeze(-1)  # [M]
            return logits

        return node_recon, edge_logits_fn
