import torch
import torch.nn as nn
import torch.nn.functional as F

class GNNLayer(nn.Module):
    """One message-passing layer.

    Per edge (src -> dst) the message is ``W_h @ x[src] + W_e @ edge_attr``;
    each node's update is ``ReLU(W_n @ x + sum of incoming messages)``.
    """

    def __init__(self, node_dim, edge_dim, hidden_dim):
        """
        :param node_dim: input node feature dimension
        :param edge_dim: input edge feature dimension
        :param hidden_dim: output dimension of this layer
        """
        super(GNNLayer, self).__init__()
        # Transform for the node's own features.
        self.W_n = nn.Linear(node_dim, hidden_dim, bias=False)
        # Transform for neighbor (source) node features.
        self.W_h = nn.Linear(node_dim, hidden_dim, bias=False)
        # Transform for edge features.
        self.W_e = nn.Linear(edge_dim, hidden_dim, bias=False)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize all weight matrices with Xavier-uniform values."""
        nn.init.xavier_uniform_(self.W_n.weight)
        nn.init.xavier_uniform_(self.W_h.weight)
        nn.init.xavier_uniform_(self.W_e.weight)

    def forward(self, x, edge_index, edge_attr):
        """
        :param x: [N, node_dim] node features
        :param edge_index: [2, E] (src, dst) index pairs
        :param edge_attr: [E, edge_dim] edge features
        :return: [N, hidden_dim] updated node features
        """
        src, dst = edge_index
        # Message = transformed source-node features + transformed edge features.
        msg = self.W_h(x[src]) + self.W_e(edge_attr)  # [E, hidden_dim]

        # Compute the self-transform once (the original ran W_n twice:
        # once inside zeros_like and once for the residual term).
        self_feat = self.W_n(x)

        # Sum incoming messages per destination node.
        agg = torch.zeros_like(self_feat)
        agg.index_add_(0, dst, msg)

        return F.relu(self_feat + agg)


class GNNModel(nn.Module):
    """Graph-neural-network model producing per-node embeddings for graph matching."""

    def __init__(self, input_dim, hidden_dim, output_dim, max_length, num_layers=3,zero_init=True,device='cpu'):
        """
        :param input_dim: edge feature dimension (here 3: normalized_length, dir_x, dir_y)
        :param hidden_dim: NOTE(review): currently unused — every layer is built
            with ``output_dim`` as its width; confirm whether this is intended.
        :param output_dim: final embedding dimension
        :param max_length: maximum length used to normalize edge lengths
        :param num_layers: number of stacked GNN layers
        :param zero_init: accepted for compatibility; not referenced here
        :param device: device on which tensors are created
        """
        super(GNNModel, self).__init__()
        self.num_layers = num_layers
        self.max_length = max_length
        self.device = device

        # Node features start at output_dim (random init breaks symmetry),
        # so every layer maps output_dim -> output_dim.
        self.layers = nn.ModuleList(
            GNNLayer(output_dim, input_dim, output_dim) for _ in range(num_layers)
        )

    def forward(self, graph):
        """Return final node embeddings of shape [N, output_dim].

        NOTE(review): node features are freshly randomized on every call, so
        repeated calls on the same graph give different embeddings.
        """
        _, edge_features, edge_index = graph.to_tensors(self.max_length, device=self.device)

        embed_dim = self.layers[0].W_n.weight.shape[0]
        h = torch.randn(len(graph.nodes), embed_dim, device=self.device)

        for gnn_layer in self.layers:
            h = gnn_layer(h, edge_index, edge_features)

        return h

    def compute_loss(self, node_embeddings_1, node_embeddings_2, ground_truth):
        """MSE between the node-similarity matrix and the ground-truth matching.

        :param node_embeddings_1: [N1, d] embeddings of graph 1
        :param node_embeddings_2: [N2, d] embeddings of graph 2
        :param ground_truth: [N1, N2] binary matching matrix
        """
        scores = torch.matmul(node_embeddings_1, node_embeddings_2.t())
        return F.mse_loss(scores, ground_truth)