import torch
import torch.nn as nn
import dgl
import dgl.function as fn

# 1. Build a tiny toy graph: 3 nodes, 2 directed edges (0->1, 1->2)
src_nodes = [0, 1]
dst_nodes = [1, 2]
g = dgl.graph((src_nodes, dst_nodes))
g.ndata['h'] = torch.randn(3, 4)  # random node features: 3 nodes x 4 dims


# 2. 定义GAT单层
class GATLayer(nn.Module):
    """Single graph-attention (GAT) layer.

    Projects node features with a linear layer, scores every edge with a
    learned attention function over the concatenated (src, dst) projections,
    softmax-normalizes the scores over each node's incoming edges, and
    aggregates neighbor features as an attention-weighted sum.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.fc = nn.Linear(in_dim, out_dim)  # node feature projection
        self.attn_fc = nn.Linear(2 * out_dim, 1)  # attention score over [src || dst]

    # Edge UDF: compute the raw (unnormalized) attention score per edge.
    def edge_attention(self, edges):
        """Return {'e': score} for each edge in the batch.

        Concatenates projected source and destination features and applies
        a LeakyReLU-activated linear layer (negative slope 0.2), as in the
        original GAT formulation.
        """
        z2 = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
        a = self.attn_fc(z2)
        return {'e': torch.nn.functional.leaky_relu(a, 0.2)}

    # Message passing + aggregation.
    def forward(self, g, h):
        """Run one round of attention-weighted message passing.

        Args:
            g: DGLGraph with N nodes.
            h: node feature tensor of shape (N, in_dim).

        Returns:
            Updated node features of shape (N, out_dim).
        """
        # local_scope() keeps the temporary fields 'z', 'e', 'a', 'h_new'
        # from leaking into (or clobbering existing entries of) the
        # caller's graph data — the original version mutated g permanently.
        with g.local_scope():
            g.ndata['z'] = self.fc(h)  # linear projection
            g.apply_edges(self.edge_attention)  # raw score on every edge

            # Normalize scores over each destination node's in-edges.
            g.edata['a'] = dgl.ops.edge_softmax(g, g.edata['e'])

            # Message = feature x attention weight; reduce = weighted sum.
            g.update_all(
                fn.u_mul_e('z', 'a', 'm'),
                fn.sum('m', 'h_new')
            )
            return g.ndata['h_new']


# 3. Smoke test: run one layer and check the shapes.
layer = GATLayer(4, 2)
out_feats = layer(g, g.ndata['h'])
print("输入特征形状:", g.ndata['h'].shape)
print("输出特征形状:", out_feats.shape)
