import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.function as fn


class GATLayer(nn.Module):
    """Single-head graph attention (GAT) layer.

    Projects node features with a shared linear map, scores each edge with
    LeakyReLU(attn_fc([z_src || z_dst])), softmax-normalizes the scores over
    each node's in-neighborhood, and returns the attention-weighted sum of
    the projected source features.

    Args:
        in_feats: dimensionality of the input node features.
        out_feats: dimensionality of the output node features.
    """

    def __init__(self, in_feats, out_feats):
        super().__init__()
        # Shared linear projection applied to every node feature.
        self.fc = nn.Linear(in_feats, out_feats)
        # Scores the concatenation of (source, destination) projected features.
        self.attn_fc = nn.Linear(2 * out_feats, 1)

    def message_func(self, edges):
        """Message function: carry the source feature plus an unnormalized
        attention logit for each edge."""
        z_src = edges.src['z']
        z_dst = edges.dst['z']
        # e_ij = LeakyReLU(a^T [z_i || z_j]), negative slope 0.2.
        a = self.attn_fc(torch.cat([z_src, z_dst], dim=1))
        return {'m': z_src, 'a': F.leaky_relu(a, 0.2)}

    def reduce_func(self, nodes):
        """Reduce function: softmax the logits over each node's mailbox
        (incoming edges) and take the weighted sum of the messages."""
        # mailbox tensors are (num_nodes, in_degree, ...); dim=1 normalizes
        # across the in-neighborhood. alpha is (N, deg, 1) and broadcasts
        # against the (N, deg, out_feats) messages.
        alpha = F.softmax(nodes.mailbox['a'], dim=1)
        h = torch.sum(alpha * nodes.mailbox['m'], dim=1)
        return {'h_new': h}

    def update_func(self, nodes):
        """Node-update function: expose the aggregated feature under 'h'."""
        return {'h': nodes.data['h_new']}

    def forward(self, g, h):
        """Run one round of attention-weighted message passing.

        Args:
            g: a DGLGraph whose nodes match the rows of ``h``.
            h: node feature tensor of shape (num_nodes, in_feats).

        Returns:
            Tensor of shape (num_nodes, out_feats).
        """
        # local_scope keeps the temporary 'z'/'h'/'h_new' writes from leaking
        # into (or clobbering) the caller's graph data.
        with g.local_scope():
            g.ndata['z'] = self.fc(h)
            # NOTE: the original also called g.apply_edges(self.message_func)
            # here; update_all already invokes message_func, so that computed
            # every attention logit twice and left stale 'm'/'a' tensors on
            # the edges. It has been removed.
            g.update_all(message_func=self.message_func,
                         reduce_func=self.reduce_func,
                         apply_node_func=self.update_func)
            return g.ndata['h']


# Smoke test — guarded so importing this module no longer runs the demo
# (the original executed these statements on import as a side effect).
if __name__ == "__main__":
    # A 3-node directed cycle: 0 -> 1 -> 2 -> 0, with 5-dim input features.
    g = dgl.graph(([0, 1, 2], [1, 2, 0]))
    g.ndata['h'] = torch.randn(3, 5)
    layer = GATLayer(5, 3)
    out = layer(g, g.ndata['h'])
    print(out.shape)  # expected: torch.Size([3, 3])
