import torch
import torch.nn.functional as F
from torch_geometric.nn import MessagePassing
from torch.nn import Linear
from dgl.data import CiteseerGraphDataset
import dgl

# Load the Citeseer citation dataset via DGL.
dataset = CiteseerGraphDataset('./citeseer', force_reload=True,reverse_edge=False)
data = dataset[0]

# Print all node data fields to confirm the feature key name.
print("Available node data fields:", data.ndata.keys())  # key debugging step

# Citeseer stores its features under 'feat'; convert to a homogeneous graph
# while explicitly carrying over the listed node data fields.
data = dgl.to_homogeneous(data, ndata=['feat', 'label', 'train_mask', 'val_mask', 'test_mask'])


# Earlier variant without the ndata argument, kept commented out for reference:
# data = dgl.to_homogeneous(data)
x = data.ndata['feat']
y = data.ndata['label']
train_mask = data.ndata['train_mask']
val_mask = data.ndata['val_mask']
test_mask = data.ndata['test_mask']

print("Train nodes:", train_mask.sum().item())
print("Val nodes:", val_mask.sum().item())
print("Test nodes:", test_mask.sum().item())

# Stack the (src, dst) tuple from data.edges() into the (2, num_edges)
# COO layout that PyG's MessagePassing expects.
edge_index = torch.stack(data.edges(), dim=0)


num_features = x.shape[1]
num_classes = dataset.num_classes

# ---- Virtual-node augmentation ----
# Add one extra "teleport" node connected to every real node, in the spirit
# of PageRank: each real node links to the virtual node with weight alpha,
# and the virtual node links back to every real node with weight 1/num_nodes.
num_nodes = x.shape[0]
alpha = 0.1  # probability mass assigned to the jump onto the virtual node

# The virtual node starts from the mean feature vector of all real nodes.
x = torch.cat([x, x.mean(dim=0, keepdim=True)], dim=0)

# Grow the masks and labels by one entry so all node tensors stay aligned;
# the virtual node belongs to no split and carries a dummy label of 0.
pad = torch.zeros(1, dtype=torch.bool)
train_mask = torch.cat([train_mask, pad])
val_mask = torch.cat([val_mask, pad])
test_mask = torch.cat([test_mask, pad])
y = torch.cat([y, y.new_zeros(1)])

virtual_node_idx = num_nodes  # the new node is appended after all real ones

# Bidirectional edges between every real node and the virtual node.
real_nodes = torch.arange(num_nodes, device=edge_index.device)
virtual_col = torch.full((num_nodes,), virtual_node_idx, device=edge_index.device)
extra_edges = torch.cat([
    torch.stack([real_nodes, virtual_col], dim=0),   # real -> virtual
    torch.stack([virtual_col, real_nodes], dim=0),   # virtual -> real
], dim=1)
edge_index = torch.cat([edge_index, extra_edges], dim=1)

# Original edges keep weight 1; teleport edges get the special weights.
edge_weights = torch.ones(edge_index.size(1), device=edge_index.device)
edge_weights[edge_index[1] == virtual_node_idx] = alpha            # into the virtual node
edge_weights[edge_index[0] == virtual_node_idx] = 1.0 / num_nodes  # out of the virtual node



# Directed graph convolution layer (kept as in the original implementation)
class DirectedGCNConv(MessagePassing):
    """Graph convolution that treats edge direction explicitly.

    Neighbour features are mean-aggregated twice -- once along the original
    (incoming) edge direction and once along the reversed (outgoing)
    direction -- with independent linear maps, then combined with a
    self-connection:

        h_v = W_self x_v + W_in mean_in(v) + W_out mean_out(v)
    """

    def __init__(self, in_channels, out_channels):
        super().__init__(aggr='mean')
        # Separate weights for in-neighbour, out-neighbour and self terms;
        # only the self term carries a bias.
        self.lin_in = Linear(in_channels, out_channels, bias=False)
        self.lin_out = Linear(in_channels, out_channels, bias=False)
        self.lin_self = Linear(in_channels, out_channels, bias=True)

    def forward(self, x, edge_index, edge_weight=None):
        # Aggregate along the original (incoming) edge direction.
        aggregated_in = self.lin_in(
            self.propagate(edge_index, x=x, edge_weight=edge_weight)
        )
        # Aggregate along the reversed (outgoing) direction, reusing the
        # same per-edge weights.
        reversed_index = torch.stack([edge_index[1], edge_index[0]], dim=0)
        aggregated_out = self.lin_out(
            self.propagate(reversed_index, x=x, edge_weight=edge_weight)
        )
        # Self connection plus both directional aggregates.
        return self.lin_self(x) + aggregated_in + aggregated_out

    def message(self, x_j, edge_weight=None):
        # Scale each neighbour message by its edge weight when provided.
        return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j

# Directed GCN model definition
class DirectedGCN(torch.nn.Module):
    """Two-layer directed GCN: conv -> ReLU -> dropout -> conv -> log-softmax.

    Args:
        num_features: Size of each input node feature vector.
        hidden_channels: Width of the hidden layer.
        num_classes: Number of output classes.
        dropout: Dropout probability applied after the first layer (active
            only in training mode). Defaults to 0.5, the previously
            hard-coded value, so existing callers are unaffected.
    """

    def __init__(self, num_features, hidden_channels, num_classes, dropout=0.5):
        super().__init__()
        self.conv1 = DirectedGCNConv(num_features, hidden_channels)
        self.conv2 = DirectedGCNConv(hidden_channels, num_classes)
        self.dropout = dropout

    def forward(self, x, edge_index, edge_weight=None):
        """Return per-node log-probabilities, shape (num_nodes, num_classes)."""
        x = self.conv1(x, edge_index, edge_weight)
        x = F.relu(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv2(x, edge_index, edge_weight)
        return F.log_softmax(x, dim=1)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DirectedGCN(
    num_features=num_features,
    hidden_channels=16,
    num_classes=num_classes
).to(device)

# Move every tensor used in training/evaluation to the selected device.
x = x.to(device)
edge_index = edge_index.to(device)
y = y.to(device)
train_mask = train_mask.to(device)
val_mask = val_mask.to(device)
test_mask = test_mask.to(device)
edge_weights = edge_weights.to(device)

# NOTE(review): the DGL graph itself is never used after this point in this
# file, so this transfer looks redundant -- confirm before removing.
data = data.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)


# Training step
def train():
    """Run one full-batch optimisation step and return the training loss."""
    model.train()
    optimizer.zero_grad()
    log_probs = model(x, edge_index, edge_weights)
    # The virtual node was appended with train_mask == False, so the loss
    # is computed over the original (real) training nodes only.
    loss = F.nll_loss(log_probs[train_mask], y[train_mask])
    loss.backward()
    optimizer.step()
    return loss.item()

@torch.no_grad()
def test():
    """Evaluate the model and return [train_acc, val_acc, test_acc].

    Runs under ``torch.no_grad()`` -- the previous version built an autograd
    graph for the forward pass on every evaluation, wasting memory and time.
    """
    model.eval()
    out = model(x, edge_index, edge_weights)
    pred = out.argmax(dim=1)
    accs = []
    # The virtual node is excluded from every mask, so it never affects accuracy.
    for mask in [train_mask, val_mask, test_mask]:
        correct = pred[mask].eq(y[mask]).sum().item()
        accs.append(correct / mask.sum().item())
    return accs

# Main training loop: 1000 full-batch epochs with model selection on
# validation accuracy; report the test accuracy of the best-validation epoch.
best_val_acc = test_acc = 0
for epoch in range(1000):
    loss = train()
    train_acc, val_acc, curr_test_acc = test()
    # Keep the test accuracy observed at the best validation epoch so far.
    if val_acc > best_val_acc:
        best_val_acc, test_acc = val_acc, curr_test_acc
    # Log progress every 10 epochs.
    if epoch % 10 == 0:
        print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, '
              f'Train: {train_acc:.4f}, Val: {val_acc:.4f}, '
              f'Test: {curr_test_acc:.4f}')

print(f'Final Test Accuracy: {test_acc:.4f}')