import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from dgl.data import CiteseerGraphDataset
import dgl

# Load the Citeseer dataset; reverse_edge=True adds a reversed copy of
# every edge so the citation graph behaves as undirected.
dataset = CiteseerGraphDataset('./citeseer', force_reload=True, reverse_edge=True)
data = dataset[0]

# Collapse to a homogeneous graph, carrying the node tensors we need along.
data = dgl.to_homogeneous(data, ndata=['feat', 'label', 'train_mask', 'val_mask', 'test_mask'])

# NOTE(review): dgl.to_bidirected(data) was tried here and disabled;
# reverse_edge=True above presumably already makes edges bidirectional — confirm.
# data = dgl.to_bidirected(data)

# Pull node features, labels, and the standard split masks off the graph.
x, y = data.ndata['feat'], data.ndata['label']
train_mask, val_mask, test_mask = (
    data.ndata[key] for key in ('train_mask', 'val_mask', 'test_mask')
)

# Dataset summary.
print("Train nodes:", train_mask.sum().item())
print("Val nodes:", val_mask.sum().item())
print("Test nodes:", test_mask.sum().item())
print("Number of features:", x.shape[1])
print("Number of classes:", dataset.num_classes)

# PyG expects edges as a single (2, num_edges) tensor rather than DGL's
# (src, dst) tensor pair.
src, dst = data.edges()
edge_index = torch.stack([src, dst], dim=0)

# 定义标准GCN模型
class GCN(torch.nn.Module):
    """Standard two-layer Graph Convolutional Network (Kipf & Welling).

    Architecture: GCNConv -> ReLU -> Dropout -> GCNConv -> log_softmax.

    Args:
        num_features: Size of each input node feature vector.
        hidden_channels: Width of the hidden layer.
        num_classes: Number of output classes.
        dropout: Dropout probability applied after the first convolution.
            Defaults to 0.5, matching F.dropout's default (backward compatible).
    """

    def __init__(self, num_features, hidden_channels, num_classes, dropout=0.5):
        super().__init__()
        self.conv1 = GCNConv(num_features, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, num_classes)
        self.dropout = dropout

    def forward(self, x, edge_index):
        """Return per-node log-probabilities of shape (num_nodes, num_classes).

        Args:
            x: Node feature matrix, shape (num_nodes, num_features).
            edge_index: Edge list in COO format, shape (2, num_edges).
        """
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        # Dropout is only active in training mode (self.training).
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=1)

# 设置设备
# Pick GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Instantiate the model on the chosen device.
model = GCN(
    num_features=x.shape[1],
    hidden_channels=16,
    num_classes=dataset.num_classes,
).to(device)

# Move every tensor the training loop touches onto the same device.
x, edge_index, y = x.to(device), edge_index.to(device), y.to(device)
train_mask, val_mask, test_mask = (
    train_mask.to(device),
    val_mask.to(device),
    test_mask.to(device),
)

# Adam with L2 regularization — the standard GCN recipe.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

# 训练函数
def train():
    """Run one full-batch training step and return the training loss."""
    model.train()
    optimizer.zero_grad()
    # NLL loss is computed only on the training nodes; the model already
    # outputs log-probabilities (log_softmax).
    log_probs = model(x, edge_index)
    loss = F.nll_loss(log_probs[train_mask], y[train_mask])
    loss.backward()
    optimizer.step()
    return loss.item()

# 测试函数
def test():
    """Evaluate accuracy on the train/val/test splits.

    Returns:
        A list [train_acc, val_acc, test_acc].
    """
    model.eval()
    with torch.no_grad():
        pred = model(x, edge_index).argmax(dim=1)
        # Fraction of correctly predicted nodes within each mask.
        accs = [
            pred[mask].eq(y[mask]).sum().item() / mask.sum().item()
            for mask in (train_mask, val_mask, test_mask)
        ]
    return accs

# 训练循环
# Main training loop with best-validation model selection.
best_val_acc = test_acc = 0
for epoch in range(200):  # epoch count kept modest; tune as needed
    loss = train()
    train_acc, val_acc, curr_test_acc = test()

    # Report the test accuracy achieved at the best validation epoch.
    if val_acc > best_val_acc:
        best_val_acc, test_acc = val_acc, curr_test_acc

    # Log progress every 10 epochs.
    if epoch % 10 == 0:
        print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, '
              f'Train: {train_acc:.4f}, Val: {val_acc:.4f}, '
              f'Test: {curr_test_acc:.4f}')

print(f'Final Test Accuracy: {test_acc:.4f}')