'''
@Time    : 2022/3/12 14:58
@Author  : Fu Junyu
@Site    : www.fujunyu.cn
@File    : GCN.py
@Software: PyCharm
'''
import torch
import time
import math
import dgl
import numpy as np
import torch.nn as nn
from dgl.data import citation_graph as citegrh
from dgl import DGLGraph
import dgl.function as fn
import networkx as nx
import torch.nn.functional as F

from dgl.nn import GraphConv
# from dgl.nn.pytorch import GraphConv
# from dgl.nn.pytorch.conv import GraphConv


# 利用 DGL 预定义的图卷积模块 GraphConv 来实现。
class GCN(nn.Module):
    def __init__(self, g, in_feats,  n_hidden, n_classes, n_layers, activation, dropout):
        super(GCN, self).__init__()
        self.g = g
        self.layers = nn.ModuleList()
        # input layer
        self.layers.append(GraphConv(in_feats, n_hidden, activation=activation))
        # output layer
        for i in range(n_layers - 1):
            self.layers.append(GraphConv(n_hidden, n_hidden, activation=activation))
        # output layer
        self.layers.append(GraphConv(n_hidden, n_classes))
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, features):
        h = features
        for i, layers in enumerate(self.layers):
            if i != 0:
                h = self.dropout(h)
            h = layers(self.g, h)
        return h


dropout = 0.5        # dropout probability applied between GCN layers
gpu = -1             # GPU device id; a negative value means run on CPU
lr = 0.01            # learning rate for the Adam optimizer
n_epochs = 200       # number of training epochs
n_hidden = 16  # number of hidden-layer units
n_layers = 2  # number of input + output layers
weight_decay = 5e-4  # L2 weight decay passed to the optimizer
self_loop = True  # whether to add a self-loop edge to every node

# Cora citation dataset.
# NOTE(review): attribute-style access (data.features, data.graph, ...) and
# the DGLGraph(...) constructor below are from an older DGL API — confirm
# against the installed DGL version.

data = citegrh.load_cora()

features = torch.FloatTensor(data.features)   # node feature matrix
labels = torch.LongTensor(data.labels)        # per-node class labels
train_mask = torch.BoolTensor(data.train_mask)  # boolean node masks for the
val_mask = torch.BoolTensor(data.val_mask)      # standard train/val/test split
test_mask = torch.BoolTensor(data.test_mask)

# print(features.shape)
# print(labels)
# print(train_mask)
# print(val_mask)

in_feats = features.shape[1]   # input feature dimensionality
n_classes = data.num_labels    # number of label classes
n_edges = data.graph.number_of_edges()  # used only for throughput reporting

# Build the DGLGraph from the NetworkX graph.
g = data.graph

if self_loop:
    # Drop any existing self-loops first so each node ends up with exactly one.
    g.remove_edges_from(nx.selfloop_edges(g))
    g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)



# 加载 GPU
if gpu < 0:
    cuda = False
else:
    cuda = True
    torch.cuda.set_device(gpu)
    features = features.cuda()
    labels = labels.cuda()
    train_mask = train_mask.cuda()
    val_mask = val_mask.cuda()
    test_mask = test_mask.cuda()

# 归一化，依据入度进行计算
degs = g.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm[torch.isinf(norm)] = 0
if cuda:
    norm = norm.cuda()
g.ndata['norm'] = norm.unsqueeze(1)

# Build the GCN model.
model = GCN(g,
            in_feats,
            n_hidden,
            n_classes,
            n_layers,
            F.relu,
            dropout)

# Move the model to the GPU when one is selected. Without this, the input
# tensors live on the GPU while the parameters stay on the CPU, and the
# first forward pass fails with a device-mismatch error.
if cuda:
    model.cuda()

# Cross-entropy loss over the raw class scores, Adam with L2 weight decay.
loss_fcn = torch.nn.CrossEntropyLoss()

optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)

# Evaluation helper.
def evaluate(model, features, labels, mask):
    """Compute classification accuracy on the nodes selected by ``mask``.

    Args:
        model: module mapping node features to per-class logits.
        features: node feature tensor fed to ``model``.
        labels: ground-truth label tensor, row-aligned with ``features``.
        mask: boolean tensor selecting which nodes to score.

    Returns:
        float: fraction of masked nodes whose argmax prediction matches the
        label; 0.0 when the mask selects no nodes (avoids division by zero).
    """
    model.eval()  # disable dropout for deterministic evaluation
    with torch.no_grad():
        logits = model(features)[mask]
        masked_labels = labels[mask]
        if len(masked_labels) == 0:
            return 0.0  # empty mask: original code divided by zero here
        _, indices = torch.max(logits, dim=1)
        correct = torch.sum(indices == masked_labels)
        return correct.item() * 1.0 / len(masked_labels)

# Train, evaluating on the validation set every 10 epochs.
dur = []  # per-epoch wall-clock durations, for the throughput report
for epoch in range(n_epochs):
    # evaluate() switches the model to eval mode, so training must switch it
    # back every epoch — otherwise dropout stays disabled after the first
    # evaluation at epoch 0.
    model.train()
    t0 = time.time()
    # forward
    logits = model(features)

    # Loss only over the training nodes.
    loss = loss_fcn(logits[train_mask], labels[train_mask])

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    dur.append(time.time() - t0)

    if epoch % 10 == 0:
        acc = evaluate(model, features, labels, val_mask)
        print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
              "ETputs(KTEPS) {:.2f}". format(epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000))

# Final accuracy on the held-out test set.
acc = evaluate(model, features, labels, test_mask)
print("Test accuracy {:.2%}".format(acc))
