import torch,time

from torch import Tensor
import torch.nn as nn
from torch.nn import Sequential, Linear, ReLU
import torch. nn. functional as F
#安装支持包：conda install pyg -c pyg -c conda-forge
from torch_geometric.nn import GCNConv
from torch_geometric.datasets import Planetoid
from torch_geometric.nn import MessagePassing
# from torch_geometric. transforms import NormalizeFeatures
#transform可在输入前对数据进行变换normalization or data augmentation
# dataset = Planetoid(root=' data/Planetoid', name=' Cora',transform=NormalizeFeatures())
# Download (on first run) and load the Cora citation dataset.
# A transform such as NormalizeFeatures() could be passed to normalize
# or augment the node features before use.
dataset = Planetoid(root='.', name='Cora')  # raw files are stored under ./Cora/raw
# print(dataset)                                        # Cora()
# print(f'Number of graphs: {len(dataset)}')            # 1
# print(f'Number of features: {dataset.num_features}')  # 1433 (input feature dim)
# print(f'Number of classes: {dataset.num_classes}')    # 7 (number of classes)

# Take the first (and only) graph object.
data = dataset[0]
# Its attributes:
# print(data)
# Data(x=[2708, 1433], edge_index=[2, 10556], y=[2708],
#      train_mask=[2708], val_mask=[2708], test_mask=[2708])
edge_index = data.edge_index  # edge index: row 0 = source nodes, row 1 = target nodes
# print(data.x)
# print("Get the first graph object:", data)
# Gather some statistics about the graph:
# print(f'Number of nodes: {data.num_nodes}')   # 2708
# print(f'Number of edges: {data.num_edges}')   # 10556
# print(f'Average node degree: {data.num_edges / data.num_nodes:.2f}')  # 3.9
# print(f'Number of training nodes: {data.train_mask.sum()}')           # 140
# print(f'Training node label rate: {int(data.train_mask.sum()) / data.num_nodes:.2f}')  # 0.05
# print(f'Has isolated nodes: {data.has_isolated_nodes()}')  # False
# print(f'Has self-loops: {data.has_self_loops()}')          # False
# print(f'Is undirected: {data.is_undirected()}')            # True


#图可视化
import networkx as nx
import matplotlib. pyplot as plt

def visualize_graph(G, color):
    """Draw a networkx graph with a fixed spring layout.

    G     : networkx graph
    color : per-node values used for coloring (e.g. color=data.y, class labels)
    """
    plt.figure(figsize=(7, 7))
    plt.xticks([])
    plt.yticks([])
    layout = nx.spring_layout(G, seed=42)  # fixed seed -> reproducible layout
    nx.draw_networkx(G, pos=layout, with_labels=False, node_color=color, cmap="Set2")
    plt.show()

#h-隐藏层表示，color-列类别
def visualize_embedding(h, color, epoch=None, loss=None):
    """Scatter-plot a 2-D node embedding.

    h     : hidden representation, tensor whose first two columns are plotted
    color : per-node values used for coloring (e.g. data.y, class labels)
    epoch, loss : optional; when both are given they are shown as the x label
    """
    plt.figure(figsize=(7, 7))
    plt.xticks([])
    plt.yticks([])
    h = h.detach().cpu().numpy()
    plt.scatter(h[:, 0], h[:, 1], s=140, c=color, cmap="Set2")
    if epoch is not None and loss is not None:
        # Bug fix: the original used parentheses instead of braces in the
        # f-string, so it printed the literal text instead of the values.
        plt.xlabel(f'Epoch: {epoch}, Loss: {loss.item():.4f}', fontsize=16)
    plt.show()

from torch_geometric. utils import to_networkx
# Convert the PyG graph to a networkx graph for plotting.
G=to_networkx (data,  to_undirected=True)
# visualize_graph(G, color=data.y)  # too many nodes to render clearly

#2层图卷积
class GCN_relu(torch.nn.Module):
    """Two-layer graph convolutional network: GCNConv -> ReLU -> dropout -> GCNConv."""

    def __init__(self, in_channels, hidden_channels, out_channels):
        super().__init__()
        self.conv1 = GCNConv(in_channels, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, out_channels)

    def forward(self, x: Tensor, edge_index: Tensor) -> Tensor:
        """Compute raw class scores.

        x          : node feature matrix of shape [num_nodes, in_channels]
        edge_index : graph connectivity of shape [2, num_edges]
        Returns a tensor of shape [num_nodes, out_channels].
        """
        hidden = F.relu(self.conv1(x, edge_index))
        # Dropout is active only while self.training is True.
        hidden = F.dropout(hidden, p=0.5, training=self.training)
        return self.conv2(hidden, edge_index)

# Instantiate the 2-layer GCN; for Cora this is GCN_relu(1433, 16, 7).
model_1 = GCN_relu(dataset.num_features, 16, dataset.num_classes)

#实现节点分类
class GCN_tanh(torch.nn.Module):
    """Node classifier: three GCNConv layers with tanh activations, a 2-D
    embedding bottleneck (convenient for visualization), then a linear head."""

    def __init__(self, in_channels, hidden_channels, out_channels):
        super().__init__()
        # torch.manual_seed(1234) here would make initialization reproducible.
        self.conv1 = GCNConv(in_channels, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, hidden_channels)
        self.conv3 = GCNConv(hidden_channels, 2)  # embedding shape: [num_nodes, 2]
        self.classifier = Linear(2, out_channels)  # final linear classifier

    def forward(self, x, edge_index):
        embedding = torch.tanh(self.conv1(x, edge_index))
        # Dropout only between the first and second convolution.
        embedding = F.dropout(embedding, p=0.5, training=self.training)
        embedding = torch.tanh(self.conv2(embedding, edge_index))
        embedding = torch.tanh(self.conv3(embedding, edge_index))
        # Final 2-D GNN embedding -> linear classifier.
        return self.classifier(embedding)

# Initialize the tanh GCN; for Cora this is GCN_tanh(1433, 4, 7).
model_2= GCN_tanh(dataset.num_features,4, dataset.num_classes)
# print(model_2)  # GCN_tanh( (conv1): GCNConv(1433, 4) ...
# _, h = model_2(data.x, data.edge_index)  # requires forward to `return out, h`
# print(f'Embedding shape: {list(h.shape)}')  # Embedding shape: [2708, 2]
# visualize_embedding(h, color=data.y)


#边卷积-edge convolutional layer from Wang et al.:
class EdgeConv(MessagePassing):
    """Edge convolution layer from Wang et al. with element-wise max aggregation."""

    def __init__(self, in_channels, out_channels):
        super().__init__(aggr="max")  # aggregate neighbor messages by max
        self.mlp = Sequential(
            Linear(2 * in_channels, out_channels),
            ReLU(),
            Linear(out_channels, out_channels),
        )

    def forward(self, x: Tensor, edge_index: Tensor) -> Tensor:
        """x: [num_nodes, in_channels]; edge_index: [2, num_edges].
        Returns aggregated features of shape [num_nodes, out_channels]."""
        return self.propagate(edge_index, x=x)

    def message(self, x_j: Tensor, x_i: Tensor) -> Tensor:
        """Per-edge message MLP([x_i, x_j - x_i]).

        x_j : source node features, [num_edges, in_channels]
        x_i : target node features, [num_edges, in_channels]
        Returns messages of shape [num_edges, out_channels].
        """
        relative = x_j - x_i
        return self.mlp(torch.cat((x_i, relative), dim=-1))


#节点分类
from sklearn. manifold import TSNE


#多层感知机
class MLP(torch.nn.Module):
    """Graph-agnostic baseline: two fully connected layers over node features
    (the graph structure / edge_index is deliberately ignored)."""

    def __init__(self, in_channels, hidden_channels, out_channels):
        super().__init__()
        torch.manual_seed(12345)  # fixed seed -> identical initialization every run
        self.lin1 = Linear(in_channels, hidden_channels)
        self.lin2 = Linear(hidden_channels, out_channels)

    def forward(self, x: Tensor) -> Tensor:
        """Map node features [num_nodes, in_channels] to logits
        [num_nodes, out_channels]."""
        hidden = F.dropout(F.relu(self.lin1(x)), p=0.5, training=self.training)
        return self.lin2(hidden)

# Instantiate the MLP baseline; for Cora this is MLP(1433, 16, 7).
model_mlp = MLP(dataset.num_features, 16, dataset.num_classes)
# print(model_mlp)

# Alias for whichever model train()/test() below should operate on.
model_init_name = model_mlp

# Training setup.
import torch.optim as optim
criterion = nn.CrossEntropyLoss()  # multi-class cross-entropy loss
# Alternatives tried:
# optimizer = optim.SGD(model_init_name.parameters(), lr=0.001, momentum=0.9)
# optimizer = torch.optim.Adam(model_init_name.parameters(), lr=0.01)
# Adam with L2 regularization via weight_decay.
optimizer = torch.optim.Adam(model_init_name.parameters(), lr=0.01, weight_decay=5e-4)

def train(model_init_name, data):
    """Run one optimization step on the training nodes.

    model_init_name : the model to optimize (uses the module-level
                      `optimizer` and `criterion`)
    data            : graph data object with .x, .y and .train_mask
    Returns the scalar training-loss tensor.
    """
    # Bug fix: explicitly enter training mode. test() switches the model to
    # eval mode and nothing switched it back, so dropout would stay disabled
    # for any training step performed after an evaluation.
    model_init_name.train()
    optimizer.zero_grad()  # clear gradients from the previous step
    # out = model_init_name(data.x, data.edge_index)  # two-arg forward (GCN models)
    out = model_init_name(data.x)  # single-arg forward (MLP)
    # Loss is computed solely on the labeled training nodes.
    loss = criterion(out[data.train_mask], data.y[data.train_mask])
    loss.backward()  # derive gradients
    optimizer.step()  # update parameters
    return loss

#model_init_name-初始化的模型，data = dataset[0]图数据
def test(model_init_name, data):
    """Evaluate classification accuracy on the test nodes.

    model_init_name : the (trained) model
    data            : graph data object with .x, .y and .test_mask
    Returns the fraction of test nodes predicted correctly (float).
    """
    model_init_name.eval()  # inference mode: disables dropout
    # out = model_init_name(data.x, data.edge_index)  # two-arg forward (GCN models)
    # Improvement: no autograd graph is needed for evaluation.
    with torch.no_grad():
        out = model_init_name(data.x)  # single-arg forward (MLP)
    pred = out.argmax(dim=1)  # predicted class = highest score
    test_correct = pred[data.test_mask] == data.y[data.test_mask]  # compare to ground truth
    test_acc = int(test_correct.sum()) / int(data.test_mask.sum())  # ratio of correct predictions
    return test_acc

# Train for 11 epochs (0..10), reporting the loss each epoch.
for epoch in range (11):
    loss= train(model_init_name,data)
    print(f' Epoch: {epoch:03d}, Loss: {loss:.4f}')  # f-string: format spec follows the colon
    if epoch % 10 == 0:
        # visualize_embedding(h, color=data.y, epoch=epoch, loss=loss)  # visualize embeddings
        time.sleep(0.3)  # brief pause (originally to let the plot render)

# Evaluate once on the held-out test nodes.
test_acc=test(model_init_name,data)
print (f' Test Accuracy: { test_acc:.4f}')