import time

import numpy as np
import random
import torch
import torch.nn as nn
from sklearn.metrics import f1_score
from torch.autograd import Variable

from graphsage.src.data_loader.data_loader import load_cora, load_pubmed
from graphsage.src.model.layers import MeanAggregator, Encoder
from graphsage.src.model.model import SupervisedGraphSage


def run_cora():
    """Train a 2-layer supervised GraphSAGE model on Cora (7-class node classification).

    Loads the dataset via ``load_cora``, builds two mean-aggregation GNN layers,
    trains with SGD for 100 mini-batches of 256 nodes, then reports the
    micro-F1 score on a held-out validation split and the mean batch time.
    """
    # Seed all RNGs so splits, neighbor sampling, and weight init are
    # reproducible (torch.manual_seed was missing, leaving init non-deterministic).
    np.random.seed(1)
    random.seed(1)
    torch.manual_seed(1)
    # Cora: 2708 nodes, 1433-dim bag-of-words features.
    num_nodes = 2708
    # feat_data: node feature matrix
    # labels: per-node class ids
    # adj_lists: adjacency as dict {node: set(neighbors)}
    feat_data, labels, adj_lists = load_cora()
    # Frozen embedding table holding the raw input feature matrix X
    # (num_nodes x feat_dim); requires_grad=False keeps it out of training.
    features = nn.Embedding(2708, 1433)
    features.weight = nn.Parameter(torch.FloatTensor(feat_data), requires_grad=False)
    # features.cuda()

    # Layer 1: mean-aggregate sampled neighbors (Algorithm 1, line 4), then
    # transform (line 5). gcn=True means GCN-style aggregation (neighborhood
    # only) rather than concatenating the node's own vector.
    agg1 = MeanAggregator(features, cuda=True)
    enc1 = Encoder(features, 1433, 128, adj_lists, agg1, gcn=True, cuda=False)

    # Layer 2: consumes layer-1 embeddings. Encoder outputs shape
    # (embed_dim, nodes), hence the .t() transpose before re-use.
    agg2 = MeanAggregator(lambda nodes: enc1(nodes).t(), cuda=False)
    # enc1.embed_dim = 128; the layer-2 output dimension is also 128.
    enc2 = Encoder(lambda nodes: enc1(nodes).t(), enc1.embed_dim, 128, adj_lists, agg2,
                   base_model=enc1, gcn=True, cuda=False)

    # Number of neighbors sampled per node at each layer.
    enc1.num_samples = 5
    enc2.num_samples = 5

    # 7-way classifier over the layer-2 node embeddings.
    graphsage = SupervisedGraphSage(7, enc2)
    # graphsage.cuda()

    # Shuffle node ids, then split: 1000 test / 500 val / rest train.
    rand_indices = np.random.permutation(num_nodes)
    test = rand_indices[:1000]  # NOTE: unused below; kept for parity with the split scheme
    val = rand_indices[1000:1500]
    train = list(rand_indices[1500:])

    # SGD over trainable parameters only (the frozen feature table is excluded).
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, graphsage.parameters()), lr=0.7)
    # Per-batch wall-clock training times.
    times = []
    # Train for 100 mini-batches.
    for batch in range(100):
        # Take the first 256 nodes as this batch, then reshuffle so the
        # next iteration draws a different random batch.
        batch_nodes = train[:256]
        random.shuffle(train)
        start_time = time.time()
        optimizer.zero_grad()
        # Cross-entropy loss defined inside SupervisedGraphSage.
        loss = graphsage.loss(batch_nodes,
                              Variable(torch.LongTensor(labels[np.array(batch_nodes)])))
        # Backpropagate and update parameters.
        loss.backward()
        optimizer.step()
        end_time = time.time()
        times.append(end_time - start_time)
        # loss.item() extracts the Python scalar; the old `loss.data` printed
        # a raw tensor and `loss.data[0]` fails on 0-dim tensors in torch >= 0.4.
        print(batch, loss.item())

    # Validation: micro-F1 over argmax class predictions.
    val_output = graphsage.forward(val)
    print("Validation F1:", f1_score(labels[val], val_output.data.numpy().argmax(axis=1), average="micro"))
    print("Average batch time:", np.mean(times))


def run_pubmed():
    """Train a 2-layer supervised GraphSAGE model on Pubmed (3-class node classification).

    Same pipeline as ``run_cora``: load data, build two mean-aggregation GNN
    layers, train with SGD for 200 mini-batches of 1024 nodes, then report the
    validation micro-F1 score and the mean batch time.
    """
    # Seed all RNGs so splits, neighbor sampling, and weight init are
    # reproducible (torch.manual_seed was missing, leaving init non-deterministic).
    np.random.seed(1)
    random.seed(1)
    torch.manual_seed(1)
    # Pubmed: 19717 nodes, 500-dim TF-IDF features.
    num_nodes = 19717
    feat_data, labels, adj_lists = load_pubmed()
    # Frozen embedding table holding the raw input feature matrix X.
    features = nn.Embedding(19717, 500)
    features.weight = nn.Parameter(torch.FloatTensor(feat_data), requires_grad=False)
    # features.cuda()

    # Two GNN layers; the second consumes the first's output. Encoder outputs
    # shape (embed_dim, nodes), hence the .t() transpose before re-use.
    agg1 = MeanAggregator(features, cuda=True)
    enc1 = Encoder(features, 500, 128, adj_lists, agg1, gcn=True, cuda=False)
    agg2 = MeanAggregator(lambda nodes: enc1(nodes).t(), cuda=False)
    enc2 = Encoder(lambda nodes: enc1(nodes).t(), enc1.embed_dim, 128, adj_lists, agg2,
                   base_model=enc1, gcn=True, cuda=False)
    # Neighbors sampled per node: 10 at layer 1, 25 at layer 2.
    enc1.num_samples = 10
    enc2.num_samples = 25

    # 3-way classifier over the layer-2 node embeddings.
    graphsage = SupervisedGraphSage(3, enc2)
    #    graphsage.cuda()
    # Shuffle node ids, then split: 1000 test / 500 val / rest train.
    rand_indices = np.random.permutation(num_nodes)
    test = rand_indices[:1000]  # NOTE: unused below; kept for parity with the split scheme
    val = rand_indices[1000:1500]
    train = list(rand_indices[1500:])

    # SGD over trainable parameters only (the frozen feature table is excluded).
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, graphsage.parameters()), lr=0.7)
    times = []
    for batch in range(200):
        # Take the first 1024 nodes, then reshuffle for the next iteration.
        batch_nodes = train[:1024]
        random.shuffle(train)
        start_time = time.time()
        optimizer.zero_grad()
        loss = graphsage.loss(batch_nodes,
                              Variable(torch.LongTensor(labels[np.array(batch_nodes)])))
        loss.backward()
        optimizer.step()
        end_time = time.time()
        times.append(end_time - start_time)
        # BUG FIX: loss.data[0] raises IndexError on 0-dim tensors in
        # torch >= 0.4; loss.item() is the supported way to get the scalar.
        print(batch, loss.item())

    # Validation: micro-F1 over argmax class predictions.
    val_output = graphsage.forward(val)
    print("Validation F1:", f1_score(labels[val], val_output.data.numpy().argmax(axis=1), average="micro"))
    print("Average batch time:", np.mean(times))


if __name__ == "__main__":
    # Script entry point: trains on Cora by default (run_pubmed is available
    # but must be invoked manually).
    run_cora()