import time

import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

from gcn.src.data_loader.data_loader import load_data
from gcn.src.model.model import GCN
from gcn.src.trainer.parser import get_parser_args
from gcn.src.util import accuracy

args = get_parser_args()
# Load data.
# adj: sparse tensor holding the (symmetric) adjacency matrix of the graph
# features: node feature tensor
# labels: node label tensor
# idx_train / idx_val / idx_test: index collections for the train/val/test
#   splits — presumably LongTensors usable for fancy indexing; confirm in
#   load_data()
adj, features, labels, idx_train, idx_val, idx_test = load_data()

# Model and optimizer.

# GCN model:
# nfeat: input dimension = number of columns of the feature matrix
# nhid: hidden-layer width
# nclass: number of output classes = max label value + 1
# dropout: dropout probability
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)

# Adam optimizer over the model parameters; holds optimizer state and
# updates the parameters from computed gradients on each step().
# lr: learning rate
# weight_decay: L2 penalty on the weights
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)

# When CUDA is enabled, move the model and all tensors to the GPU so the
# subsequent forward/backward passes run there.
if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()

# NOTE(review): Variable is a no-op since PyTorch 0.4 — tensors carry
# autograd state themselves; kept here for compatibility with the original.
features, adj, labels = Variable(features), Variable(adj), Variable(labels)


# 定义训练函数
def train(epoch):
    """Run one training epoch and print train/validation metrics.

    Args:
        epoch: zero-based epoch index (printed as ``epoch + 1``).

    Uses the module-level ``model``, ``optimizer``, ``features``, ``adj``,
    ``labels`` and the ``idx_train`` / ``idx_val`` index tensors.
    """
    # Wall-clock start time for the per-epoch timing report.
    t = time.time()

    # Training mode: enables dropout (and batch-norm updates, if any).
    model.train()

    # Reset accumulated gradients before this epoch's backward pass.
    optimizer.zero_grad()
    # Full-graph forward pass.
    output = model(features, adj)
    # Negative log-likelihood loss on the training indices — assumes the
    # model outputs log-probabilities (e.g. ends with log_softmax).
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    # Backpropagate and take one optimizer step.
    loss_train.backward()
    optimizer.step()

    # Evaluate validation-set performance separately so dropout is
    # deactivated; in fastmode the training-mode output is reused instead.
    if not args.fastmode:
        model.eval()
        # no_grad: this forward pass is evaluation-only, so skip building
        # the autograd graph (saves memory and time).
        with torch.no_grad():
            output = model(features, adj)

    # Validation loss and accuracy on the held-out indices.
    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])

    # Report epoch number, train/val loss and accuracy, and elapsed time.
    print('Epoch: {:04d}'.format(epoch + 1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))


# 定义测试函数，相当于对已有的模型在测试集上运行对应的loss与accuracy
def test():
    """Evaluate the trained model on the test split and print loss/accuracy.

    Uses the module-level ``model``, ``features``, ``adj``, ``labels`` and
    the ``idx_test`` index tensor.
    """
    # Inference mode: disables dropout (and freezes batch-norm, if any).
    model.eval()
    # no_grad: evaluation-only forward pass — no autograd graph needed.
    with torch.no_grad():
        output = model(features, adj)
    # Negative log-likelihood loss and accuracy on the test indices.
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])

    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))


def main():
    """Train the model for args.epochs epochs, then evaluate on the test set."""
    print('training start!')
    # Track total wall-clock time across the whole run.
    start = time.time()
    # One call to train() per epoch.
    for epoch in range(args.epochs):
        train(epoch)
    print("Optimization Finished!")
    print("Total time elapsed: {:.4f}s".format(time.time() - start))

    # Final evaluation on the held-out test split.
    test()
    print('training finished!')


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
