import pickle

import torch
from torch import optim
from torch.utils.data import DataLoader
import numpy as np

from sdne.src.trainer.parser import get_parser_args
from sdne.src.data_loader.data_loader import Read_graph, Dataload
from sdne.src.model.model import MNN


def train(args):
    """Train the SDNE autoencoder on a graph and save the node embeddings.

    Args:
        args: parsed CLI namespace; expected fields: input, output, nhid0,
            nhid1, dropout, alpha, lr, step_size, gamma, bs, epochs, beta,
            nu1, nu2.

    Side effects:
        Writes the learned embedding matrix to ``args.output`` via np.savetxt.
    """
    # Read_graph is a project helper; presumably returns the graph, its dense
    # adjacency matrix as a torch tensor, and the node count — TODO confirm.
    G, Adj, Node = Read_graph(args.input)
    # MNN: the paper's autoencoder (./models/model.py), implemented in PyTorch.
    model = MNN(Node, args.nhid0, args.nhid1, args.dropout, args.alpha)
    # Adam optimizer with a step-decay learning-rate schedule.
    opt = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=args.step_size, gamma=args.gamma)
    Data = Dataload(Adj, Node)
    # Batch the node indices for mini-batch training.
    Data = DataLoader(Data, batch_size=args.bs, shuffle=True)

    for epoch in range(1, args.epochs + 1):
        loss_sum, loss_L1, loss_L2, loss_reg = 0.0, 0.0, 0.0, 0.0
        for index in Data:
            # Adj = N * N; index is a batch of n node ids, so
            # adj_batch = n * N rows of the adjacency matrix,
            adj_batch = Adj[index]
            # and adj_mat = n * n sub-adjacency among the sampled nodes.
            adj_mat = adj_batch[:, index]

            # Penalty matrix B: weight beta on nonzero (edge) entries, 1 on
            # non-edges, so reconstruction errors on observed edges cost more.
            b_mat = torch.ones_like(adj_batch)
            b_mat[adj_batch != 0] = args.beta

            # Gradients accumulate in PyTorch; clear them before backward().
            opt.zero_grad()
            L_1st, L_2nd, L_all = model(adj_batch, adj_mat, b_mat)
            # L1/L2 weight regularization: nu1 * sum|W| + nu2 * sum(W^2).
            L_reg = 0
            for param in model.parameters():
                L_reg += args.nu1 * torch.sum(torch.abs(param)) + args.nu2 * torch.sum(param * param)
            # Total objective = model loss + regularization term.
            Loss = L_all + L_reg
            Loss.backward()
            opt.step()
            # BUGFIX: accumulate Python floats via .item(). Accumulating the
            # tensors themselves kept every batch's autograd graph alive for
            # the whole epoch — a growing memory leak. (Assumes the model
            # returns 0-dim tensors, as the original %f printing implied.)
            loss_sum += Loss.item()
            loss_L1 += L_1st.item()
            loss_L2 += L_2nd.item()
            loss_reg += L_reg.item()
        # BUGFIX: StepLR.step(epoch) was deprecated in PyTorch 1.4 and later
        # removed; the scheduler tracks the epoch itself — call once per epoch.
        scheduler.step()
        # Per-epoch training report.
        print("loss for epoch %d is:" % epoch)
        print("loss_sum is %f" % loss_sum)
        print("loss_L1 is %f" % loss_L1)
        print("loss_L2 is %f" % loss_L2)
        print("loss_reg is %f" % loss_reg)
    model.eval()
    # BUGFIX: run inference under no_grad() — no autograd graph is needed to
    # extract embeddings, and building one over the full N*N adjacency wastes
    # memory.
    with torch.no_grad():
        embedding = model.savector(Adj)
    outVec = embedding.detach().numpy()
    np.savetxt(args.output, outVec)


def main():
    """Entry point: parse command-line arguments and run SDNE training."""
    cli_args = get_parser_args()
    print('training start!')
    train(cli_args)
    print('training finished!')


# Standard script guard: run training only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
