import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

from ggnn.src.trainer.parser import get_parser_args
from ggnn.src.data_loader.data_loader import bAbIDataset, bAbIDataloader
from ggnn.src.model.model import GGNN


def train(epoch, dataloader, net, criterion, optimizer, opt):
    """Run one training epoch of the GGNN over `dataloader`.

    Args:
        epoch: current epoch index (used only for progress logging).
        dataloader: yields (adj_matrix, annotation, target) batches.
        net: GGNN model, called as net(init_input, annotation, adj_matrix).
        criterion: loss function (CrossEntropyLoss in main()).
        optimizer: optimizer stepping net's parameters.
        opt: options namespace; reads n_node, state_dim, annotation_dim,
            cuda, verbal and niter.
    """
    net.train()
    for i, (adj_matrix, annotation, target) in enumerate(dataloader, 0):
        net.zero_grad()
        # Zero-pad the annotation up to state_dim so it can serve as the
        # initial node state: (batch, n_node, state_dim).
        padding = torch.zeros(len(annotation), opt.n_node, opt.state_dim - opt.annotation_dim).double()
        init_input = torch.cat((annotation, padding), 2)
        # Move the batch to the GPU when requested.
        if opt.cuda:
            init_input = init_input.cuda()
            adj_matrix = adj_matrix.cuda()
            annotation = annotation.cuda()
            target = target.cuda()

        # NOTE: Variable wrappers removed — tensors are autograd-aware
        # directly since PyTorch 0.4.
        output = net(init_input, annotation, adj_matrix)

        # Cross-entropy loss against the class targets.
        loss = criterion(output, target)

        loss.backward()
        optimizer.step()

        # Log roughly 10 times per epoch; the +1 guards against a zero modulus
        # for short epochs.  loss.item() replaces the broken loss.data[0]
        # (indexing a 0-dim tensor raises on PyTorch >= 0.5).
        if i % int(len(dataloader) / 10 + 1) == 0 and opt.verbal:
            print('[%d/%d][%d/%d] Loss: %.4f' % (epoch, opt.niter, i, len(dataloader), loss.item()))


def test(dataloader, net, criterion, optimizer, opt):
    """Evaluate the GGNN on `dataloader` and print average loss / accuracy.

    Args:
        dataloader: yields (adj_matrix, annotation, target) batches; must
            expose `.dataset` for the final averaging.
        net: GGNN model, called as net(init_input, annotation, adj_matrix).
        criterion: loss function used for the reported average loss.
        optimizer: unused; kept for signature symmetry with train().
        opt: options namespace; reads n_node, state_dim, annotation_dim, cuda.
    """
    test_loss = 0
    correct = 0
    net.eval()
    # No gradients are needed for evaluation; skip graph construction.
    with torch.no_grad():
        for i, (adj_matrix, annotation, target) in enumerate(dataloader, 0):
            # Zero-pad the annotation up to state_dim (see train()).
            padding = torch.zeros(len(annotation), opt.n_node, opt.state_dim - opt.annotation_dim).double()
            init_input = torch.cat((annotation, padding), 2)
            if opt.cuda:
                init_input = init_input.cuda()
                adj_matrix = adj_matrix.cuda()
                annotation = annotation.cuda()
                target = target.cuda()

            output = net(init_input, annotation, adj_matrix)

            # .item() converts the 0-dim loss tensor to a Python float
            # (replaces the deprecated .data / .data[0] access).
            test_loss += criterion(output, target).item()
            # Predicted class = argmax over the class dimension.
            pred = output.data.max(1, keepdim=True)[1]
            # .item() keeps `correct` a plain int so the report prints cleanly.
            correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()

    test_loss /= len(dataloader.dataset)
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
        test_loss, correct, len(dataloader.dataset),
        100. * correct / len(dataloader.dataset)))


def main(opt):
    """Build the bAbI datasets and the GGNN, then alternate train/test epochs.

    Args:
        opt: parsed command-line options; this function also fills in
            annotation_dim, n_edge_types and n_node from the training data.
    """
    # Training split of the selected bAbI task, wrapped in a DataLoader.
    train_dataset = bAbIDataset(opt.dataroot, opt.question_id, True)
    train_dataloader = bAbIDataloader(
        train_dataset, batch_size=opt.batchSize, shuffle=True, num_workers=2)

    # Held-out test split of the same task.
    test_dataset = bAbIDataset(opt.dataroot, opt.question_id, False)
    test_dataloader = bAbIDataloader(
        test_dataset, batch_size=opt.batchSize, shuffle=False, num_workers=2)

    # bAbI uses a one-dimensional node annotation; the graph dimensions
    # come from the training data itself.
    opt.annotation_dim = 1  # for bAbI
    opt.n_edge_types = train_dataset.n_edge_types
    opt.n_node = train_dataset.n_node

    # Build the model from the collected options and run it in double
    # precision (matches the double() padding built in train/test).
    net = GGNN(opt)
    net.double()
    print(net)

    # Cross-entropy over the answer classes.
    criterion = nn.CrossEntropyLoss()

    if opt.cuda:
        net.cuda()
        criterion.cuda()

    # Adam with the configured learning rate.
    optimizer = optim.Adam(net.parameters(), lr=opt.lr)

    # One test pass after every training epoch.
    for epoch in range(opt.niter):
        train(epoch, train_dataloader, net, criterion, optimizer, opt)
        test(test_dataloader, net, criterion, optimizer, opt)


if __name__ == '__main__':
    # Parse command-line options and launch training when run as a script.
    main(get_parser_args())
