from src.dataset import PolyvoreDataset, CompatibilityBenchmarkDataset, FITBDataset
from src.const import base_path
import matplotlib.pyplot as plt
import pandas as pd
import torch
import torch.utils.data
from torch import nn
import numpy as np
from torch.nn import functional as F
from src.const import base_path
from src import const
from src.utils import parse_args_and_merge_const, load_json, build_vocab
from tensorboardX import SummaryWriter
import os
from src.utils import CompatibilityBenchmarkHelper, FITBBenchMarkHelper


def evaluate(net, step, writer, comp_dataset, fitb_dataset):
    """Run the FITB and compatibility benchmarks and log them to TensorBoard.

    Args:
        net: model implementing whatever interface FITBBenchMarkHelper /
            CompatibilityBenchmarkHelper expect (opaque from here).
        step: global training step, used as the x-axis for the logged scalars.
        writer: tensorboardX SummaryWriter.
        comp_dataset: dataset for the compatibility (AUC) benchmark.
        fitb_dataset: dataset for the fill-in-the-blank benchmark.

    The model's training/eval mode is restored before returning, so callers
    no longer have to remember to call ``net.train()`` afterwards (the old
    version left the model in eval mode as a side effect).
    """
    print('Now Evaluate..')
    was_training = net.training
    with torch.no_grad():
        net.eval()
        fitb_benchmark = FITBBenchMarkHelper(fitb_dataset, net).get_benchmark()
        print('FITB Benchmark at Step {} : {}'.format(step, fitb_benchmark))
        writer.add_scalar('metrics/fitb_benchmark_accuracy', fitb_benchmark, step)
        comp_benchmark = CompatibilityBenchmarkHelper(comp_dataset, net).get_benchmark()
        print('Compatibility Benchmark at Step {} : {}'.format(step, comp_benchmark))
        writer.add_scalar('metrics/comp_benchmark_auc', comp_benchmark, step)
    if was_training:
        net.train()


if __name__ == '__main__':
    args = parse_args_and_merge_const()
    # Training-from-scratch entry point: refuse a pre-trained model path.
    # Explicit raise instead of `assert`, which is stripped under `python -O`.
    if args.model != "":
        raise ValueError("args.model must be empty for training from scratch")

    # Checkpoint directory models/<MODEL_NAME>; exist_ok avoids the
    # check-then-create race of the old exists()/makedirs() pair and also
    # creates the parent 'models' directory when missing.
    folder_name = os.path.join('models', const.MODEL_NAME)
    os.makedirs(folder_name, exist_ok=True)

    # Outfit splits; the vocabulary is built from the training split only
    # and saved next to the checkpoints for later inference.
    train_set = load_json(os.path.join(const.base_path, 'train_no_dup.json'))
    valid_set = load_json(os.path.join(const.base_path, 'valid_no_dup.json'))
    test_set = load_json(os.path.join(const.base_path, 'test_no_dup.json'))
    vocab = build_vocab(train_set)
    vocab.save(os.path.join(folder_name, "vocab.pkl"))
    train_dataset = PolyvoreDataset(train_set, const.DATASET_PROC_METHOD_TRAIN, vocab)
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=const.BATCH_SIZE, shuffle=True, num_workers=4)
    comp_dataset = CompatibilityBenchmarkDataset(const.DATASET_PROC_METHOD_VAL)
    fitb_dataset = FITBDataset(const.DATASET_PROC_METHOD_VAL)

    if const.USE_PRETRAINED_WORD_EMBEDDING:
        net = const.USE_NET(pretrained_embeddings=vocab.get_embeddings())
    else:
        print("Not use pretrained word embedding!")
        net = const.USE_NET(pretrained_embeddings=None)
    net = net.to(const.device)

    learning_rate = const.LEARNING_RATE
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)

    writer = SummaryWriter(const.TRAIN_DIR)

    total_step = len(train_dataloader)
    step = 0
    for epoch in range(const.NUM_EPOCH):
        net.train()
        for i, sample in enumerate(train_dataloader):
            step += 1
            # Move every tensor field of the batch dict onto the training device.
            for key in sample:
                if isinstance(sample[key], torch.Tensor):
                    sample[key] = sample[key].to(const.device)
            output = net(sample)
            # cal_loss is expected to return {'all': total_loss,
            # 'structure': iterable of (name, weight, loss_tensor)} — inferred
            # from the indexing below; confirm against the model definition.
            loss = net.cal_loss(sample, output)

            optimizer.zero_grad()
            loss['all'].backward()
            optimizer.step()

            if (i + 1) % 10 == 0:
                # Log each loss component both raw and weighted.
                for name, weight, value in loss['structure']:
                    writer.add_scalar('loss/{}'.format(name), value.item(), step)
                    writer.add_scalar('loss_weighted/{}'.format(name), weight * value.item(), step)
                writer.add_scalar('loss_weighted/all', loss['all'].item(), step)
                writer.add_scalar('global/learning_rate', learning_rate, step)
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                      .format(epoch + 1, const.NUM_EPOCH, i + 1, total_step, loss['all'].item()))
            if step % const.SAVE_EVERY_STEPS == 0:
                print('Saving Model....')
                net.set_buffer('step', step)
                torch.save(net.state_dict(), os.path.join(folder_name, 'model.pt-step{}'.format(step)))
                print('OK.')
            if step % const.VAL_EVERY_STEPS == 0 and const.VAL_WHILE_TRAIN:
                evaluate(net, step, writer, comp_dataset, fitb_dataset)
                net.train()  # evaluate() may leave the net in eval mode; switch back
        # Learning-rate decay: update the existing optimizer's param groups
        # in place. The old code rebuilt the Adam optimizer each epoch, which
        # silently discarded its first/second moment state.
        learning_rate *= const.LEARNING_RATE_DECAY
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate
        # Optional end-of-epoch checkpoint (enabled by const.SAVE_EVERY_EPOCHS).
        if hasattr(const, "SAVE_EVERY_EPOCHS") and (epoch + 1) % const.SAVE_EVERY_EPOCHS == 0:
            print('Saving Model....')
            net.set_buffer('step', step)
            torch.save(net.state_dict(), os.path.join(folder_name, 'model.pt-epoch{}'.format(epoch + 1)))
            print('OK.')
        # Also evaluate at the end of every VAL_EVERY_EPOCHS-th epoch,
        # optionally skipping the first VAL_START_EPOCH - 1 epochs.
        if hasattr(const, 'VAL_START_EPOCH') and epoch + 1 < const.VAL_START_EPOCH:
            continue
        if (epoch + 1) % const.VAL_EVERY_EPOCHS == 0:
            evaluate(net, step, writer, comp_dataset, fitb_dataset)
