"""
@Filename       : main.py
@Create Time    : 2020/11/2 16:00
@Author         : Rylynn
@Description    : Training entry point for KgDiffuse-family diffusion-prediction models.

"""
import argparse

from torch.utils.data import DataLoader
from tqdm import tqdm

from evaluate.metric import run_evaluation
from model.kg.kg_diffuse import KgDiffuse
from model.kg.kg_diffuse_plus import KgDiffusePlus

from util.dataloader import DiffuseSequenceDataSet, sequence_collate_fn

import torch
import torch.optim as optim

from util.optim import ScheduledOptim
from util.preprocess import load_vocab_dict, load_content


def train(model_clz, config):
    """Train a diffusion-prediction model and checkpoint the best one to disk.

    Builds train/test loaders from the configured dataset, optimizes with
    Adam wrapped in a warmup scheduler, adds an L2 penalty over all
    parameters, and every 2 epochs evaluates on the test set, saving the
    model whenever hits@10 improves.

    Args:
        model_clz: model class to instantiate (e.g. KgDiffuse / KgDiffusePlus);
            called as ``model_clz(config)``.
        config: dict of hyperparameters; this function reads 'dataset',
            'train_batch_size', 'test_batch_size', 'lr', 'epoches' and
            'knowledge_aware'.

    Side effects:
        Writes '../models/<ModelClass>_<dataset>_best.pkl' on improvement and
        prints per-epoch loss / evaluation results.
    """
    dataset = config['dataset']
    vocab_dict = load_vocab_dict('../data', dataset)
    train_dataset = DiffuseSequenceDataSet('../data/{}/cascade.txt'.format(dataset), vocab_dict)
    test_dataset = DiffuseSequenceDataSet('../data/{}/cascadetest.txt'.format(dataset), vocab_dict)

    dataloader = DataLoader(train_dataset,
                            batch_size=config['train_batch_size'],
                            shuffle=True,
                            collate_fn=sequence_collate_fn)

    test_dataloader = DataLoader(test_dataset,
                                 batch_size=config['test_batch_size'],
                                 collate_fn=sequence_collate_fn)

    model = model_clz(config)
    model = model.cuda()
    adam = optim.Adam(model.parameters(), betas=(0.9, 0.98), eps=1e-09, lr=config['lr'])
    optimizer = ScheduledOptim(adam, 64, 1000)

    best_hits_10 = 0
    for epoch in range(config['epoches']):
        model.train()
        average_batch_loss = 0.0
        # ------------- Training -------------
        for (content_id, data) in tqdm(dataloader):
            data = data.cuda()
            # Teacher forcing: target is the input sequence shifted left by one.
            next_node = data[:, 1:].contiguous().view(-1)
            pred = model(content_id, data)
            loss = model.loss(pred, next_node)

            # Manual L2 regularization over every parameter (including biases).
            l2_loss = 0
            for param in model.parameters():
                l2_loss += torch.sum(torch.norm(param, 2))
            loss += l2_loss * 0.0005
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            optimizer.update_learning_rate()
            average_batch_loss += loss.item()
            if config['knowledge_aware'] and model_clz == KgDiffusePlus:
                # KgDiffusePlus keeps pooled KG gradients that must be reset per step.
                model.kg_attention_pooling.clear_uikg_pooling_gradient()
        # Report 1-based epoch number so '100/100' is the final epoch, not '99/100'.
        print('Epoches {}/{}, prediction loss:{} '.format(epoch + 1, config['epoches'],
                                                          average_batch_loss / len(dataloader)))

        # --------------     Evaluation   --------------
        if (epoch + 1) % 2 == 0:
            res = run_evaluation(model, test_dataloader)
            if res['hits@10'] > best_hits_10:
                # Use a context manager so the checkpoint file handle is closed.
                with open('../models/{}_{}_best.pkl'.format(model.__class__.__name__, dataset), mode='wb') as ckpt:
                    torch.save(model, ckpt)
                print('Write BEST model into disk with result: {}'.format(res))
                best_hits_10 = res['hits@10']
        # -------------- Finish Evaluation -------------


def load_embedding(filepath):
    """Load a word2vec-style text embedding file into a dense float tensor.

    The first line of the file is "<node_nums> <dim>"; each subsequent line
    is "<index> <v1> ... <v_dim>".  Row ``index + 1`` of the result is filled
    (index 0 appears reserved for padding — TODO confirm against callers),
    and two extra zero rows are allocated beyond ``node_nums``.

    Args:
        filepath: path to the embedding text file.

    Returns:
        torch.FloatTensor of shape (node_nums + 2, dim); rows without an
        entry in the file stay zero.
    """
    with open(filepath) as f:
        node_nums, dim = f.readline().split(' ')
        # int()/float() instead of eval(): never evaluate file contents as code.
        user_feat = torch.zeros(int(node_nums) + 2, int(dim))
        for line in f:
            fields = line.split(' ')
            idx = int(fields[0])
            user_feat[idx + 1] = torch.FloatTensor([float(v) for v in fields[1:]])

    return user_feat


if __name__ == '__main__':
    # Command-line options: dataset selection plus a graph flag.
    # NOTE(review): --use_graph is parsed but never read below — confirm intent.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--use_graph', action="store_true")
    arg_parser.add_argument('--dataset', type=str, default="dblp_new")
    cli_args = arg_parser.parse_args()
    dataset = cli_args.dataset

    # Vocabulary sizes per dataset (+1 for the padding/extra index).
    nodes_per_dataset = {
        'weibo': 5000 + 1,
        'dblp': 17174 + 1,
        'dblp_new': 9941 + 1,
        'memetracker': 10407 + 1,
        'lastfm': 971 + 1,
    }

    # Hyperparameter bundle consumed by train() and the model constructor.
    run_config = {
        'dataset': dataset,
        'node_num': nodes_per_dataset[dataset],
        'feat_dim': 128,
        'embed_dim': 128,
        'hidden_dim': 128,
        'state_dim': 128,
        'pos_dim': 8,
        'epoches': 100,
        'train_batch_size': 16,
        'test_batch_size': 16,
        'lr': 0.001,
        'dropout': 0.5,
        'window_size': 3,
        'content_aware': False,
        'knowledge_aware': True,
    }

    print(run_config)
    train(model_clz=KgDiffuse, config=run_config)
