#!/usr/bin/python3

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from argparse import ArgumentParser
from json import load as json_load
from json import dump as json_dump
import logging
from os import path as os_path
from os import makedirs

from numpy import save as np_save

from torch import save as torch_save
from torch import load as torch_load
from torch.optim import Adam

from torch.utils.data import DataLoader

from model import KGEModel
from embedding_model_config import BasicConfig
from dataloader import TrainDataset, TestDataset
from dataloader import BidirectionalOneShotIterator


def parse_args(args=None):
    '''
    Build and parse the command-line arguments for training/testing
    knowledge graph embedding models.

    :param args: optional list of argument strings (defaults to sys.argv)
    :return: the parsed argparse.Namespace
    '''
    parser = ArgumentParser(
        description='Training and Testing Knowledge Graph Embedding Models',
        usage='train.py [<args>] [-h | --help]'
    )

    parser.add_argument('--cuda', action='store_true', help='use GPU')
    parser.add_argument("--dataset", type=str, default="FB15k")

    parser.add_argument('--do_test', action='store_true')
    # run() reads args.continue_train but the original parser never defined it.
    parser.add_argument('--continue_train', action='store_true',
                        help='resume training from a saved model in save_path')
    # override_config() reads args.init_checkpoint; define it here as well.
    parser.add_argument('--init_checkpoint', type=str, default=None,
                        help='directory containing a saved config.json')

    parser.add_argument('--data_path', type=str, default=None)
    parser.add_argument('--model', default='TransE', type=str)
    parser.add_argument('-de', '--double_entity_embedding', action='store_true')
    parser.add_argument('-dr', '--double_relation_embedding', action='store_true')
    parser.add_argument("-ne", '--n_entity', type=int)
    parser.add_argument("-ud", "--use_delta", action="store_true")
    parser.add_argument("-id", "--init_delta", default=0.5, type=float)

    parser.add_argument('-n', '--negative_sample_size', default=128, type=int)
    parser.add_argument('-d', '--hidden_dim', default=500, type=int)
    parser.add_argument('-g', '--gamma', default=12.0, type=float)
    parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')
    parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)
    parser.add_argument('-b', '--batch_size', default=1024, type=int)
    parser.add_argument('-r', '--regularization', default=0.0, type=float)
    parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')
    parser.add_argument('--uni_weight', action='store_true',
                        help='Otherwise use subsampling weighting like in word2vec')

    parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)
    parser.add_argument("-mlr", "--min_learning_rate", default=0.000001, type=float)
    parser.add_argument('-cpu', '--cpu_num', default=10, type=int)
    parser.add_argument("-log", "--log_path", default=None, type=str)
    parser.add_argument('-save', '--save_path', default=None, type=str)
    parser.add_argument('--max_steps', default=20000, type=int)
    parser.add_argument('--warm_up_steps', default=None, type=int)
    parser.add_argument("-ep", '--epoch_num', default=20, type=int)

    parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')
    parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')

    # These two are derived from the data files in main(); do not set manually.
    parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')
    parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')

    return parser.parse_args(args)


def override_config(args):
    '''
    Overwrite the model/data settings on ``args`` with the values stored in
    ``config.json`` inside ``args.init_checkpoint``.

    ``data_path`` is only taken from the checkpoint when the caller did not
    supply one; every other listed field is overridden unconditionally.
    '''
    config_file = os_path.join(args.init_checkpoint, 'config.json')
    with open(config_file, 'r') as fjson:
        saved = json_load(fjson)

    if args.data_path is None:
        args.data_path = saved['data_path']
    for key in ('model', 'double_entity_embedding', 'double_relation_embedding',
                'hidden_dim', 'test_batch_size'):
        setattr(args, key, saved[key])

def save_model(model, optimizer, save_variable_list, args):
    '''
    Persist everything needed to resume or reuse training under
    ``args.save_path``:

    - ``config.json``: the full argument namespace,
    - ``checkpoint``: training bookkeeping plus model/optimizer state dicts,
    - ``<model>_model``: the whole pickled model (consumed by load_model()),
    - ``entity_embedding.npy`` / ``relation_embedding.npy``: raw matrices.
    '''
    save_path = args.save_path
    if not os_path.exists(save_path):
        makedirs(save_path)

    # Dump the run configuration next to the weights for later reloading.
    with open(os_path.join(save_path, 'config.json'), 'w') as fjson:
        json_dump(vars(args), fjson)

    checkpoint = dict(save_variable_list)
    checkpoint['model_state_dict'] = model.state_dict()
    checkpoint['optimizer_state_dict'] = optimizer.state_dict()
    torch_save(checkpoint, os_path.join(save_path, 'checkpoint'))

    torch_save(model, os_path.join(save_path, "{}_model".format(args.model)))

    # Export both embedding matrices as .npy for external analysis.
    for attr in ('entity_embedding', 'relation_embedding'):
        matrix = getattr(model, attr).detach().cpu().numpy()
        np_save(os_path.join(save_path, attr), matrix)


def load_model(args):
    '''
    Load the whole pickled model previously written by save_model() from
    ``<args.save_path>/<args.model>_model`` and return it.
    '''
    model_file = os_path.join(args.save_path, "{}_model".format(args.model))
    return torch_load(model_file)


def read_triple(args, filename):
    '''
    Read id-mapped triples from ``filename`` under ``args.data_path``.

    Each line is "head\\trelation\\ttail" with integer ids.  Entity and
    relation counts are read from ``info.json`` in the same directory.

    :param args: namespace providing ``data_path``
    :param filename: triple file name, e.g. "train_id.txt"
    :return: (triples, n_entity, n_relation)
    '''
    # Use context managers so both file handles are closed deterministically
    # (the original leaked both, rebinding `fin` while the first was open).
    triples = []
    with open(os_path.join(args.data_path, filename), "r") as fin:
        for line in fin:
            line = line.strip()
            if not line:
                # Tolerate blank/trailing lines instead of crashing.
                continue
            head, relation, tail = line.split("\t")
            triples.append((int(head), int(relation), int(tail)))
    with open(os_path.join(args.data_path, "info.json"), "r") as fin:
        info = json_load(fin)
    return triples, info["n_entity"], info["n_relation"]


def set_logger(args, log_name):
    '''
    Configure logging to write to ``<args.log_path>/<log_name>.log`` and to
    the console, and return the root logger.

    Fix: the original added a fresh console StreamHandler on *every* call,
    so calling it more than once duplicated every console log line.
    '''
    if not os_path.exists(args.log_path):
        makedirs(args.log_path)

    log_file = os_path.join(args.log_path, '{}.log'.format(log_name))

    # Truncate any log left over from a previous run.
    with open(log_file, "w", encoding="utf-8") as fout:
        fout.write("")

    # NOTE(review): basicConfig is a no-op once the root logger is
    # configured, so only the first call's filename takes effect.
    logging.basicConfig(
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S',
        filename=log_file,
        filemode='a+'
    )
    logger = logging.getLogger('')
    # Attach a console handler only if one is not already present.
    # Exact type check on purpose: FileHandler subclasses StreamHandler.
    if not any(type(handler) is logging.StreamHandler
               for handler in logger.handlers):
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s %(message)s'))
        logger.addHandler(console)
    return logger


def log_metrics(epoch, mode, step, metrics, logger):
    '''
    Emit one INFO line per metric for the given epoch, mode and step.
    '''
    for name, value in metrics.items():
        logger.info('Epoch %d %s %s at step %d: %f' % (epoch, mode, name, step, value))


def run(args, logger, data_loader, test_dataset_list=None):
    '''
    Build (or reload) a KGE model, train it for ``args.epoch_num`` epochs,
    and optionally evaluate on the test set after each epoch.

    :param args: configuration namespace (continue_train, model, save_path,
        cuda, learning_rate, epoch_num, max_steps, ...)
    :param logger: logger returned by set_logger()
    :param data_loader: training iterator fed to train_steps()
    :param test_dataset_list: list of test DataLoaders; required when
        ``args.do_test`` is set
    :raises ValueError: when resuming but no saved model file exists
    '''
    if args.continue_train:
        # Resume from the whole-model pickle written by save_model().
        if not os_path.exists(os_path.join(args.save_path, "{}_model".format(args.model))):
            raise ValueError("model not found...")
        model = load_model(args)
    else:
        model = KGEModel(
            model_name=args.model,
            nentity=args.nentity,
            nrelation=args.nrelation,
            hidden_dim=args.hidden_dim,
            gamma=args.gamma,
            use_cuda=args.cuda,
            init_delta=args.init_delta,
            double_entity_embedding=args.double_entity_embedding,
            double_relation_embedding=args.double_relation_embedding
        )

    if args.cuda:
        model = model.cuda()

    logger.info('Model Parameter Configuration:')
    for name, param in model.named_parameters():
        logger.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))

    # Only optimize parameters that actually require gradients.
    optimizer = Adam(
        filter(lambda p: p.requires_grad, model.parameters()),
        lr=args.learning_rate
    )
    # Typo fixed ("Ramdomly") and message made accurate for the resume case.
    if args.continue_train:
        logger.info('Loaded %s Model from checkpoint...' % args.model)
    else:
        logger.info('Randomly Initializing %s Model...' % args.model)
    logger.info('Start Training...')
    logger.info('learning_rate = %f' % args.learning_rate)
    logger.info('min_learning_rate = %f' % args.min_learning_rate)
    logger.info('batch_size = %d' % args.batch_size)
    logger.info("negative_batch_size = %d" % args.negative_sample_size)
    logger.info('hidden_dim = %d' % args.hidden_dim)
    logger.info('gamma = %f' % args.gamma)
    # The original logged negative_adversarial_sampling twice; keep one.
    logger.info('negative_adversarial_sampling = %s' % str(args.negative_adversarial_sampling))
    logger.info("init delta: %f" % args.init_delta)
    logger.info("use delta: %s" % str(args.use_delta))
    logger.info("max steps: %d" % args.max_steps)
    if args.negative_adversarial_sampling:
        logger.info('adversarial_temperature = %f' % args.adversarial_temperature)

    warm_up_steps = args.warm_up_steps
    current_learning_rate = args.learning_rate
    average_metrics = {}

    for epoch in range(args.epoch_num):
        warm_up_steps, current_learning_rate = train_steps(
            model, data_loader, optimizer, args, epoch, warm_up_steps,
            current_learning_rate, logger)
        if args.do_test:
            average_metrics = test_steps(model, epoch, average_metrics, test_dataset_list, logger)

    # test_steps() accumulates sums over epochs; turn them into means.
    for metric in average_metrics:
        average_metrics[metric] /= args.epoch_num

    log_metrics(args.epoch_num, "Average", args.max_steps, average_metrics, logger)


def train_steps(kge_model, train_iterator, optimizer, args, epoch, warm_up_steps, current_learning_rate, logger):
    '''
    Run ``args.max_steps`` training steps for one epoch, halving the
    learning rate (floored at ``args.min_learning_rate``) once
    ``warm_up_steps`` is reached, and save a checkpoint at the end.

    :return: (warm_up_steps, current_learning_rate) to carry to the next epoch
    '''
    # Fix: args.warm_up_steps defaults to None, and the original crashed on
    # the ``step >= warm_up_steps`` comparison in that case.
    if warm_up_steps is None:
        warm_up_steps = args.max_steps // 2

    training_logs = []
    step = 0
    for step in range(args.max_steps):
        log = kge_model.train_step(kge_model, optimizer, train_iterator, args)
        training_logs.append(log)

        if step >= warm_up_steps:
            # Decay the learning rate and rebuild the optimizer with it.
            current_learning_rate = max(current_learning_rate / 2, args.min_learning_rate)
            logger.info('Change learning_rate to %f at step %d' % (current_learning_rate, step))
            optimizer = Adam(
                filter(lambda p: p.requires_grad, kge_model.parameters()),
                lr=current_learning_rate
            )
            # Push the next decay point further out.
            warm_up_steps = warm_up_steps * 3

        if step % args.log_steps == 0:
            # Average each metric over the logs since the last report.
            metrics = {}
            for metric in training_logs[0].keys():
                metrics[metric] = sum([log[metric] for log in training_logs]) / len(training_logs)
            log_metrics(epoch, 'Training average', step, metrics, logger)
            training_logs = []

    save_variable_list = {
        'step': step,
        'current_learning_rate': current_learning_rate,
        'warm_up_steps': warm_up_steps
    }
    save_model(kge_model, optimizer, save_variable_list, args)
    return warm_up_steps, current_learning_rate


def test_steps(kge_model, epoch, average_metrics, test_dataset_list, logger, args=None):
    '''
    Evaluate the model on the test set and accumulate the metrics so run()
    can average them over epochs.

    :param args: configuration namespace.  Fix: the original read a
        module-level ``args`` that only exists when this file is run as a
        script (NameError when imported); it is now an optional,
        backward-compatible parameter that still falls back to that global.
    :return: running sums of the metrics (a fresh copy at epoch 0)
    '''
    if args is None:
        # Legacy fallback for callers relying on the script-level global.
        args = globals().get("args")

    logger.info('Evaluating on Test Dataset...')
    metrics = kge_model.test_step(kge_model, test_dataset_list, args)
    log_metrics(epoch, "Test", args.max_steps, metrics, logger)

    if epoch == 0:
        # First epoch initializes the accumulator.
        average_metrics = metrics.copy()
    else:
        for key in ("MRR", "MR", "HITS@1", "HITS@3", "HITS@10"):
            average_metrics[key] += metrics[key]
    return average_metrics


def main(args):
    '''
    End-to-end training driver.

    Validates the path arguments, configures logging, reads the id-mapped
    triples, builds the train (and optional test) DataLoaders, and finally
    delegates to run().

    :param args: configuration namespace (argparse or BasicConfig)
    :raises ValueError: if data_path or save_path is missing
    '''
    if args.data_path is None:
        # Typo fixed: "choosed" -> "chosen".
        raise ValueError('data_path must be chosen.')

    if args.save_path is None:
        raise ValueError('Where do you want to save your trained model?')

    if args.save_path and not os_path.exists(args.save_path):
        makedirs(args.save_path)

    # Write logs to checkpoint and console.
    # (The original wrapped this in a single-argument os_path.join, a no-op.)
    log_name = "{}_train".format(args.model)
    logger = set_logger(args, log_name)

    logger.info('Model: %s' % args.model)
    logger.info('Dataset: %s' % args.dataset)
    logger.info('Data Path: %s' % args.data_path)
    logger.info('Save Path: %s' % args.save_path)
    logger.info('Log Path: %s' % args.log_path)

    # all_true_triples collects train (+ test) triples, presumably for
    # filtered ranking in TestDataset -- confirm against dataloader.py.
    all_true_triples = []
    train_triples, n_entity_real, n_relation = read_triple(args, 'train_id.txt')
    logger.info('#train: %d' % len(train_triples))
    all_true_triples += train_triples

    if args.do_test:
        test_triples, n_entity_real, n_relation = read_triple(args, "test_id.txt")
        logger.info("#test: %d" % len(test_triples))
        all_true_triples += test_triples

    args.nentity = n_entity_real
    args.nrelation = n_relation
    logger.info("#entity: %d" % n_entity_real)
    logger.info("#relation: %d" % n_relation)

    # Training dataloaders: one per corruption mode, merged by the iterator.
    train_dataloader_head = DataLoader(
        TrainDataset(train_triples, n_entity_real, n_relation, args.negative_sample_size, 'head-batch'),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=max(1, args.cpu_num // 2),
        collate_fn=TrainDataset.collate_fn
    )

    train_dataloader_tail = DataLoader(
        TrainDataset(train_triples, n_entity_real, n_relation, args.negative_sample_size, 'tail-batch'),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=max(1, args.cpu_num // 2),
        collate_fn=TrainDataset.collate_fn
    )

    train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)

    test_dataset_list = []
    if args.do_test:
        test_dataloader_head = DataLoader(
            TestDataset(
                test_triples,
                all_true_triples,
                args.nentity,
                args.nrelation,
                'head-batch'
            ),
            batch_size=args.test_batch_size,
            num_workers=max(1, args.cpu_num // 2),
            collate_fn=TestDataset.collate_fn
        )

        test_dataloader_tail = DataLoader(
            TestDataset(
                test_triples,
                all_true_triples,
                args.nentity,
                args.nrelation,
                'tail-batch'
            ),
            batch_size=args.test_batch_size,
            num_workers=max(1, args.cpu_num // 2),
            collate_fn=TestDataset.collate_fn
        )

        test_dataset_list = [test_dataloader_head, test_dataloader_tail]

    run(args, logger, train_iterator, test_dataset_list)


if __name__ == '__main__':
    # Hyper-parameters are set in code here instead of via parse_args().
    dataset = "PowerGrid"
    args = BasicConfig(
        cuda=True,
        data_path=os_path.join("..", "..", "data", dataset),
        log_path=os_path.join("..", "..", "record", dataset),
        save_path=os_path.join("..", "..", "model", dataset),
        dataset=dataset,
    )
    # Overrides for a small, quick training run.
    args.do_test = False
    args.learning_rate = 0.005
    args.min_learning_rate = 0.00005
    args.batch_size = 32
    args.negative_sample_size = 32
    args.epoch_num = 5
    args.max_steps = 200
    args.log_steps = 20

    main(args)