import time
import torch
import os
import copy

from model import loss_module
from experiment import exp_runner, exp_factory, exp_plots
from utils import tools

import logging
logging.basicConfig(format='> %(asctime)s | %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)

from torch.utils.tensorboard import SummaryWriter
# TensorBoard configuration. These two values are only used by the
# commented-out constructor call below; the active writer uses defaults
# (auto-generated run directory under ./runs).
tb_log_dir = 'runs'
tb_commit = 'test'  # comment suffix appended to the default log_dir; has no effect when log_dir is given explicitly
#writer = SummaryWriter(log_dir=tb_log_dir, comment=tb_commit)  # alternative: explicit log dir + comment suffix
writer = SummaryWriter()  # module-level writer shared by main()

from options import Options

def main(args):
    """Run one full experiment: build data/model/optimizer/loss, then either
    evaluate a saved checkpoint (``args.testonly``) or train and save
    best/last checkpoints.

    Args:
        args: parsed experiment options (see ``options.Options``). Fields
            read here include data_name, exp_task, exp_type, exp_encoder,
            exp_epochs, exp_seed, exp_savepath, model_savepath, device,
            testonly, exp_trainratio.

    Raises:
        ValueError: if ``(args.exp_task, args.exp_type)`` is not a supported
            combination (previously this surfaced later as a NameError).
    """
    logger.info('Showing args...')
    print(args)
    # ============================================ #
    logger.info('Creating saving path...')
    tools.check_save_path(args.exp_savepath, args.model_savepath)

    total_start_time = time.time()

    # ============================================ #
    # Checkpoint destinations: "best" = minimum-loss epoch, "last" = final epoch.
    modelsavepath_best = os.path.join(
        args.model_savepath,
        'model_best_{}_{}_{}ed.pth'.format(args.data_name, args.exp_encoder, args.exp_type))
    modelsavepath_last = os.path.join(
        args.model_savepath,
        'model_last_{}_{}_{}ed.pth'.format(args.data_name, args.exp_encoder, args.exp_type))

    # ============================================ #
    logger.info('Setting seed...')
    if args.exp_seed is not None:
        torch.manual_seed(args.exp_seed)

    # ============================================ #
    # TRAIN split for training runs, TEST split for evaluation-only runs.
    if not args.testonly:
        logger.info('Building training data...')
        dataset, newds, dataloader = exp_factory.data_factory(config=args, flag='TRAIN')
    else:
        logger.info('Building test data...')
        dataset, newds, dataloader = exp_factory.data_factory(config=args, flag='TEST')
    logger.info('Size of data: {}'.format(len(newds)))

    # ============================================ #
    # Propagate the series length/dimension discovered by the dataset back
    # into the config so the model factory sees the real input shapes.
    logger.info('Re-check parameters...')
    args.ts_len = dataset.ts_len
    args.ts_dim = dataset.ts_dim

    # ============================================ #
    logger.info('Building model...')
    model = exp_factory.model_factory(config=args, data=dataset)
    logger.info("Total number of parameters: {}".format(tools.count_parameters(model)))
    logger.info("Trainable parameters: {}".format(tools.count_parameters(model, trainable=True)))

    # ============================================ #
    logger.info('Building optimizer...')
    optim = exp_factory.optim_factory(model=model, args=args)

    # ============================================ #
    logger.info('Building loss...')
    loss_f = loss_module.get_loss_module(task=args.exp_task)

    if args.testonly:
        _evaluate_only(args, model, dataset, dataloader, optim, loss_f,
                       modelsavepath_best)
    else:
        _train(args, model, dataset, dataloader, optim, loss_f,
               modelsavepath_best, modelsavepath_last)

    # Flush/close on BOTH paths (the old test-only path returned before
    # flushing, so its Performance scalar could be lost).
    writer.flush()
    writer.close()

    total_runtime = time.time() - total_start_time
    logger.info("Total runtime: {} hours, {} minutes, {} seconds\n".format(*tools.readable_time(total_runtime)))


def _build_runner(args, model, dataset, dataloader, optim, loss_f):
    """Build the runner matching ``(args.exp_task, args.exp_type)``.

    Centralizes the constructor call that was previously duplicated across
    six near-identical branches.

    Raises:
        ValueError: for an unsupported task/type combination.
    """
    runner_by_combo = {
        ('regression', 'supervised'): exp_runner.RegressionRunner,
        ('classification', 'supervised'): exp_runner.ClassificationRunner,
        ('classification', 'pretrain'): exp_runner.UnsupervisedClassificationRunner,
    }
    try:
        runner_cls = runner_by_combo[(args.exp_task, args.exp_type)]
    except KeyError:
        raise ValueError('Unsupported task/type combination: {}/{}'.format(
            args.exp_task, args.exp_type))
    return runner_cls(model=model,
                      dataset=dataset,
                      dataloader=dataloader,
                      device=args.device,
                      optimizer=optim,
                      loss_module=loss_f,
                      args=args)


def _evaluate_only(args, model, dataset, dataloader, optim, loss_f,
                   modelsavepath_best):
    """Load the best checkpoint, freeze all parameters, and report metrics."""
    logger.info('Loading model to evaluate...')
    print('  model-path', modelsavepath_best)
    model = tools.load_model(model, modelsavepath_best)
    model.to(args.device)
    for _, param in model.named_parameters():
        param.requires_grad = False

    test_evaluator = _build_runner(args, model, dataset, dataloader, optim, loss_f)
    aggr_metrics, targets_all, predictions_all = test_evaluator.evaluate()
    print('-----------------------')
    print('Performance={}'.format(aggr_metrics['epoch_metrics']))
    # NOTE(review): add_scalar expects a numeric scalar — assumes
    # 'epoch_metrics' is a single number; verify against the runner.
    writer.add_scalar('Performance/{}-{}'.format(args.data_name, args.exp_trainratio),
                      aggr_metrics['epoch_metrics'])


def _train(args, model, dataset, dataloader, optim, loss_f,
           modelsavepath_best, modelsavepath_last):
    """Train for ``args.exp_epochs`` epochs, then save best and last checkpoints."""
    logger.info('Building experiment ...')
    exp = _build_runner(args, model, dataset, dataloader, optim, loss_f)

    logger.info('Starting Training... The selected dataset is >>>{}<<<'.format(args.data_name))
    loss_name = 'Loss/{} '.format(args.data_name)  # trailing space kept: existing TB tag
    checkpoint = {
        'state_dict': model.state_dict(),
        'best_epo': 0,
        'mininum_loss': 1e9,  # sentinel: any real epoch loss is smaller
        'optim': optim.state_dict(),
        'model_saveflag': False
    }
    cp_best = copy.deepcopy(checkpoint)
    cp_last = copy.deepcopy(checkpoint)
    for epo in range(args.exp_epochs):
        print('\n')
        epoch_start_time = time.time()
        epoch_metrics = exp.train_epoch(epoch_num=epo)
        epoch_mean_loss = epoch_metrics['epoch_mean_loss']
        writer.add_scalar(loss_name, epoch_mean_loss, epo)
        # record_model tracks both the minimum-loss and the latest snapshot.
        cp_best, cp_last = tools.record_model(model=model, epo=epo, loss=epoch_mean_loss,
                                              optim=optim, checkpoint_best=cp_best,
                                              checkpoint_last=cp_last)
        epoch_runtime = time.time() - epoch_start_time
        logger.info('Epoch {} Training Summary, Loss={}'.format(epo, epoch_mean_loss))
        logger.info("Epoch runtime: {} hours, {} minutes, {} seconds".format(*tools.readable_time(epoch_runtime)))

    logger.info('Saving model with mininum loss...')
    tools.save_model(modelsavepath_best, cp_best)
    print('    Save Path: ', modelsavepath_best)
    print('    Save Epoch: {}; Total Epoch: {}'.format(cp_best['best_epo'], args.exp_epochs))

    logger.info('Saving model at last epoch...')
    tools.save_model(modelsavepath_last, cp_last)
    print('    Save Path: ', modelsavepath_last)
    print('    Save Epoch: {}; Total Epoch: {}'.format(cp_last['best_epo'], args.exp_epochs))

if __name__ == '__main__':
    # Entry point: parse CLI options, then run the experiment pipeline.
    cli_args = Options().parse()
    print('START EXPERIMENT')
    main(cli_args)
    print('END EXPERIMENT')