import os
import sys
import logging
import signal
import datetime
import random
from argparse import ArgumentParser

import numpy as np

import torch
import torch.nn as nn

from ignite.engine.engine import Engine, Events

from utils import Experiment
from utils.factory import *
from logger.textual_log import initLog

# Configure the project's textual logging for the validation phase before
# anything else emits log records.
initLog(phase="valid")
logger = logging.getLogger("match")

# Let cuDNN benchmark and pick the fastest conv algorithms (best when input
# sizes are fixed across batches).
torch.backends.cudnn.benchmark = True


def main(config):
    """Run one evaluation pass over the validation set described by ``config``.

    Builds the validation dataset/dataloader, the model(s), optimizer(s),
    loss function(s) and metrics from the experiment configuration, restores
    checkpoints when a ``checkpoint`` tag is present, then runs a single epoch
    of the ignite evaluation engine over the validation loader.

    Args:
        config: an ``Experiment`` configuration object (attribute-style
            access, ``toDict()``, ``save()``, ``get_checkpoints()``).
    """

    logger.info("INFO: {}".format(config.toDict()))

    # Set the random number generator seed for torch, as we use their dataloaders this will ensure shuffle is constant
    # Remember to seed custom datasets etc with the same seed
    if config.seed > 0:
        torch.backends.cudnn.deterministic = True
        torch.cuda.manual_seed_all(config.seed)
        torch.manual_seed(config.seed)
        random.seed(config.seed)
        np.random.seed(config.seed)

    if config.device == "cpu" and torch.cuda.is_available():
        logger.warning("WARNING: Not using the GPU")
    elif config.device == "cuda":
        # Pin to the first listed device id; DataParallel fans out to the rest.
        config.device = f"cuda:{config.device_ids[0]}"

    logger.info("INFO: Creating datasets and dataloaders...")

    config_val = config.datasets.validation.copy()
    dset_val = create_dataset(config_val)
    loader_val = get_data_loader(dset_val, config_val)

    logger.info("Using validation dataset of {} samples or {} batches".format(len(dset_val), len(loader_val)))

    # BUGFIX: default to an empty dict rather than None so the
    # `name in cp_paths` membership tests below do not raise TypeError
    # when no 'checkpoint' entry exists in the config.
    cp_paths = {}
    last_epoch = 0
    if 'checkpoint' in config:
        checkpoint_dir = config.checkpoint_dir if 'checkpoint_dir' in config else config.result_path
        cp_paths, last_epoch = config.get_checkpoints(path=checkpoint_dir, tag=config.checkpoint)
        logger.info("Found checkpoint {} for Epoch {}".format(config.checkpoint, last_epoch))
        last_epoch = last_epoch if config.resume_from == -1 else config.resume_from

    # Build the models: one entry per named network in the config.
    models = {}
    for name, model in config.model.items():
        logger.info("INFO: Building the {} model".format(name))
        models[name] = build_model(model)

        # Restore the checkpoint onto CPU storage first; the module is moved
        # to the target device below.
        if name in cp_paths:
            models[name].load_state_dict( 
                torch.load( cp_paths[name],  
                map_location=lambda storage, loc: storage.cpu()) 
            )
            logger.info("INFO: Loaded model {} checkpoint {}".format(name, cp_paths[name]))

        # Wrap for multi-GPU execution when several device ids are configured.
        if len(config.device_ids) > 1:
            models[name] = nn.DataParallel(models[name], device_ids=config.device_ids)
        models[name].to(config.device)
        logger.info(models[name])

        if 'debug' in config and config.debug is True:
            logger.info("*********** {} ************".format(name))
            # BUGFIX: use a distinct loop variable so the outer `name` is not
            # shadowed, and pass a real format string — the old
            # `logger.info(name, param.data)` treated param.data as a
            # %-style argument with no placeholder, so it was never rendered.
            for param_name, param in models[name].named_parameters():
                if param.requires_grad:
                    logger.info("%s %s", param_name, param.data)

    # Build the optimizers (required by get_trainer even in evaluation mode).
    optimizers = {}
    for name, conf in config.optimizer.items():
        optim_conf = conf.copy()
        del optim_conf["models"]

        # Collect only the trainable parameters of every model this
        # optimizer is responsible for.
        model_params = []
        for model_id in conf.models:
            model_params.extend( list(filter(lambda p: p.requires_grad, models[model_id].parameters())) )

        logger.info("INFO: Using {} Optimization for {}".format(list(optim_conf.keys())[0], name))
        optimizers[name] = get_optimizer(model_params, optim_conf)

        # Restoring the optimizer breaks because we do not include all parameters in the optimizer state. So if we aren't continuing training then just make a new optimizer
        # NOTE(review): this looks up cp_paths by the optimizer's name, which
        # assumes get_checkpoints() keys optimizer states the same way as
        # model states — verify against the Experiment implementation.
        if name in cp_paths and 'checkpoint_dir' not in config:
            optimizers[name].load_state_dict( torch.load( cp_paths[name] ) )
            logger.info("INFO: Loaded {} optimizer checkpoint {}".format(name, cp_paths[name]))

            # The restored state tensors live on CPU; move them to the
            # compute device so optimizer steps do not mix devices.
            for state in optimizers[name].state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.to(config.device)

    # Build the loss functions; each config entry is either a list of names
    # or a single spec dict whose extra keys are keyword arguments.
    losses = {}
    for name, fcns in config.loss.items():
        losses[name] = []
        if type(fcns) == list:
            for l in fcns:
                losses[name].append( get_loss_fn(l) )
                assert losses[name][-1], "Loss function {} for {} could not be found, please check your config".format(l, name)
        else:
            fn_name = fcns["name"]
            fcns.pop("name")
            losses[name].append( get_loss_fn(fn_name, **fcns) )

    # Experiment logger (e.g. tensorboard), if configured.
    exp_logger = None
    if 'logger' in config:
        logger.info("INFO: Initialising the experiment logger")
        exp_logger = get_experiment_logger(config.result_path, config.logger)
        if last_epoch > 0: # fast-forward past the epochs already completed
            exp_logger.fast_forward(last_epoch, len(loader_val))

    # Training manager and evaluation engine wiring.
    logger.info("INFO: Creating training manager and configuring callbacks")
    trainer = get_trainer(models, optimizers, losses, exp_logger, config)

    evaluator_engine = Engine(trainer.evaluate)

    trainer.attach("validation_loader", loader_val)
    trainer.attach("evaluation_engine", evaluator_engine)

    # Attach the configured metrics to the evaluation engine.
    for phase in config.metrics.keys():
        if phase == "train":
            continue
        if phase == "validation":
            engine = evaluator_engine
        else:
            # BUGFIX: an unknown phase previously reused a stale `engine`
            # from an earlier iteration (or raised NameError on the first);
            # skip it explicitly instead.
            logger.warning("WARNING: No engine available for metric phase {}, skipping".format(phase))
            continue

        for name, cfg in config.metrics[phase].items():
            type_ = cfg.type
            settings = cfg.config.toDict()
            metric = get_metric_m(type_, **settings)
            if metric is not None:
                metric.attach(engine, name)
            else:
                logger.warning("WARNING: Metric {} could not be created for {} phase".format(name, phase))

    # Register custom callbacks with the engines
    if check_if_implemented(trainer, "on_iteration_start"):
        evaluator_engine.add_event_handler(Events.ITERATION_STARTED, trainer.on_iteration_start, phase="evaluate")
    if check_if_implemented(trainer, "on_iteration_end"):
        evaluator_engine.add_event_handler(Events.ITERATION_COMPLETED, trainer.on_iteration_end, phase="evaluate")
    if check_if_implemented(trainer, "on_epoch_start"):
        evaluator_engine.add_event_handler(Events.EPOCH_STARTED, trainer.on_epoch_start, phase="evaluate")
    if check_if_implemented(trainer, "on_epoch_end"):
        evaluator_engine.add_event_handler(Events.EPOCH_COMPLETED, trainer.on_epoch_end, phase="evaluate")

    # Save the config for this experiment to the results directory, once we know the params are good
    config.save()

    def signal_handler(sig, frame):
        # Graceful Ctrl+C: flush the experiment logger, then exit.
        logger.info('You pressed Ctrl+C!')
        if exp_logger is not None:
            exp_logger.teardown()
        # BUGFIX: exit unconditionally — sys.exit(0) used to sit inside the
        # if-block above, so Ctrl+C without an experiment logger never exited.
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)

    logger.info("INFO: Starting evaluating...")
    evaluator_engine.run(loader_val, max_epochs=1)

    if exp_logger is not None:
        exp_logger.teardown()


def get_args():
    """Parse and return the command-line options for this validation run."""
    parser = ArgumentParser()
    parser.add_argument('-c', '--config', default=None, type=str, required=True, help='config file path (default: None)')
    # The remaining options share the same shape: optional string, None default.
    optional_flags = (
        ('--checkpoint', 'Checkpoint tag to reload'),
        ('--checkpoint_dir', 'Checkpoint directory to reload'),
        ('--suffix', 'Add to the name'),
    )
    for flag, help_text in optional_flags:
        parser.add_argument(flag, default=None, type=str, help=help_text)
    return parser.parse_args()


if __name__ == "__main__":

    args = get_args()

    # Only these CLI flags may override values loaded from the config file.
    OVERLOADABLE = ['checkpoint', 'checkpoint_dir']

    # Collect the overridable flags the user actually provided.
    overloaded = {}
    for k, v in vars(args).items():
        if (k in OVERLOADABLE) and (v is not None):
            overloaded[k] = v

    # "--suffix time" expands to a timestamped run name, e.g. "Val2024-01-31@12-00-00".
    if args.suffix == "time":
        args.suffix = datetime.datetime.now().strftime("Val%Y-%m-%d@%H-%M-%S")

    config = Experiment.load_from_path(args.config, overloaded, args.suffix)

    # Refuse to clobber an existing results directory unless overwrite is set;
    # otherwise clear it out before the run.
    if config.overwrite is False:
        assert not config.exists(), "Results directory {} already exists! Please specify a new experiment name or the remove old files.".format(config.result_path)
    else:
        empty_folder(config.result_path)

    main(config)



