import torch
import logging
import time
from torch.nn import DataParallel
from tqdm import tqdm
from easycore.common.registry import Registry


from model import ModelFactory
from .optimizer import OptimizerFactory
from .scheduler import SchedulerFactory
from data.loader import DataLoaderFactory
from data.data_mapper import DataMapper, DataMapper, MultiScaleDataMapper
from data.utils import move_to
from .checkpoint import CheckPointer
from .tensorboard_logging import TensorboardWriter

from utils.exception import EarlyStop


class TrainerFactory:
    """Registry-backed factory mapping trainer type names to trainer classes."""

    registry = Registry('trainer')

    @classmethod
    def register(cls, trainer_type_name=None, obj=None):
        """Register a trainer class under *trainer_type_name*.

        Usable either as a decorator (``@TrainerFactory.register()``) or as a
        direct call with ``obj``; delegates to the underlying registry.
        """
        return cls.registry.register(trainer_type_name, obj)

    @classmethod
    def get(cls, config):
        """Look up the trainer class named by ``config['trainer']['type']``
        and instantiate it with the full config.
        """
        trainer_cls = cls.registry.get(config['trainer']['type'])
        return trainer_cls(config)


@TrainerFactory.register()
class Trainer:
    """Iteration-based training driver.

    Runs the optimization loop with periodic report logging, checkpointing,
    validation, early stopping, and learning-rate scheduling.

    Config keys read: ``model.devices``, ``output_directory``,
    ``trainer.{max_iteration, report_interval, save_interval,
    evaluate_interval, early_stop_patience, scheduler_interval}``,
    plus the sub-configs consumed by the build_* factory methods.
    """

    def __init__(self, config):
        self.logger = logging.getLogger(__name__)
        self.model = self.build_model(config)
        self.devices = config['model']['devices']

        if len(self.devices) == 1:
            self.model.to(self.devices[0])
        else:
            # BUGFIX: the original filtered on `device_ids.startwith(...)`,
            # which both referenced `device_ids` before assignment and used a
            # non-existent str method; filter each device string instead.
            # device strings look like "cuda:N"; strip the "cuda:" prefix.
            device_ids = [int(device[5:]) for device in self.devices if device.startswith("cuda:")]
            self.model.to(self.devices[0])
            self.model = DataParallel(self.model, device_ids=device_ids)

        self.optimizer = self.build_optimizer(config, self.model)
        self.scheduler = self.build_scheduler(config, self.optimizer)
        self.train_loader = self.build_train_loader(config)
        self.test_loader = self.build_test_loader(config)

        self.start_iteration = 0
        self.max_iteration = config['trainer']['max_iteration']

        self.report_iteration_interval = config['trainer']['report_interval']
        self.checkpoint_save_interval = config['trainer']['save_interval']

        self.checkpointer = CheckPointer(config['output_directory'], self.devices[0])

        self.evaluate_interval = config['trainer']['evaluate_interval']
        # Best validation loss seen so far and the iteration it occurred at;
        # both drive the early-stopping check in after_step().
        self.best_iteration = None
        self.best_loss = None
        self.early_stop_patience = config['trainer']['early_stop_patience']

        self.scheduler_interval = config['trainer']['scheduler_interval']
        # Running mean of the training loss over the current scheduler window.
        self.scheduler_loss = 0
        self.scheduler_iteration = 0

        self.tensorboard_writer = TensorboardWriter(config['output_directory'])

    def resume_or_load(self, resume=True):
        """Resume model/optimizer/scheduler state from the latest checkpoint
        if one exists and *resume* is True; updates ``start_iteration``.
        """
        if resume and self.checkpointer.exists():
            self.start_iteration = self.checkpointer.load(self.model, optimizer=self.optimizer, scheduler=self.scheduler)

    def train(self):
        """Run the main loop from ``start_iteration`` to ``max_iteration``.

        Saves a checkpoint on user interrupt or early stop; always logs the
        terminating iteration.
        """
        self.logger.info("[begin] Starting training from iteration {}".format(self.start_iteration))
        # BUGFIX: initialize before the try block -- a KeyboardInterrupt
        # raised before the first step previously hit an unbound name in
        # the handler below.
        train_step_done = False
        try:
            self.iteration = self.start_iteration
            for self.iteration in range(self.start_iteration, self.max_iteration):
                train_step_done = False
                self.before_step()
                self.run_step()
                train_step_done = True
                self.after_step()
        except KeyboardInterrupt:
            self.logger.info("[end] Stop by user")
            # If the step did not complete, the current iteration's work is
            # lost, so record the previous iteration in the checkpoint.
            self.checkpointer.save(self.model, optimizer=self.optimizer, scheduler=self.scheduler, iteration=(self.iteration if train_step_done else self.iteration - 1))
        except EarlyStop as e:
            self.logger.info(str(e))
            self.checkpointer.save(self.model, optimizer=self.optimizer, scheduler=self.scheduler, iteration=self.iteration)
        finally:
            self.logger.info("[end] Terminate training at iteration {}".format(self.iteration))

    def before_step(self):
        """Reset the report accumulator at the start of each report window."""
        if (self.iteration - self.start_iteration) % self.report_iteration_interval == 0 or self.iteration == self.start_iteration:
            self.report_dict = {}

    def run_step(self):
        """Execute one optimization step: forward, backward, optimizer update,
        then fold the losses into the report and scheduler accumulators.
        """
        self.model.train()

        start_time = time.time()
        data = next(self.train_loader)
        if not isinstance(self.model, DataParallel):
            # DataParallel scatters inputs itself; otherwise move manually.
            data = move_to(data, device=self.devices[0])

        self.optimizer.zero_grad()

        loss_dict, _ = self.model(data)
        # mean() reduces per-GPU loss vectors gathered by DataParallel
        # (identity for already-scalar losses).
        loss_dict = {k: v.mean() for k, v in loss_dict.items()}
        loss = sum(loss_dict.values())

        loss.backward()
        self.optimizer.step()

        cost_time = time.time() - start_time

        # Incremental mean over the current report window; cost_time is a sum.
        report_iteration = (self.iteration - self.start_iteration) % self.report_iteration_interval
        self.report_dict['cost_time'] = self.report_dict.get('cost_time', 0.) + cost_time
        self.report_dict['total_loss'] = (self.report_dict.get('total_loss', 0.) * report_iteration + loss.item()) / (report_iteration + 1)
        for key, value in loss_dict.items():
            self.report_dict[key] = (self.report_dict.get(key, 0.) * report_iteration + value.item()) / (report_iteration + 1)

        # BUGFIX: an incremental mean must assign, not add -- the original
        # `+=` made scheduler_loss grow without bound instead of averaging.
        self.scheduler_loss = (loss.item() + self.scheduler_loss * self.scheduler_iteration) / (self.scheduler_iteration + 1)
        self.scheduler_iteration += 1

    def after_step(self):
        """Periodic work after each step: report logging, checkpoint saving,
        evaluation with best-model tracking, early-stop check, and LR
        scheduler stepping. Raises EarlyStop when patience is exhausted.
        """
        if (self.iteration - self.start_iteration + 1) % self.report_iteration_interval == 0 or self.iteration == self.max_iteration - 1:
            self.logger.info("[iteration {}] ".format(self.iteration) + "   ".join(["{} : {}".format(key, value) for key, value in self.report_dict.items()]))
            self.tensorboard_writer.add_dict(self.report_dict, self.iteration, prefix='train_')

        if (self.iteration + 1) % self.checkpoint_save_interval == 0 or self.iteration == self.max_iteration - 1:
            self.checkpointer.save(self.model, optimizer=self.optimizer, scheduler=self.scheduler, iteration=self.iteration)

        if self.test_loader is not None:
            if self.evaluate_interval > 0 and (self.iteration + 1) % self.evaluate_interval == 0:
                val_loss = self.evaluate()
                if self.best_loss is None or self.best_loss > val_loss:
                    self.best_loss = val_loss
                    self.best_iteration = self.iteration
                    self.checkpointer.save(self.model, optimizer=self.optimizer, scheduler=self.scheduler, iteration=self.iteration, checkpoint_name='model_best.pth')

            # Stop when more than `early_stop_patience` evaluation windows
            # have passed without improvement (patience <= 0 disables this).
            if self.best_iteration is not None and (self.iteration - self.best_iteration) / self.evaluate_interval > self.early_stop_patience > 0:
                raise EarlyStop("[end] early stop at iteration {}, best iteration: {}, best loss: {}".format(self.iteration, self.best_iteration, self.best_loss))

        if (self.iteration + 1) % self.scheduler_interval == 0:
            self.scheduler.step({'val_loss': self.scheduler_loss})
            self.scheduler_loss = 0
            self.scheduler_iteration = 0

    def evaluate(self):
        """Run a full pass over the test loader and return the mean total
        validation loss; also logs and writes per-key means to tensorboard.
        """
        self.model.eval()
        metric_dict = {
            "total_loss": 0,
        }
        start_time = time.time()
        data_length = 0
        with torch.no_grad():
            for data in tqdm(self.test_loader):
                if not isinstance(self.model, DataParallel):
                    data = move_to(data, self.devices[0])

                data_length += 1

                loss_dict, _ = self.model(data)
                # CONSISTENCY FIX: reduce per-GPU loss vectors exactly like
                # run_step does; the original line was a no-op dict copy.
                loss_dict = {key: value.mean() for key, value in loss_dict.items()}
                loss = sum(loss_dict.values())

                metric_dict['total_loss'] += loss.item()
                for key, value in loss_dict.items():
                    metric_dict[key] = metric_dict.get(key, 0.) + value.item()

        # ROBUSTNESS: guard the normalization against an empty test loader
        # (original raised ZeroDivisionError / NameError on `loss_dict`).
        # Normalize every accumulated key, not just the last batch's keys.
        if data_length > 0:
            for key in metric_dict:
                metric_dict[key] /= data_length

        cost_time = time.time() - start_time
        metric_dict['cost_time'] = cost_time

        self.logger.info("[evaluate iteration {}]  ".format(self.iteration) + "   ".join(["{} : {}".format(key, value) for key, value in metric_dict.items()]))
        self.tensorboard_writer.add_dict(metric_dict, self.iteration, prefix='val_')

        return metric_dict['total_loss']

    @classmethod
    def build_model(cls, config):
        """Build the model from config['model'] via the model factory."""
        return ModelFactory.get(config['model'])

    @classmethod
    def build_optimizer(cls, config, model):
        """Build the optimizer for *model* from config['optimizer']."""
        return OptimizerFactory.get(config['optimizer'], model)

    @classmethod
    def build_scheduler(cls, config, optimizer):
        """Build the LR scheduler wrapping *optimizer* from config['scheduler']."""
        return SchedulerFactory.get(config['scheduler'], optimizer)

    @classmethod
    def build_train_loader(cls, config):
        """Build the training data loader with the default DataMapper."""
        assert config['loader']['train']['dataset'] is not None
        return DataLoaderFactory.build_train_loader(config['loader'], mapper=DataMapper(config['loader']))

    @classmethod
    def build_test_loader(cls, config):
        """Build the test loader, or return None when no test dataset is set."""
        if config['loader']['test']['dataset'] is not None:
            return DataLoaderFactory.build_test_loader(config['loader'], mapper=DataMapper(config['loader']))


@TrainerFactory.register()
class MultiScaleTrainer(Trainer):
    """Trainer variant whose training loader applies multi-scale data mapping."""

    @classmethod
    def build_train_loader(cls, config):
        """Build the training loader with a MultiScaleDataMapper instead of
        the default DataMapper; a train dataset must be configured.
        """
        loader_config = config['loader']
        assert loader_config['train']['dataset'] is not None
        mapper = MultiScaleDataMapper(loader_config)
        return DataLoaderFactory.build_train_loader(loader_config, mapper=mapper)