from abc import ABC, abstractmethod
from ais.core import *
import torch
from torch.nn import functional as F
from ignite.engine import create_supervised_trainer, create_supervised_evaluator, Events, Engine
from ignite.contrib.engines import common
import ignite.contrib.handlers.param_scheduler as ignite_scheduler
import ignite.utils as ignite_utils
from ais.utils import params_for

__all__ = ['SupervisedTrainer']


def _prepare_batch(
    batch: Sequence[torch.Tensor], device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False
):
    """Split a ``(x, y)`` batch and move both parts onto *device*.

    :param batch: two-element sequence of tensors ``(inputs, targets)``
    :param device: target device forwarded to ``ignite.utils.convert_tensor``
    :param non_blocking: request asynchronous host-to-device copies when possible
    :return: tuple ``(inputs_on_device, targets_on_device)``
    """
    inputs, targets = batch
    return tuple(
        ignite_utils.convert_tensor(part, device=device, non_blocking=non_blocking)
        for part in (inputs, targets)
    )


class SupervisedTrainer(ABC):
    """Orchestrates supervised training of one model with pytorch-ignite.

    Wires a model, loss criterion, optimizer and (optionally) an LR scheduler
    into an ignite ``Engine`` with the common training handlers attached
    (checkpointing, progress bar, iteration logging).

    Any extra keyword argument passed to ``__init__`` is stored directly on
    the instance; keys of the form ``<prefix>__param`` (e.g.
    ``optimizer__param``, ``scheduler__param``, ``warmup__param``) are later
    consumed by :meth:`configure_optimizers`.
    """

    def __init__(self, model,
                 criterion=None,
                 optimizer=None,
                 lr_scheduler=None,
                 lr=0.001,
                 gpus=1,
                 distributed=False,
                 **kwargs):
        """
        :param model: the ``torch.nn.Module`` to train
        :param criterion: loss function passed to the ignite trainer
        :param optimizer: optimizer class (instantiated later by
            :meth:`configure_optimizers`) or an already-built optimizer
        :param lr_scheduler: torch LR-scheduler class or an ignite
            ``ParamScheduler``-style callable
        :param lr: initial learning rate (informational; the effective lr is
            read from ``optimizer__param`` — TODO confirm with callers)
        :param gpus: number of GPUs to use; 0 forces CPU
        :param distributed: whether training runs under a distributed launcher
        :param kwargs: arbitrary extra attributes, including the
            ``*__param`` dicts and, presumably, evaluators
            (``validation_evaluator`` / ``train_evaluator`` per the original
            docstring) — verify against callers
        """
        super(SupervisedTrainer, self).__init__()
        self.model: torch.nn.Module = model
        self.criterion = criterion
        self.optimizer: Optimizer = optimizer
        # torch scheduler class or ignite ParamScheduler; annotation kept as a
        # comment to avoid evaluating a third-party name at runtime.
        self.lr_scheduler = lr_scheduler
        self.lr = lr
        self.gpus = gpus
        self.distributed = distributed
        self.trainer = None  # ignite Engine, built by create_trainer_common()
        # Expose every extra kwarg as an instance attribute; the *__param
        # dicts are read back by __get_param_from_dict_().
        vars(self).update(kwargs)

    @property
    def to_save(self):
        """Objects persisted by the checkpoint handler.

        Unwraps ``DataParallel`` (saves ``model.module``) and drops ``None``
        entries so the checkpointer never receives an object lacking a
        ``state_dict`` (previously a ``None`` lr_scheduler crashed saving).
        """
        candidates = {
            'trainer': self.trainer,
            'module_dict': self.model.module if hasattr(self.model, 'module') else self.model,
            'optimizer': self.optimizer,
            'lr_scheduler': self.lr_scheduler,
        }
        return {name: obj for name, obj in candidates.items() if obj is not None}

    def create_trainer_common(self, prepare_batch=None,
                              output_transform=lambda x, y, y_pred, loss: loss.item(),
                              train_loader=None,
                              checkpoint_every=1,
                              output_path=None,
                              display_every=10,
                              log_every=20):
        """Build the ignite training ``Engine`` and attach common handlers.

        :param prepare_batch: batch-to-device function; defaults to the
            module-level ``_prepare_batch`` when ``None``
        :param output_transform: maps ``(x, y, y_pred, loss)`` to the engine
            output; defaults to the scalar loss value
        :param train_loader: training DataLoader; only its sampler is used,
            and only in distributed mode
        :param checkpoint_every: save checkpoints every N iterations
        :param output_path: directory for checkpoints/logs
        :param display_every: truthy to show a progress bar on iterations
        :param log_every: log metrics every N iterations
        :return: the configured ignite ``Engine`` (also stored on
            ``self.trainer``)
        """
        if prepare_batch is None:
            prepare_batch = _prepare_batch
        self.device = "cpu"
        if self.gpus > 0:
            if torch.cuda.is_available():
                self.device = "cuda"
                self.model.to(self.device)
            # Single-process multi-GPU: wrap in DataParallel (distributed
            # setups are expected to handle wrapping themselves).
            if not self.distributed and self.gpus >= 2:
                self.model = torch.nn.DataParallel(self.model, device_ids=range(self.gpus))
        self.trainer = create_supervised_trainer(self.model, self.optimizer, self.criterion, device=self.device,
                                                 prepare_batch=prepare_batch, output_transform=output_transform)
        # Distributed runs need the sampler so epochs can be re-seeded.
        train_sampler = train_loader.sampler if self.distributed else None

        metric_names = ['batch loss', ]
        common.setup_common_training_handlers(self.trainer,
                                              train_sampler=train_sampler,
                                              to_save=self.to_save,
                                              save_every_iters=checkpoint_every,
                                              output_path=output_path,
                                              output_names=metric_names,
                                              with_pbar_on_iters=bool(display_every),
                                              log_every_iters=log_every)

        # Native torch schedulers expose .step(); ignite ParamSchedulers are
        # callables registered directly as event handlers. Guard against a
        # missing scheduler (previously None was registered and crashed).
        if self.lr_scheduler is not None:
            if isinstance(self.lr_scheduler, torch.optim.lr_scheduler._LRScheduler):
                self.trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda engine: self.lr_scheduler.step())
            else:
                self.trainer.add_event_handler(Events.EPOCH_STARTED, self.lr_scheduler)

        return self.trainer

    def configure_optimizers(self, data_len):
        """Instantiate the optimizer and LR scheduler from ``*__param`` dicts.

        Replaces ``self.optimizer`` / ``self.lr_scheduler`` (class objects at
        construction time) with built instances.

        :param data_len: number of batches per epoch; currently unused, kept
            for interface compatibility
        """
        optimizer_kwargs = self.__get_param_from_dict_('optimizer')
        scheduler_kwargs = self.__get_param_from_dict_('scheduler')
        warmup_kwargs = self.__get_param_from_dict_('warmup')
        if self.optimizer:
            self.optimizer = self.optimizer(self.model.parameters(), **optimizer_kwargs)
        if self.lr_scheduler and self.optimizer:
            if issubclass(self.lr_scheduler, torch.optim.lr_scheduler._LRScheduler):
                self.lr_scheduler = self.lr_scheduler(self.optimizer, **scheduler_kwargs)
                # Optionally wrap in a linear warmup ramping from 0 to the
                # configured base learning rate.
                if warmup_kwargs and isinstance(warmup_kwargs, dict) and len(warmup_kwargs) > 0:
                    self.lr_scheduler = ignite_scheduler.create_lr_scheduler_with_warmup(
                        self.lr_scheduler,
                        warmup_start_value=0.0,
                        warmup_end_value=optimizer_kwargs['lr'],
                        **warmup_kwargs)
            else:
                # Non-torch scheduler class: instantiate, then adapt to ignite.
                self.lr_scheduler = ignite_scheduler.LRScheduler(self.lr_scheduler(self.optimizer, **scheduler_kwargs))

    def __get_param_for(self, prefix):
        # Alternative prefix-based lookup via ais.utils.params_for; kept for
        # compatibility although no call site is visible in this file.
        return params_for(prefix, self.__dict__)

    def __get_param_from_dict_(self, prefix):
        """Return the kwargs dict stored as ``<prefix>__param``, or {} if absent.

        Previously a missing key raised ``KeyError``; an empty dict is safe
        for all three call sites in :meth:`configure_optimizers`.
        """
        return self.__dict__.get(prefix + '__param', {})

    def fire_trainer(self, data_loader: Iterable, max_eppchs: int):
        """Run the built trainer over *data_loader*.

        :param data_loader: iterable of batches
        :param max_eppchs: maximum number of epochs (parameter name keeps the
            historical typo for backward compatibility with keyword callers)
        """
        # BUG FIX: Engine.run's keyword is ``max_epochs``; the previous call
        # forwarded the misspelled ``max_eppchs=`` and raised TypeError.
        self.trainer.run(data_loader, max_epochs=max_eppchs)


