import logging

import torch

from ignite._utils import convert_tensor

logger = logging.getLogger("match")


class DespeckleTrainer:
    """Skeleton trainer for despeckling models, driven by ignite-style engines.

    ``train`` / ``evaluate`` are stubs meant to be overridden by subclasses;
    this base class provides the engine event hooks (epoch/iteration logging),
    batch device transfer, and optimizer plumbing.
    """

    # Class-level defaults; each is replaced per-instance in __init__
    # (or via attach()) before real use.
    device = "cuda"
    attached = None   # name -> attached object (e.g. data loaders); see attach()
    optim_fts = None  # optimizer; used by _zero_grad()
    opts = None       # trainer-specific config namespace

    logger = None     # injected logger instance
    log_str = ""      # extra per-iteration text, expected to be set by subclasses
    log_freq = 100    # print iter info in log with on_iteration_start


    def __init__(self, model, optimizer, loss_fn, logger, config):
        """Store the training components and pull settings from *config*.

        Args:
            model: network to train.
            optimizer: optimizer instance (stored as ``optim_fts``).
            loss_fn: loss callable used by the step functions.
            logger: logger used for all progress/metric output.
            config: object exposing ``trainer_config``, ``device`` and
                ``log_freq`` attributes.
        """
        self.attached = {}
        self.opts = config.trainer_config
        self.device = torch.device(config.device)
        self.logger = logger

        # Fix: these arguments were previously dropped — in particular
        # optim_fts was never assigned, so _zero_grad() was always a no-op.
        self.model = model
        self.optim_fts = optimizer
        self.loss_fn = loss_fn

        self.log_freq = config.log_freq


    def train(self, engine, batch):
        """Single training step; override in subclasses. Returns (None, None) stub."""
        return None, None


    def evaluate(self, engine, batch):
        """Single evaluation step; override in subclasses. Returns (None, None) stub."""
        return None, None


    def on_epoch_start(self, engine, phase=None):
        """Record the current epoch number when a training epoch begins."""
        if phase == "train":
            self.curr_epoch = engine.state.epoch


    def on_epoch_end(self, engine, phase=None):
        """Log all engine metrics at the end of a train/evaluate epoch.

        List-valued metrics are assumed to hold flat alternating
        (label, value) pairs — TODO confirm against the metric producers.
        """
        if phase in ["train", "evaluate"]:
            info = f"Epoch[{engine.state.epoch}] on {phase} - Metrics:"
            for k, v in engine.state.metrics.items():

                if isinstance(v, list):
                    # Walk the list two at a time: v[i] is a label, v[i+1] its value.
                    for i in range(0, len(v), 2):
                        info += f"\n{k}-{v[i]} : {v[i+1]:<15.8f} "
                else:
                    info += f"\n{k} : {v:<15.8f} "

            # Route through the injected logger (previously the module-level
            # logger was used, leaving self.logger dead).
            self.logger.info(info)


    def on_iteration_start(self, engine, phase=None):
        """Every ``log_freq`` iterations, log progress within the current epoch."""
        if phase == "train":
            loader_len = len(self.attached["train_loader"])
            # engine.state.iteration counts across epochs; map it to a
            # 1-based position inside the current epoch.
            curr_iter = (engine.state.iteration - 1) % loader_len + 1

            if curr_iter % self.log_freq == 0:
                self.logger.info("Epoch[{}] Iteration[{}/{}] {}".format(engine.state.epoch, curr_iter, loader_len, self.log_str))

        elif phase == "evaluate":
            loader_len = len(self.attached["validation_loader"])
            curr_iter = (engine.state.iteration - 1) % loader_len + 1
            if curr_iter % self.log_freq == 0:
                self.logger.info("Iteration[{}/{}]".format(curr_iter, loader_len))


    def on_iteration_end(self, engine, phase=None):
        """Hook called after each iteration; intentionally a no-op here."""
        pass


    def attach(self, name, obj):
        """Register *obj* (e.g. a data loader) under *name* for later lookup."""
        self.attached[name] = obj


    def _prepare_batch(self, batch, non_blocking=True):
        '''
        Move a batch (tensor or nested list of tensors) from cpu to gpu.

        Tensors are converted to float; lists are recursed element-wise,
        preserving structure.
        '''

        if not isinstance(batch, list):
            ret = convert_tensor(batch, self.device, non_blocking=non_blocking).float()
        else:
            ret = []

            for b in batch:
                ret.append(self._prepare_batch(b, non_blocking=non_blocking))

        return ret


    def _zero_grad(self):
        """Zero the optimizer's gradients, if an optimizer has been set."""
        if self.optim_fts:
            self.optim_fts.zero_grad()




