import logging

import torch

from trainer.base_trainer import BaseTrainer

logger = logging.getLogger("match")

class FDNetTrainer(BaseTrainer):
    """Trainer for FDNet-style cross-modal (SAR vs. optical) patch matching.

    Two feature extractors (one per modality, optionally a single shared
    network) produce feature maps whose correlation is driven by a single
    matching loss ``Lmatch``.

    NOTE(review): ``self.opts``, ``self.device``, ``self.mtc_fn``,
    ``self._prepare_batch``, ``self._zero_grad`` and ``self._l2_dis`` are
    presumably provided by ``BaseTrainer`` — confirm against that class.
    """

    # Whether evaluation should also log the raw input patches and the
    # ground-truth label map; may be overridden per-run via opts.record_patch.
    record_patch = False

    def __init__(self, model, optimizer, loss_fn, logger, config):
        """Wire up networks, optimizer and loss from the container dicts.

        Args:
            model: dict holding the SAR extractor under ``"FtsA"`` and,
                optionally, the optical extractor under ``"FtsB"``. When
                ``"FtsB"`` is absent, a single shared network is used for
                both modalities.
            optimizer: dict holding the feature-extractor optimizer
                under ``"Fts"``.
            loss_fn: dict; ``loss_fn["Lmatch"][0]`` is the matching loss.
            logger: experiment logger exposing ``counters``,
                ``add_scalars`` and ``info``.
            config: trainer configuration, forwarded to ``BaseTrainer``.
        """
        super().__init__(model, optimizer, loss_fn, logger, config)

        self.logger = logger

        self.optim_fts = optimizer["Fts"]

        self.FtsA = model["FtsA"]
        # Share FtsA for both modalities when no separate "FtsB" is supplied.
        self.FtsB = model.get("FtsB", self.FtsA)

        self.Lmatch = loss_fn["Lmatch"][0]

        # getattr with a default replaces the original `in dir(...)` probe;
        # falls back to the class-level flag when opts has no such attribute.
        self.record_patch = getattr(self.opts, "record_patch", self.record_patch)

    def train(self, engine, batch):
        """Run one optimization step over a batch of SAR/optical patches.

        Metrics are reported through the logger (scalar ``train/loss``)
        and ``self.log_str``; the return value carries no data.

        Returns:
            ``(None, None)`` — kept for engine-interface compatibility.
        """
        self.FtsA.train()
        self.FtsB.train()

        curr_step = self.logger.counters["train"]

        sar_patchs, opt_patchs = self._prepare_batch(batch)

        self._zero_grad()

        # Each extractor returns (aux, feature_map); only the map is used.
        _, feature_a = self.FtsA(sar_patchs)
        _, feature_b = self.FtsB(opt_patchs)

        loss = self._calculate_loss(feature_a, feature_b, self.mtc_fn)

        self.logger.add_scalars('train/loss', {'Lmatch': loss.item()}, curr_step)

        log_str = "Lmatch: {:.5f} \t".format(loss.item())

        loss.backward()
        self.optim_fts.step()

        self.log_str = log_str

        return None, None

    def evaluate(self, engine, batch):
        """Evaluate one batch: correlate feature maps and score localization.

        The larger feature map (by spatial height) is treated as the search
        image and the smaller one as the template when computing the
        correlation map.

        Returns:
            ``(l2, None)`` where ``l2`` is the localization distance
            produced by ``self._l2_dis``.
        """
        self.FtsA.eval()
        self.FtsB.eval()

        with torch.no_grad():
            x, y = self._prepare_batch(batch)
            sar_patchs, opt_patchs = x
            labels, shift = y

            _, feature_a = self.FtsA(sar_patchs)
            _, feature_b = self.FtsB(opt_patchs)

            # Order the pair as (search, template) by spatial height so the
            # matching function always correlates large against small.
            h_a, h_b = feature_a.size(2), feature_b.size(2)
            if h_a >= h_b:
                s_p, t_p = feature_a, feature_b
            else:
                s_p, t_p = feature_b, feature_a

            corr_map = self.mtc_fn(s_p, t_p)

        # 1 - corr_map turns similarity into a distance surface before
        # measuring how far the predicted peak is from the true shift.
        l2, loc = self._l2_dis((1-corr_map).detach(), shift, ret_pred=True)

        sampler_dict = {
            "scalar-scalar" : {
                "l2=dis" : l2, 
                "loc" : loc
            }, 
            "sar=mft-fmap_chs" : feature_a, 
            "opt=mft-fmap_chs" : feature_b, 
            "corr=map-fmap_chs" : 1-corr_map,  
        }
        if self.record_patch:
            # Optionally persist the raw inputs and label map for inspection.
            sampler_dict.update({
                "sar=img-img" : sar_patchs, 
                "opt=img-img" : opt_patchs, 
                "gt=map-img" : labels, 
            })

        self.logger.info(num=engine.state.iteration, **sampler_dict)

        return l2, None

    def _calculate_loss(self, input_a, input_b, mtc_fn, *, device=""):
        """Compute the matching loss and move it to the target device.

        Args:
            input_a: feature map from the SAR branch.
            input_b: feature map from the optical branch.
            mtc_fn: matching/correlation function handed to the loss.
            device: target device; the empty-string default means
                "use ``self.device``".

        Returns:
            The ``Lmatch`` loss tensor on ``device``.
        """
        if device == "":
            device = self.device

        loss = self.Lmatch(input_a, input_b, mtc_fn)
        loss = loss.to(device)

        return loss



