import logging

import torch

from trainer.base_trainer import BaseTrainer

logger = logging.getLogger("match")

class coarse2fineTrainer(BaseTrainer):
    """Two-branch (SAR / optical) patch-matching trainer.

    Pipeline: two feature extractors -- ``FtsA`` for SAR patches and ``FtsB``
    for optical patches (shared siamese extractor when no dedicated ``FtsB``
    is supplied) -- optionally followed by one or two metric heads (``FtsM``),
    optimized with a single matching loss (``Lmatch``).

    NOTE(review): relies on ``BaseTrainer`` providing ``self.opts``,
    ``self.device``, ``self._prepare_batch`` and ``self._zero_grad`` --
    confirm against the base class.
    """

    cnt = 0
    sample = 0

    # log_freq = 100 # print iter info in log with on_iteration_start
    tb_img_freq = 20  # frequency to call tensorboard log_image_grid, 0 means not use log_image_grid

    record_patch = False  # if True, evaluate() also forwards the raw patches to the logger

    def __init__(self, model, optimizer, loss_fn, logger, config):
        """Wire up models, optimizer, loss and optional settings.

        Parameters
        ----------
        model : mapping with "FtsA" (required), optional "FtsB" and up to two
            metric heads under "FtsM"/"FtsM1"/"FtsM2".
        optimizer : mapping with key "Fts".
        loss_fn : mapping; "Lmatch" holds a sequence whose first entry is used.
        logger : project logger (also passed through to BaseTrainer).
        config : trainer configuration forwarded to BaseTrainer.
        """
        super(coarse2fineTrainer, self).__init__(
            model, optimizer, loss_fn, logger, config
        )

        self.logger = logger

        self.optim_fts = optimizer["Fts"]

        # Feature extractors: fall back to a shared (siamese) extractor when
        # no dedicated optical branch is provided.
        self.FtsA = model["FtsA"]
        self.FtsB = model["FtsB"] if "FtsB" in model else self.FtsA

        # Collect the optional metric heads in a fixed priority order;
        # normalize "no heads" to None so downstream checks stay simple.
        self.FtsM = [model[key] for key in ("FtsM", "FtsM1", "FtsM2") if key in model]
        if not self.FtsM:
            self.FtsM = None

        self.Lmatch = loss_fn["Lmatch"][0]

        # Optional knobs on self.opts (presumably built by BaseTrainer from
        # `config` -- TODO confirm). getattr with a default replaces the
        # original `"name" in dir(self.opts)` probing.
        self.record_patch = getattr(self.opts, "record_patch", self.record_patch)
        self.frozen = getattr(self.opts, "frozen", False)   # freeze extractors, train heads only
        self.feat = getattr(self.opts, "feat", "desc")      # which extractor output to match on

    def get_feat(self, tensors):
        """Select the feature tensor used for matching.

        Parameters
        ----------
        tensors : 3-tuple ``(desc, norm_desc, fm)`` as returned by an extractor.

        Returns
        -------
        The entry selected by ``self.feat`` ("desc", "norm_desc" or "fm"),
        or None for an unrecognized setting.
        """
        index = {"desc": 0, "norm_desc": 1, "fm": 2}.get(self.feat)
        return tensors[index] if index is not None else None

    def _compute_vecs(self, featA, featB):
        """Run both branches' features through the metric head(s).

        With one head, both batches are concatenated and processed in a single
        forward pass, then split back (assumes equal batch sizes -- TODO
        confirm); with two heads, each branch uses its own. Without any head
        the features pass through unchanged (with a warning).
        """
        if self.FtsM is None:
            print("Warning : FtsM is None")
            return featA, featB
        if len(self.FtsM) == 1:
            vec = self.FtsM[0](torch.cat((featA, featB), dim=0))
            b_size = vec.size(0) // 2
            return vec[:b_size], vec[b_size:]
        return self.FtsM[0](featA), self.FtsM[1](featB)

    def train(self, engine, batch):
        """One optimization step over a (sar, opt, labels) batch.

        Returns ``(None, None)`` (engine-process-function convention).
        """
        self.FtsA.train()
        self.FtsB.train()
        if self.FtsM is not None:
            for net in self.FtsM:
                net.train()

        log_str = ""
        curr_step = self.logger.counters["train"]

        sar_patchs, opt_patchs, labels = self._prepare_batch(batch)

        self._zero_grad()

        # Optionally freeze the extractors so only the metric heads learn.
        if self.frozen:
            with torch.no_grad():
                descA, norm_descA, fmA = self.FtsA(sar_patchs)
                descB, norm_descB, fmB = self.FtsB(opt_patchs)
        else:
            descA, norm_descA, fmA = self.FtsA(sar_patchs)
            descB, norm_descB, fmB = self.FtsB(opt_patchs)

        featA = self.get_feat((descA, norm_descA, fmA))
        featB = self.get_feat((descB, norm_descB, fmB))

        vecA, vecB = self._compute_vecs(featA, featB)

        loss = self._calculate_loss(vecA, vecB)

        # BUG FIX: original used `!= 0 or`, which (a) logged images on EVERY
        # iteration whenever tb_img_freq was non-zero and (b) raised
        # ZeroDivisionError when tb_img_freq == 0, the documented "disabled"
        # value. `and` implements the intended "every tb_img_freq iterations".
        if self.tb_img_freq != 0 and engine.state.iteration % self.tb_img_freq == 0:
            self.logger.log_image_grid("train/sar", sar_patchs, "train")
            self.logger.log_image_grid("train/opt", opt_patchs, "train")

        self.logger.add_scalar('train/loss', loss.item(), curr_step)

        log_str += "Lmatch: {:.5f} \t".format(loss.item())

        loss.backward()
        self.optim_fts.step()

        self.log_str = log_str

        return None, None

    def evaluate(self, engine, batch):
        """Gradient-free evaluation; returns ``(None, (dists, labels))``.

        ``dists`` are per-pair Euclidean distances between the two branches'
        output vectors.
        """
        self.FtsA.eval()
        self.FtsB.eval()
        if self.FtsM is not None:
            for net in self.FtsM:
                net.eval()

        with torch.no_grad():
            sar_patchs, opt_patchs, labels = self._prepare_batch(batch)

            descA, norm_descA, fmA = self.FtsA(sar_patchs)
            descB, norm_descB, fmB = self.FtsB(opt_patchs)

            featA = self.get_feat((descA, norm_descA, fmA))
            featB = self.get_feat((descB, norm_descB, fmB))

            vecA, vecB = self._compute_vecs(featA, featB)

        # L2 distance per pair along the feature dimension.
        dists = torch.sqrt(torch.sum((vecA - vecB) ** 2, 1))

        sampler_dict = {
            "scalar-scalar" : {
                "dists" : dists,
                "labels" : labels
            },
        }
        if self.record_patch:
            sampler_dict.update({
                "sar=img-img" : sar_patchs,
                "opt=img-img" : opt_patchs,
            })

        self.logger.info(num=engine.state.iteration, **sampler_dict)

        return None, (dists, labels)

    def _calculate_loss(self, a, b, *, device=""):
        """Compute the matching loss between vectors `a` and `b`.

        `device` defaults to the trainer's device when left as "" (the empty
        string acts as a sentinel so callers can override per call).
        """
        if device == "":
            device = self.device

        return self.Lmatch(a, b).to(device)