import logging

import torch

from trainer.base_trainer import BaseTrainer

# Module-level logger; "match" is the shared logger name used across the project.
logger = logging.getLogger("match")

class hardnetTrainer(BaseTrainer):
    """Trainer for HardNet-style patch matching between SAR and optical patches.

    Runs two feature networks (one per modality, optionally shared/siamese),
    optimizes a matching loss, and reports scalars/image grids to the logger.
    """

    cnt = 0
    sample = 0

    # log_freq = 100 # print iter info in log with on_iteration_start
    # Log image grids to tensorboard every `tb_img_freq` iterations; 0 disables it.
    tb_img_freq = 20

    # Whether evaluate() also forwards the raw patches to the logger.
    record_patch = False

    def __init__(self, model, optimizer, loss_fn, logger, config):
        """Wire up networks, optimizer and loss from the container dicts.

        model:     dict with "FtsA" and optionally "FtsB" (falls back to FtsA,
                   i.e. a siamese/shared-weights setup).
        optimizer: dict with "Fts" — the optimizer for the feature networks.
        loss_fn:   dict with "Lmatch" — a sequence whose first entry is the
                   matching loss callable.
        logger:    experiment logger (scalars, image grids, counters).
        config:    passed through to BaseTrainer.
        """
        super(hardnetTrainer, self).__init__(
            model, optimizer, loss_fn, logger, config
        )

        self.logger = logger

        self.optim_fts = optimizer["Fts"]

        self.FtsA = model["FtsA"]
        # Share one network for both modalities when "FtsB" is absent.
        self.FtsB = model.get("FtsB", self.FtsA)

        self.Lmatch = loss_fn["Lmatch"][0]

        # self.opts presumably comes from BaseTrainer — TODO confirm.
        # Allow the run configuration to override the class default.
        if hasattr(self.opts, "record_patch"):
            self.record_patch = self.opts.record_patch

    def train(self, engine, batch):
        """One optimization step on a batch of (SAR, optical, label) patches.

        Returns (None, None); the loss is reported via the logger and
        self.log_str instead of the return value.
        """
        self.FtsA.train()
        self.FtsB.train()

        log_str = ""
        curr_step = self.logger.counters["train"]

        sar_patchs, opt_patchs, labels = self._prepare_batch(batch)

        self._zero_grad()

        featureA = self.FtsA(sar_patchs)
        featureB = self.FtsB(opt_patchs)

        loss = self._calculate_loss(featureA, featureB)

        # BUG FIX: original used `or`, which (a) logged images on EVERY
        # iteration whenever tb_img_freq != 0 and (b) raised ZeroDivisionError
        # when tb_img_freq == 0. `and` implements the documented behavior:
        # log every tb_img_freq iterations, 0 disables image logging.
        if self.tb_img_freq != 0 and engine.state.iteration % self.tb_img_freq == 0:
            self.logger.log_image_grid("train/sar", sar_patchs, "train")
            self.logger.log_image_grid("train/opt", opt_patchs, "train")

        self.logger.add_scalar('train/loss', loss.item(), curr_step)

        log_str += "Lmatch: {:.5f} \t".format(loss.item())

        loss.backward()
        self.optim_fts.step()

        self.log_str = log_str

        return None, None

    def evaluate(self, engine, batch):
        """Compute descriptor distances for a batch without gradient tracking.

        Returns (None, (dists, labels)) where dists is the per-pair L2
        distance between the two modality descriptors.
        """
        self.FtsA.eval()
        self.FtsB.eval()

        with torch.no_grad():
            sar_patchs, opt_patchs, labels = self._prepare_batch(batch)

            featureA = self.FtsA(sar_patchs)
            featureB = self.FtsB(opt_patchs)

        # Per-sample Euclidean distance over the feature dimension.
        dists = torch.sqrt(torch.sum((featureA - featureB) ** 2, 1))

        sampler_dict = {
            "scalar-scalar" : {
                "dists" : dists, 
                "labels" : labels
            }, 
        }
        if self.record_patch:
            sampler_dict.update({
                "sar=img-img" : sar_patchs, 
                "opt=img-img" : opt_patchs,
            })

        self.logger.info(num=engine.state.iteration, **sampler_dict)

        return None, (dists, labels)

    def _calculate_loss(self, a, b, *, device=""):
        """Apply the matching loss to descriptor pair (a, b).

        device: target device for the loss tensor; "" means self.device.
        """
        if device == "":
            device = self.device

        loss = self.Lmatch(a, b)
        loss = loss.to(device)

        return loss


if __name__ == "__main__":
    def _map_to_location(out):
        b, h, w = out.size()

        loc = torch.argmax(out.view(b, -1), dim=1)
        row = loc // w
        col = loc % w
        xy = torch.stack((col, row), dim=1)
        
        return xy


    def _l2_dis(out, gt_shift):
        pred_loc = _map_to_location(out)
        
        square = torch.sum((pred_loc-gt_shift) ** 2, dim=1)
        dis = torch.sqrt(square)

        return dis

    shift = torch.randint(15, (2, 2))
    gt_shift = torch.randint(15, (2, 2))

    maps = torch.randint(10, (2, 16, 16))
    for i in range(shift.size(0)):
        maps[i, shift[i][1], shift[i][0]] = 11

    dis = _l2_dis(maps, gt_shift)

    print(shift)
    print(gt_shift)
    print(dis)
