import logging
from functools import partial

import torch
import torch.nn.functional as F

from trainer.base_trainer import BaseTrainer

logger = logging.getLogger("match")

class osmnetTrainer(BaseTrainer):
    """Trainer for SAR/optical patch matching.

    Two (possibly weight-shared) feature networks embed a SAR patch and an
    optical patch; the larger feature map is correlated against the smaller
    one with an FFT-based similarity metric (``self.mtc_fn``), and the
    resulting correlation map is supervised against a ground-truth location
    map.  The per-sample L2 distance between the predicted and true shift is
    returned as the metric for both training and evaluation.
    """

    cnt = 0
    sample = 0

    # log_freq = 100 # print iter info in log with on_iteration_start
    tb_img_freq = 20  # call tensorboard log_image_grid every N iterations; 0 disables it

    record_patch = False  # if True, also dump raw patches / gt map during evaluation

    def __init__(self, model, optimizer, loss_fn, logger, config):
        """
        Param
        -----
        model: dict of networks; requires "FtsA", optional "FtsB" (defaults to FtsA)
        optimizer: dict of optimizers; requires "Fts"
        loss_fn: dict of losses; requires "Lmatch" (a sequence, first entry used)
        logger: experiment logger providing counters / scalar / image-grid hooks
        config: run configuration (exposed as ``self.opts`` by BaseTrainer)
        """
        super(osmnetTrainer, self).__init__(
            model, optimizer, loss_fn, logger, config
        )

        self.logger = logger

        self.optim_fts = optimizer["Fts"]

        self.FtsA = model["FtsA"]
        # fall back to a single shared network when no second extractor is given
        self.FtsB = model["FtsB"] if "FtsB" in model else self.FtsA

        self.Lmatch = loss_fn["Lmatch"][0]

        if hasattr(self.opts, "record_patch"):
            self.record_patch = self.opts.record_patch

        # Similarity metric used to correlate the two feature maps:
        # FFT-based SSD by default, selectable through opts.metric.
        # BUG FIX: this assignment was commented out, so train()/evaluate()
        # raised AttributeError on self.mtc_fn.
        self.mtc_fn = self._ssd_by_fft
        if hasattr(self.opts, "metric"):
            attrs = self.opts.metric.toDict()
            name = attrs.pop("name")
            if name == "SSD":
                self.mtc_fn = partial(self._ssd_by_fft, **attrs)
            elif name == "Conv":
                self.mtc_fn = partial(self._conv_by_fft, **attrs)

    @staticmethod
    def _as_search_and_template(feat_a, feat_b):
        """Order two feature maps as (search, template): the taller map is searched."""
        if feat_a.size(2) >= feat_b.size(2):
            return feat_a, feat_b
        return feat_b, feat_a

    def train(self, engine, batch):
        """Run one optimization step on *batch*.

        Returns
        -------
        (l2, None): per-sample L2 distance between predicted and true shift.
        """
        self.FtsA.train()
        self.FtsB.train()

        log_str = ""
        curr_step = self.logger.counters["train"]

        (sar_patchs, opt_patchs), (labels, shift) = self._prepare_batch(batch)

        self._zero_grad()

        _, featureA = self.FtsA(sar_patchs)
        _, featureB = self.FtsB(opt_patchs)

        s_p, t_p = self._as_search_and_template(featureA, featureB)

        corr_map = self.mtc_fn(s_p, t_p)
        loss = self._calculate_loss(corr_map, labels)

        # BUG FIX: was "tb_img_freq != 0 or iteration % tb_img_freq == 0",
        # which logged images every iteration and raised ZeroDivisionError
        # when tb_img_freq == 0 (documented as "disable image logging")
        if self.tb_img_freq != 0 and engine.state.iteration % self.tb_img_freq == 0:
            self.logger.log_image_grid("train/sar", sar_patchs, "train")
            self.logger.log_image_grid("train/opt", opt_patchs, "train")
            self.logger.log_image_grid("train/corr_map", corr_map.unsqueeze(1), "train")
            self.logger.log_image_grid("train/gt_map", labels.unsqueeze(1), "train")

        self.logger.add_scalar('train/loss', loss.item(), curr_step)

        log_str += "Lmatch: {:.5f} \t".format(loss.item())

        loss.backward()
        self.optim_fts.step()

        self.log_str = log_str

        # _l2_dis comes from BaseTrainer: distance between the corr_map
        # argmax and the ground-truth shift — TODO confirm signature upstream
        l2 = self._l2_dis(corr_map.detach(), shift)

        return l2, None

    def evaluate(self, engine, batch):
        """Evaluate one batch without gradients; samples features to the logger.

        Returns
        -------
        (l2, None): per-sample L2 distance between predicted and true shift.
        """
        self.FtsA.eval()
        self.FtsB.eval()

        with torch.no_grad():
            x, y = self._prepare_batch(batch)
            sar_patchs, opt_patchs = x
            labels, shift = y

            _, featureA = self.FtsA(sar_patchs)
            _, featureB = self.FtsB(opt_patchs)

            s_p, t_p = self._as_search_and_template(featureA, featureB)

            corr_map = self.mtc_fn(s_p, t_p)

        l2, loc = self._l2_dis(corr_map.detach(), shift, ret_pred=True)

        sampler_dict = {
            "scalar-scalar" : {
                "l2=dis" : l2, 
                "loc" : loc
            }, 
            "sar=mft-fmap_chs" : featureA, 
            "opt=mft-fmap_chs" : featureB, 
            "corr=map-fmap_chs" : corr_map,  
        }
        if self.record_patch:
            sampler_dict.update({
                "sar=img-img" : sar_patchs, 
                "opt=img-img" : opt_patchs, 
                "gt=map-img" : labels, 
            })

        self.logger.info(num=engine.state.iteration, **sampler_dict)

        return l2, None

    def _ssd_by_fft(
        self, search_patch, template_patch, *, 
        omit_monomial=True, mean=True
    ):
        """
        Calculate the SSD of every template/search alignment via FFT.

        Param
        -----
        search_patch: (b, c, sh, sw) feature map that is searched over
        template_patch: (b, c, th, tw) feature map used as template
        omit_monomial: omit the constant Sum(T^2) term (does not move the argmin)
        mean: normalise by the template size (divide by th * tw)

        Returns
        -------
        (b, sh-th+1, sw-tw+1) SSD score map.
        """

        b, c, sh, sw = search_patch.size()
        b, c, th, tw = template_patch.size()
        mh, mw = sh - th + 1, sw - tw + 1

        # box filter: correlating with it yields the windowed sum of search^2
        T = torch.zeros_like(search_patch)
        T[:, :, 0:th, 0:tw] = 1

        sen_x = search_patch ** 2

        tmp1 = torch.fft.fft2(sen_x)
        tmp2 = torch.fft.fft2(T)

        # cross-correlation in the frequency domain, summed over channels
        tmp_sum = torch.sum(tmp1 * torch.conj(tmp2), 1)

        ssd_f_1 = torch.fft.ifft2(tmp_sum)

        ssd_fr_1 = torch.real(ssd_f_1)
        ssd_fr_1 = ssd_fr_1[:, 0:mh, 0:mw]

        # zero-padded template for the cross term Sum(S * T)
        ref_Tx = torch.zeros_like(search_patch)
        ref_Tx[:, :, 0:th, 0:tw] = template_patch

        tmp1 = torch.fft.fft2(search_patch)
        tmp2 = torch.fft.fft2(ref_Tx)

        tmp_sum = torch.sum(tmp1 * torch.conj(tmp2), 1)
        ssd_f_2 = torch.fft.ifft2(tmp_sum)

        ssd_fr_2 = torch.real(ssd_f_2)
        ssd_fr_2 = ssd_fr_2[:, 0:mh, 0:mw]

        # SSD = Sum(S^2) - 2*Sum(S*T) (+ Sum(T^2) unless omitted)
        ssd_batch = (ssd_fr_1 - 2 * ssd_fr_2)

        if not omit_monomial:
            opt = torch.sum(template_patch ** 2, dim=(1, 2, 3))
            opt = opt.view(b, 1, 1)
            ssd_batch += opt

        if mean:
            ssd_batch /= th
            ssd_batch /= tw

        return ssd_batch

    def _conv_by_fft(
        self, search_patch, template_patch, *, 
        mean=True, softmax=False
    ):
        """
        Calculate the cross-correlation of every alignment via FFT.

        Param
        -----
        mean: normalise by the template size (divide by th * tw)
        softmax: apply a spatial softmax to the resulting map

        Returns
        -------
        (b, sh-th+1, sw-tw+1) correlation score map.
        """

        b, c, sh, sw = search_patch.size()
        b, c, th, tw = template_patch.size()
        mh, mw = sh - th + 1, sw - tw + 1

        # zero-padded template so both FFTs share one spatial size
        ref_Tx = torch.zeros_like(search_patch)
        ref_Tx[:, :, 0:th, 0:tw] = template_patch

        tmp1 = torch.fft.fft2(search_patch)
        tmp2 = torch.fft.fft2(ref_Tx)

        tmp_sum = torch.sum(tmp1 * torch.conj(tmp2), 1)
        conv_f = torch.fft.ifft2(tmp_sum)

        conv_f = torch.real(conv_f)

        conv_f = conv_f[:, 0:mh, 0:mw]

        if mean:
            conv_f /= th
            conv_f /= tw

        if softmax:
            conv_f = conv_f.unsqueeze(dim=0)
            conv_f = self._spatial_softmax(conv_f)
            conv_f = conv_f.squeeze()

        return conv_f

    def _spatial_softmax(self, heatmap):
        """Apply a softmax over the spatial (h*w) positions of each channel."""
        b, c, h, w = heatmap.size()
        x = heatmap.reshape(b, c, -1).transpose(2, 1)
        x = F.softmax(x, dim=1)  # softmax over h*w; the batch dim is not mixed
        return x.transpose(2, 1).reshape(b, c, h, w)

    def _calculate_loss(self, out, gt_map, *, device=""):
        """Compute the matching loss and move it to *device* ("" -> self.device)."""
        if device == "":
            device = self.device

        loss = self.Lmatch(out, gt_map)
        loss = loss.to(device)

        return loss


    # def on_epoch_start(self, engine, phase=None):
    #     if phase == "train":
    #         self.curr_epoch = engine.state.epoch 


    # def on_epoch_end(self, engine, phase=None):        
    #     if phase in ["train", "evaluate"]:
    #         info = f"Epoch[{engine.state.epoch}] on {phase} - Metrics:"
    #         for k, v in engine.state.metrics.items():

    #             if type(v) == list:
    #                 for i in range(0, len(v), 2):
    #                     info += f"\n{k}-{v[i]} : {v[i+1]:<15.8f} "
    #             else:
    #                 info += f"\n{k} : {v:<15.8f} "

    #         # print(info)
    #         logger.info(info)


    # def on_iteration_start(self, engine, phase=None):            
    #     if phase == "train":
    #         curr_iter = (engine.state.iteration - 1) % len(self.attached["train_loader"]) + 1

    #         if curr_iter % self.log_freq == 0:
    #             logger.info("Epoch[{}] Iteration[{}/{}] {}".format(engine.state.epoch, curr_iter, len(self.attached["train_loader"]), self.log_str))
    #             # print("Epoch[{}] Iteration[{}/{}] {}".format(engine.state.epoch, curr_iter, len(self.attached["train_loader"]), self.log_str))
        
    #     elif phase == "evaluate":
    #         curr_iter = (engine.state.iteration - 1) % len(self.attached["validation_loader"]) + 1
    #         if curr_iter % self.log_freq == 0:
    #             logger.info("Iteration[{}/{}]".format(curr_iter, len(self.attached["validation_loader"])))

if __name__ == "__main__":
    # Small self-test of the argmax localization / L2-distance helpers.

    def _map_to_location(score_map):
        """Per-sample (x, y) coordinates of the maximum of each 2-D map."""
        batch, _, width = score_map.size()
        flat_idx = torch.argmax(score_map.view(batch, -1), dim=1)
        xs = flat_idx % width
        ys = flat_idx // width
        return torch.stack((xs, ys), dim=1)

    def _l2_dis(score_map, gt_shift):
        """Euclidean distance between each predicted peak and its ground truth."""
        pred_loc = _map_to_location(score_map)
        return torch.sqrt(torch.sum((pred_loc - gt_shift) ** 2, dim=1))

    shift = torch.randint(15, (2, 2))
    gt_shift = torch.randint(15, (2, 2))

    # random maps with a guaranteed unique peak planted at `shift`
    maps = torch.randint(10, (2, 16, 16))
    for i in range(shift.size(0)):
        maps[i, shift[i][1], shift[i][0]] = 11

    dis = _l2_dis(maps, gt_shift)

    print(shift)
    print(gt_shift)
    print(dis)






