import logging

import torch
import torch.nn.functional as F

from trainer.base_trainer import BaseTrainer

logger = logging.getLogger("match")

# Default mixing weights for the loss terms; only the match loss is active
# unless overridden through opts['loss_weights'].
DEFAULT_LOSS_WEIGHTS = dict(
    match=1,
    spatial_softmax=0,
    heatmap_l1=0,
)


class corrASLTrainer(BaseTrainer):
    """Trainer for correlation-based SAR/optical patch matching.

    Two feature extractors (``FtsA`` for SAR patches, ``FtsB`` for optical
    patches; weights are shared when no separate ``FtsB`` is configured)
    produce feature maps whose correlation map is supervised against a
    ground-truth match map. Loss terms are mixed according to
    ``self.loss_weights`` (see ``DEFAULT_LOSS_WEIGHTS`` keys).
    """

    cnt = 0
    sample = 0

    # log_freq = 100 # print iter info in log with on_iteration_start
    # Frequency (in iterations) of tensorboard log_image_grid calls;
    # 0 disables image logging entirely.
    tb_img_freq = 20

    # When True, evaluate() also records the raw input patches and GT map.
    record_patch = False

    def __init__(self, model, optimizer, loss_fn, logger, config):
        """
        Args:
            model: mapping with feature extractor "FtsA" and optionally "FtsB".
            optimizer: mapping with the feature-extractor optimizer under "Fts".
            loss_fn: mapping with the match loss under "Lmatch" (a sequence;
                its first element is used).
            logger: experiment logger (scalars, image grids, counters).
            config: trainer configuration (exposed as ``self.opts`` by
                BaseTrainer — assumed; confirm against base class).
        """
        super(corrASLTrainer, self).__init__(
            model, optimizer, loss_fn, logger, config
        )

        self.logger = logger

        self.optim_fts = optimizer["Fts"]

        # Siamese by default: reuse FtsA for the optical branch unless a
        # separate FtsB extractor is provided.
        self.FtsA = model["FtsA"]
        self.FtsB = model["FtsB"] if "FtsB" in model else self.FtsA

        self.Lmatch = loss_fn["Lmatch"][0]

        # Copy the defaults so per-instance edits never mutate the module constant.
        self.loss_weights = self.opts['loss_weights'] if 'loss_weights' in self.opts else DEFAULT_LOSS_WEIGHTS.copy()

        if "record_patch" in dir(self.opts):
            self.record_patch = self.opts.record_patch

    def _pick_search_template(self, feature_a, feature_b):
        """Order two feature maps as (search, template): larger spatial map first."""
        if feature_a.size(2) >= feature_b.size(2):
            return feature_a, feature_b
        return feature_b, feature_a

    def train(self, engine, batch):
        """Run one optimization step on *batch*.

        Returns:
            tuple: ``(l2, None)`` — per-sample L2 distance between the
            correlation-map peak and the ground-truth shift, plus a placeholder.
        """
        self.FtsA.train()
        self.FtsB.train()

        log_str = ""
        curr_step = self.logger.counters["train"]

        (sar_patchs, opt_patchs), (labels, shift) = self._prepare_batch(batch)

        self._zero_grad()

        feature_a = self.FtsA(sar_patchs)
        feature_b = self.FtsB(opt_patchs)

        s_p, t_p = self._pick_search_template(feature_a, feature_b)

        corr_map = self.FtsA.correlation_map(s_p, t_p)

        if self.loss_weights["spatial_softmax"] > 0:
            heatmap_hneg = self.spatial_softmax(corr_map)
        else:
            heatmap_hneg = corr_map

        match_loss = self.weighted_binary_cross_entropy(heatmap_hneg, labels, device=self.device)

        corr_map = corr_map.squeeze()
        heatmap_hneg = heatmap_hneg.squeeze()

        # BUGFIX: the original condition used `!= 0 or`, which (a) logged
        # images on *every* iteration whenever tb_img_freq != 0 and
        # (b) raised ZeroDivisionError when tb_img_freq == 0, although 0 is
        # documented to disable image logging.
        if self.tb_img_freq != 0 and engine.state.iteration % self.tb_img_freq == 0:
            self.logger.log_image_grid("train/sar", sar_patchs, "train")
            self.logger.log_image_grid("train/opt", opt_patchs, "train")
            self.logger.log_image_grid("train/corr_map", corr_map.unsqueeze(1), "train")
            self.logger.log_image_grid("train/gt_map", labels.unsqueeze(1), "train")

        self.logger.add_scalar('train/loss', match_loss.item(), curr_step)

        # L1 penalty on the raw correlation map encourages sparse peaks.
        heatmap_l1_loss = self.loss_weights["heatmap_l1"] * corr_map.norm(p=1)

        loss = self.loss_weights["match"] * match_loss + heatmap_l1_loss

        log_str += "Ltotal: {:.5f} \t".format(loss.item())
        log_str += "match_loss: {:.5f} \t".format(match_loss.item())
        log_str += "regularize: {:.5f} \t".format(heatmap_l1_loss.item())

        loss.backward()
        self.optim_fts.step()

        self.log_str = log_str

        l2 = self._l2_dis(corr_map.detach(), shift)

        return l2, None

    def evaluate(self, engine, batch):
        """Evaluate one batch without gradients and log feature/heat maps.

        Returns:
            tuple: ``(l2, None)`` — per-sample L2 localisation error, plus a
            placeholder.
        """
        self.FtsA.eval()
        self.FtsB.eval()

        with torch.no_grad():
            x, y = self._prepare_batch(batch)
            sar_patchs, opt_patchs = x
            labels, shift = y

            feature_a = self.FtsA(sar_patchs)
            feature_b = self.FtsB(opt_patchs)

            s_p, t_p = self._pick_search_template(feature_a, feature_b)

            corr_map = self.FtsA.correlation_map(s_p, t_p)

            if self.loss_weights["spatial_softmax"] > 0:
                heatmap_hneg = self.spatial_softmax(corr_map)
            else:
                heatmap_hneg = corr_map

        # Upsample both maps to the label resolution before localisation.
        h, w = labels.size(1), labels.size(2)
        corr_map = F.interpolate(corr_map, size=(h, w), mode="bilinear")
        heatmap_hneg = F.interpolate(heatmap_hneg, size=(h, w), mode="bilinear")

        corr_map = corr_map.squeeze()
        heatmap_hneg = heatmap_hneg.squeeze()

        l2, loc = self._l2_dis(corr_map.detach(), shift, ret_pred=True)

        sampler_dict = {
            "scalar-scalar" : {
                "l2=dis" : l2, 
                "loc" : loc
            }, 
            "sar=mft-fmap_chs" : feature_a, 
            "opt=mft-fmap_chs" : feature_b, 
            "corr=map-fmap_chs" : corr_map,  
            "smax=map-fmap_chs" : heatmap_hneg,  
        }
        if self.record_patch:
            sampler_dict.update({
                "sar=img-img" : sar_patchs, 
                "opt=img-img" : opt_patchs, 
                "gt=map-img" : labels, 
            })

        self.logger.info(num=engine.state.iteration, **sampler_dict)

        return l2, None

    def _calculate_loss(self, out, gt_map, *, device=""):
        """Compute the match loss on *out* vs *gt_map* and move it to *device*.

        Defaults to ``self.device`` when *device* is left empty.
        """
        if device == "":
            device = self.device

        loss = self.Lmatch(out, gt_map)
        loss = loss.to(device)

        return loss

    def weighted_binary_cross_entropy(self, heatmap, labels, thresh=0.8, device="cuda", reduction="mean"):
        """Binarize *labels* at *thresh* and compute a class-balanced BCE loss.

        The positive-class weight is the negative/positive pixel ratio, so the
        rare match location is not swamped by background pixels. Labels are
        max-pooled down to the heatmap's spatial resolution before comparison.

        NOTE(review): the weight is undefined (division by zero) when no label
        pixel reaches *thresh* — assumed not to occur; confirm upstream.
        """
        b, c, h, w = heatmap.shape
        weight = torch.sum(labels < thresh)/(torch.sum(labels >= thresh))
        labels = labels.unsqueeze(1)
        downsample = torch.nn.AdaptiveMaxPool2d((h, w))
        labels = downsample(labels)

        return self.Lmatch(heatmap, labels, pos_weight=weight.to(device), reduction=reduction)

    def spatial_softmax(self, heatmap):
        """Apply softmax over the spatial (H*W) positions of each channel.

        Batch and channel dimensions do not take part in the normalization:
        each (b, c) slice of the result sums to 1 over its H*W cells.
        """
        b, c, h, w = heatmap.size()
        x = heatmap.view(b, c, -1).transpose(2, 1)  # (b, h*w, c)
        x = F.softmax(x, dim=1)  # softmax along the spatial dimension
        return x.transpose(2, 1).view(b, c, h, w)


if __name__ == "__main__":
    # Smoke test: plant an argmax peak at a known location in each map and
    # check the L2 distance of the recovered peak against a random shift.

    def _map_to_location(score_map):
        """Return per-sample (x, y) of the argmax of a (B, H, W) map."""
        batch, _, width = score_map.size()
        flat_idx = torch.argmax(score_map.view(batch, -1), dim=1)
        return torch.stack((flat_idx % width, flat_idx // width), dim=1)

    def _l2_dis(score_map, gt_shift):
        """Euclidean distance between the predicted peak and *gt_shift*."""
        delta = _map_to_location(score_map) - gt_shift
        return torch.sqrt(torch.sum(delta ** 2, dim=1))

    shift = torch.randint(15, (2, 2))
    gt_shift = torch.randint(15, (2, 2))

    maps = torch.randint(10, (2, 16, 16))
    for i in range(shift.size(0)):
        maps[i, shift[i][1], shift[i][0]] = 11

    dis = _l2_dis(maps, gt_shift)

    print(shift)
    print(gt_shift)
    print(dis)






