import logging

import torch
import torch.nn.functional as F

from trainer.base_trainer import BaseTrainer

logger = logging.getLogger("match")

class integrationTrainer(BaseTrainer):
    """Trainer that jointly optimizes patch-level (metric) and template-level
    (correlation-map) matching between SAR and optical image patches.

    Expected containers (all dicts, keys established in ``__init__``):
        model     -- "FtsA" (SAR feature branch), optional "FtsB" (optical
                     branch; defaults to sharing FtsA), optional metric
                     heads "FtsM" / "FtsM1" / "FtsM2".
        optimizer -- "Fts" and "FtsH" optimizers.
        loss_fn   -- "Tmatch" (template loss) and "Pmatch" (patch loss),
                     each a sequence whose first element is the callable.

    Optional attributes read from ``self.opts`` (set up by BaseTrainer):
    ``record_patch``, ``frozen``, ``feat``, ``Lambda``, ``L_start``.
    """

    cnt = 0
    sample = 0

    # log_freq = 100 # print iter info in log with on_iteration_start
    tb_img_freq = 20  # frequency to call tensorboard log_image_grid, 0 means not use log_image_grid

    record_patch = False

    def __init__(self, model, optimizer, loss_fn, logger, config):
        super().__init__(model, optimizer, loss_fn, logger, config)

        self.logger = logger

        self.optim_fts = optimizer["Fts"]
        self.optim_ftsh = optimizer["FtsH"]

        self.FtsA = model["FtsA"]
        # Share the SAR branch when no dedicated optical branch is given.
        self.FtsB = model["FtsB"] if "FtsB" in model else self.FtsA
        # Collect whichever metric heads are present, in a fixed order;
        # None when the model provides no head at all.
        self.FtsM = [model[k] for k in ("FtsM", "FtsM1", "FtsM2") if k in model] or None

        self.Tmatch = loss_fn["Tmatch"][0]
        self.Pmatch = loss_fn["Pmatch"][0]

        # Optional settings: fall back to the documented defaults when the
        # option object does not carry the attribute.
        self.record_patch = getattr(self.opts, "record_patch", self.record_patch)
        self.frozen = getattr(self.opts, "frozen", False)
        self.feat = getattr(self.opts, "feat", "desc")
        self.Lambda = getattr(self.opts, "Lambda", 1)
        self.L_start = getattr(self.opts, "L_start", 10)

        # Weight of the patch loss; on_epoch_start() raises it to
        # self.Lambda once epoch >= L_start.  Initialized here so that
        # _calculate_loss() never touches an undefined attribute even if
        # it runs before the first on_epoch_start("train") hook.
        self.L_param = 0 if self.L_start > 0 else self.Lambda

    def get_feat(self, tensors):
        """Pick the configured feature out of ``(desc, norm_desc, fm)``.

        Returns None when ``self.feat`` names none of the three.
        """
        ret = None
        if self.feat == "desc":
            ret = tensors[0]
        elif self.feat == "norm_desc":
            ret = tensors[1]
        elif self.feat == "fm":
            ret = tensors[2]

        return ret

    def _set_mode(self, training):
        """Switch every sub-network to train (True) or eval (False) mode."""
        for net in [self.FtsA, self.FtsB] + (self.FtsM or []):
            net.train(training)

    def _metric_vectors(self, featA, featB):
        """Run the metric head(s) over both branch features.

        A single shared head processes both batches in one forward pass
        (concatenated, then split in half); two heads process one branch
        each.  With no head the features pass through unchanged and a
        warning is logged.
        """
        if self.FtsM is None:
            logger.warning("FtsM is None!")
            return featA, featB
        if len(self.FtsM) == 1:
            vec = self.FtsM[0](torch.cat((featA, featB), dim=0))
            b_size = vec.size(0) // 2
            return vec[:b_size], vec[b_size:]
        return self.FtsM[0](featA), self.FtsM[1](featB)

    @staticmethod
    def _order_by_height(feat_a, feat_b):
        """Return (search, template): the taller feature map is searched."""
        if feat_a.size(2) >= feat_b.size(2):
            return feat_a, feat_b
        return feat_b, feat_a

    def train(self, engine, batch):
        """One optimization step over a batch of SAR/optical patch pairs.

        Returns (l2, None) where l2 is the per-sample distance between
        the predicted and ground-truth shift of the correlation map.
        """
        self._set_mode(True)

        log_str = ""

        patch_pair, info = self._prepare_batch(batch)
        sar_patchs, opt_patchs = patch_pair
        labels, sar_crop, opt_crop, gt_map, shift = info

        self._zero_grad()

        # With frozen backbones only the metric heads receive gradients.
        if self.frozen:
            with torch.no_grad():
                descA, norm_descA, fmA = self.FtsA(sar_patchs)
                descB, norm_descB, fmB = self.FtsB(opt_patchs)
        else:
            descA, norm_descA, fmA = self.FtsA(sar_patchs)
            descB, norm_descB, fmB = self.FtsB(opt_patchs)

        featA = self.get_feat((descA, norm_descA, fmA))
        featB = self.get_feat((descB, norm_descB, fmB))
        vecA, vecB = self._metric_vectors(featA, featB)

        # Template branch always works on the normalized descriptors.
        featureA = self.crop_feature_map(norm_descA, sar_crop)
        featureB = self.crop_feature_map(norm_descB, opt_crop)
        s_p, t_p = self._order_by_height(featureA, featureB)
        corr_map = self.mtc_fn(s_p, t_p)

        loss, Ploss, Tloss = self._calculate_loss(
            corr_map, gt_map, vecA, vecB)

        log_str += f"Loss: {loss.item():.5f} \t"
        log_str += f"Ploss: {Ploss.item():.5f} \t"
        log_str += f"Tloss: {Tloss.item():.5f} \t"

        loss.backward()
        self.optim_fts.step()
        self.optim_ftsh.step()

        self.log_str = log_str

        l2 = self._l2_dis(corr_map.detach(), shift)

        return l2, None

    def evaluate(self, engine, batch):
        """Evaluate one batch; dispatch on the shape of the batch info.

        info not a list -> "patch" (labels only); 2-element list ->
        "template"; 5-element list -> "integration" (both).  Any other
        shape evaluates nothing and returns (None, None).
        """
        with torch.no_grad():
            patch_pair, info = self._prepare_batch(batch)

        if not isinstance(info, list):
            mode = "patch"
        elif len(info) == 2:
            mode = "template"
        elif len(info) == 5:
            mode = "integration"
        else:
            mode = ""

        l2 = None
        scores = None

        if mode in ("patch", "integration"):
            # In integration mode the labels sit at info[0].
            info_data = info[0] if mode == "integration" else info
            _, scores = self.evaluate_patch_m(patch_pair, info_data, engine)

        if mode in ("template", "integration"):
            patch_data = patch_pair
            info_data = info
            run_template = True

            if mode == "integration":
                labels, sar_crop, opt_crop, gt_map, shift = info_data
                sar_input = self.crop_feature_map(patch_data[0], sar_crop)
                opt_input = self.crop_feature_map(patch_data[1], opt_crop)

                # Template matching only makes sense on genuine pairs.
                pos = (labels == 1)
                patch_data = (sar_input[pos], opt_input[pos])
                info_data = (gt_map[pos], shift[pos])
                run_template = pos.sum().item() > 0

            if run_template:
                l2, _ = self.evaluate_template_m(patch_data, info_data, engine)

        return l2, scores

    def evaluate_patch_m(self, patch_pair, info, engine):
        """Patch-level evaluation: L2 distances between metric vectors.

        Returns (None, (dists, labels)).
        """
        self._set_mode(False)

        with torch.no_grad():
            sar_patchs, opt_patchs = patch_pair
            labels = info

            descA, norm_descA, fmA = self.FtsA(sar_patchs)
            descB, norm_descB, fmB = self.FtsB(opt_patchs)

            featA = self.get_feat((descA, norm_descA, fmA))
            featB = self.get_feat((descB, norm_descB, fmB))
            vecA, vecB = self._metric_vectors(featA, featB)

        dists = torch.sqrt(torch.sum((vecA - vecB) ** 2, 1))

        return None, (dists, labels)

    def evaluate_template_m(self, patch_pair, info, engine):
        """Template-level evaluation: localize each template in its search
        map and log the results; returns (l2, None).
        """
        self._set_mode(False)

        with torch.no_grad():
            sar_patchs, opt_patchs = patch_pair
            labels, shift = info

            # Only the normalized descriptors (second output) are used here.
            descA, featureA, fmA = self.FtsA(sar_patchs)
            descB, featureB, fmB = self.FtsB(opt_patchs)

            s_p, t_p = self._order_by_height(featureA, featureB)
            corr_map = self.mtc_fn(s_p, t_p)

        l2, loc = self._l2_dis(corr_map.detach(), shift, ret_pred=True)

        sampler_dict = {
            "scalar-scalar" : {
                "l2=dis" : l2, 
                "loc" : loc
            }, 
            "sar=mft-fmap_chs" : featureA, 
            "opt=mft-fmap_chs" : featureB, 
            "corr=map-fmap_chs" : corr_map,  
        }
        if self.record_patch:
            sampler_dict.update({
                "sar=img-img" : sar_patchs, 
                "opt=img-img" : opt_patchs, 
                "gt=map-img" : labels, 
            })

        self.logger.info(num=engine.state.iteration, **sampler_dict)

        return l2, None

    def crop_feature_map(self, feat, crop):
        """Crop each map in ``feat`` to its (x0, y0, x1, y1) box in ``crop``.

        All boxes are assumed to be the same size so the per-sample crops
        can be stacked back into one batch tensor.
        """
        crops = [
            t[:, int(c[1]):int(c[3]), int(c[0]):int(c[2])]
            for t, c in zip(feat, crop)
        ]
        return torch.stack(crops, dim=0)

    def _calculate_loss(self, out, gt_map, vec_a, vec_b,
        *, device=""):
        """Combine template loss and L_param-weighted patch loss.

        Returns (total, Ploss, Tloss); ``device`` defaults to self.device.
        """
        if device == "":
            device = self.device

        Ploss = self.Pmatch(vec_a, vec_b).to(device)
        Tloss = self.Tmatch(out, gt_map).to(device)

        # L_param stays 0 during the warm-up epochs (see on_epoch_start).
        loss = self.L_param * Ploss + Tloss

        return loss, Ploss, Tloss

    def on_epoch_start(self, engine, phase=None):
        """Enable the patch-loss term once the L_start warm-up is over."""
        if phase == "train":
            self.curr_epoch = engine.state.epoch
            self.L_param = 0 if self.L_start > self.curr_epoch else self.Lambda
            


def map_to_location(out):
    """Return the (x, y) coordinates of the maximum of each H x W map.

    out: tensor of shape (B, H, W).  Returns a (B, 2) long tensor holding
    (col, row) = (x, y) of the per-sample arg-max.
    """
    b, h, w = out.size()

    flat_idx = torch.argmax(out.view(b, -1), dim=1)
    row = flat_idx // w
    col = flat_idx % w
    return torch.stack((col, row), dim=1)


def l2_dis(out, gt_shift):
    """Euclidean distance between each map's arg-max location and gt_shift.

    out: (B, H, W) correlation maps; gt_shift: (B, 2) ground-truth (x, y).
    Returns a (B,) float tensor of distances.
    """
    pred_loc = map_to_location(out)

    # Cast before sqrt so integer-typed maps/shifts work on every torch
    # version instead of relying on implicit unary-op type promotion.
    square = torch.sum((pred_loc - gt_shift) ** 2, dim=1).float()
    return torch.sqrt(square)


if __name__ == "__main__":
    # Quick self-test: plant a known maximum into random integer maps and
    # eyeball the recovered locations / distances.
    shift = torch.randint(15, (2, 2))
    gt_shift = torch.randint(15, (2, 2))

    maps = torch.randint(10, (2, 16, 16))
    for i in range(shift.size(0)):
        maps[i, shift[i][1], shift[i][0]] = 11

    dis = l2_dis(maps, gt_shift)

    print(shift)
    print(gt_shift)
    print(dis)






