import logging

import torch

# from ignite.metrics import SSIM, PSNR

from trainer.despeckle_trainer import DespeckleTrainer

from metrics.calc_ssim import CalcSSIM
from metrics.calc_psnr import CalcPSNR
from metrics.ENL import ENL

logger = logging.getLogger("match")


class rdnetTrainer(DespeckleTrainer):
    """Trainer for the rdnet despeckling network.

    Optimizes a single "Despeckle" model with an MSE + weighted
    total-variation loss, logging scalars and image grids through the
    project logger.
    """

    cnt = 0
    sample = 0

    # log_freq = 100 # print iter info in log with on_iteration_start

    # Frequency (in iterations) at which log_image_grid is called during
    # training; 0 disables image-grid logging.
    tb_img_freq = 100

    # Defaults for optional settings; overridden in __init__ when the
    # corresponding attributes exist on self.opts.
    record_patch = False
    # BUGFIX: record_logger previously had no default and was only set when
    # opts carried record_patch, so evaluate() could crash with
    # AttributeError; give it a safe class-level default like record_patch.
    record_logger = False

    def __init__(self, model, optimizer, loss_fn, logger, config):
        """
        Args:
            model: dict with key "Despeckle" -> despeckling network.
            optimizer: dict with key "Fts" -> optimizer over that network.
            loss_fn: dict with keys "mse" and "TV"; each value is a sequence
                whose first element is the loss callable.
            logger: project logger (counters, scalars, image grids, samples).
            config: configuration forwarded to DespeckleTrainer.
        """
        super().__init__(model, optimizer, loss_fn, logger, config)

        self.logger = logger

        self.optim_fts = optimizer["Fts"]

        self.desp_model = model["Despeckle"]

        self.mse = loss_fn["mse"][0]
        self.TV = loss_fn["TV"][0]
        # Weight of the TV term in the combined loss (see _calculate_loss).
        self.lambda_ = 0.1

        # self.opts is provided by the base class -- presumably parsed from
        # `config`; TODO confirm. hasattr replaces the `in dir(...)` idiom.
        if hasattr(self.opts, "record_patch"):
            self.record_patch = self.opts.record_patch
            self.record_logger = self.opts.record_logger
            self.lambda_ = self.opts.Lambda

    def train(self, engine, batch):
        """Run one optimization step on a (clean, noisy) patch pair.

        Args:
            engine: ignite-style engine exposing ``state.iteration``.
            batch: raw batch; ``_prepare_batch`` yields (patch, noisy_patch).

        Returns:
            (despeckle_img, patch): model output and the clean target.
        """
        self.desp_model.train()

        log_str = ""
        curr_step = self.logger.counters["train"]

        patch, noisy_patch = self._prepare_batch(batch)

        self._zero_grad()

        despeckle_img, _ = self.desp_model(noisy_patch)

        loss, info = self._calculate_loss(despeckle_img, patch,
            noisy=noisy_patch)

        # BUGFIX: original used `!= 0 or`, which (a) logged image grids on
        # EVERY iteration whenever tb_img_freq != 0 and (b) raised
        # ZeroDivisionError (modulo by zero) when tb_img_freq == 0.
        # `and` implements the documented "0 means disabled" behavior.
        if self.tb_img_freq != 0 and engine.state.iteration % self.tb_img_freq == 0:
            self.logger.log_image_grid(
                "train/despeckle", despeckle_img.unsqueeze(1), "train"
            )
            self.logger.log_image_grid(
                "train/noisy", noisy_patch.unsqueeze(1), "train"
            )
            self.logger.log_image_grid(
                "train/patch", patch.unsqueeze(1), "train"
            )

        self.logger.add_scalar('train/loss', loss.item(), curr_step)

        log_str += "Despeckle: {:.5f} \t".format(loss.item())
        log_str += "MSE: {:.5f} \t".format(info["mse"].item())
        log_str += "TV: {:.5f} \t".format(info["tv"].item())

        loss.backward()
        self.optim_fts.step()

        self.log_str = log_str

        return despeckle_img, patch

    # use on noisy OPT (batches that provide a clean reference patch)
    def evaluate(self, engine, batch):
        """Evaluate the despeckler on a batch with a clean reference.

        Computes per-patch metrics on the despeckled output and, depending on
        the record_* flags, forwards images and scalars to the sample logger.

        Returns:
            (despeckle_img, patch): model output and the clean target.
        """
        self.desp_model.eval()

        with torch.no_grad():

            sampler_dict = {}

            patch, noisy_patch = self._prepare_batch(batch)

            if self.record_patch:
                sampler_dict.update({
                    "patch=img-img" : patch
                })

            despeckle_img, _ = self.desp_model(noisy_patch)

            scalar = self._metric_per_patch(despeckle_img, patch)

            if self.record_logger:
                sampler_dict.update({
                    "scalar-scalar" : dict(**scalar),
                    "noisy=img-img" : noisy_patch,
                    "despeckle=img-img" : despeckle_img,
                    "normal=img-img" : patch
                })

        self.logger.info(num=engine.state.iteration, **sampler_dict)

        return despeckle_img, patch

    def _calculate_loss(self, predict_img, label_img, *, device="", **kwargs):
        """Combine the MSE and TV losses as ``mse + lambda_ * tv``.

        Args:
            predict_img: despeckled prediction.
            label_img: clean target.
            device: device the loss terms are moved to; "" means self.device.
            **kwargs: optional "noisy" tensor forwarded to the MSE loss
                (None when absent).

        Returns:
            (loss, info): total loss and the {"mse": ..., "tv": ...} parts.
        """
        if device == "":
            device = self.device

        # Some MSE variants take the noisy input as a third argument.
        noisy = kwargs.get("noisy")

        mse_loss = self.mse(predict_img, label_img, noisy).to(device)
        tv_loss = self.TV(predict_img, label_img).to(device)
        loss = mse_loss + tv_loss * self.lambda_

        info = {"mse": mse_loss, "tv": tv_loss}
        return loss, info

    def _metric_per_patch(self, predict_img, label_img):
        """Compute evaluation metrics for every patch in the batch.

        Args:
            predict_img: batch of despeckled patches.
            label_img: batch of reference patches.

        Returns:
            dict mapping metric name -> 1-D tensor of per-patch values.
        """
        info = {}
        metric_ = {
            "ENL" : ENL()
            # "SSIM" : CalcSSIM(data_range=1.0),
            # "PSNR" : CalcPSNR(data_range=1.0)
        }

        for name, metric in metric_.items():

            per_patch = []

            for p, l in zip(predict_img, label_img):
                # NOTE(review): `metric` is never reset between patches; if it
                # accumulates across update() calls (ignite-style), each entry
                # reflects all patches so far -- confirm ENL's
                # update/compute/reset semantics.
                metric.update((p.unsqueeze(dim=0), l.unsqueeze(dim=0)))
                per_patch.append(metric.compute())

            info[name] = torch.tensor(per_patch)

        return info