# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch import nn

from ..backbone import build_backbone
from detectron2.structures import ImageList
from .build import META_ARCH_REGISTRY

import cv2
import numpy as np

__all__ = ["AseNet"]


class _GradMultiplier(torch.autograd.Function):
    """Identity in the forward pass; scales the incoming gradient by
    ``weight`` in the backward pass (a gradient-boosting trick)."""

    @staticmethod
    def forward(ctx, x, weight):
        # Accept either a Python number or a tensor.
        # BUG FIX: the original tested ``type(weight) is not torch.tensor`` --
        # ``torch.tensor`` is a factory function, not a type, so the branch was
        # always taken and an already-tensor weight was re-wrapped.
        if not isinstance(weight, torch.Tensor):
            weight = torch.tensor([weight], device=x.device)
        ctx.save_for_backward(weight)
        return x

    @staticmethod
    def backward(ctx, grad):
        weight, = ctx.saved_tensors

        # None: the ``weight`` argument receives no gradient.
        return grad * weight, None

GradMultiplier = _GradMultiplier.apply

@torch.jit.script
def blend(im0, im1, alpha: float):
    """Linear interpolation: weight ``alpha`` on im0, the remainder on im1."""
    weighted_first = im0 * alpha
    weighted_second = im1 * (1 - alpha)
    return weighted_first + weighted_second


@torch.jit.script
def brightness(im, factor: float):
    """Adjust brightness by blending the image toward all-black by ``factor``."""
    black = torch.zeros_like(im)
    return blend(im, black, factor)


@torch.jit.script
def contrast(im, factor: float):
    """Adjust contrast by blending toward the image's global mean intensity."""
    gray_level = im.mean()
    return blend(im, gray_level, factor)


@torch.jit.script
def saturation(im, factor: float):
    """Adjust saturation by blending toward the per-pixel channel mean.

    NOTE(review): ``im.mean(1)`` reduces dim 1, so this assumes an
    (N, C, H, W) layout -- confirm with callers.
    """
    channel_mean = im.mean(1).unsqueeze(1)
    return blend(im, channel_mean, factor)


@torch.jit.script
def adjust_gamma(im, gamma):
    """Gamma-correct an image expressed in [0, 255] space.

    Pixels are clamped to (0.1, 254.9) before normalizing, so the power is
    never taken at exactly 0 or 1.
    """
    normalized = im.clamp(0.1, 254.9) / 255
    return 255 * normalized.pow(gamma)


@torch.jit.script
def cross_entropy(x, y, weight):
    """Weighted cross entropy over the last dimension.

    ``x`` holds probabilities (the log is taken here), ``y`` the target
    distribution; the per-sample entropy is scaled by ``weight``.
    """
    log_probs = x.log()
    per_sample = -(y * log_probs).sum(dim=-1)
    return per_sample * weight

@META_ARCH_REGISTRY.register()
class AseNet(nn.Module):
    """
    Score-prediction network: a backbone plus a small head that outputs a
    10-bin score distribution per image (see :class:`AseNETHead` and
    :meth:`get_mean_single`).

    Three operating modes, selected by cfg:

    * default: supervised training against per-image label histograms.
    * ``LOG``: head frozen; each input's predicted mean score is printed and
      the image is dumped to ``val/`` named after that score. A dummy zero
      loss keeps the training loop happy.
    * ``IN_IMG``: the loaded image itself (plus global editing knobs: gamma,
      brightness, contrast, saturation, channel mixing) becomes the trainable
      parameter set, optimized by gradient ascent on the predicted score.

    NOTE(review): the original docstring linked arXiv:1708.02002 (the
    RetinaNet paper), which looks like a copy-paste leftover; removed.
    """

    def __init__(self, cfg):
        """Build the backbone + head and configure the requested mode."""
        super().__init__()

        self.device = torch.device(cfg.MODEL.DEVICE)

        # fmt: off
        self.in_feature              = cfg.MODEL.AseNET.IN_FEATURE
        self.in_img                  = cfg.MODEL.AseNET.IN_IMG
        self.log                     = cfg.MODEL.AseNET.LOG
        self.im                      = None
        # fmt: on

        self.backbone = build_backbone(cfg)
        fmap_shape = self.backbone.output_shape()[self.in_feature]
        self.head = AseNETHead(fmap_shape[0])

        if self.log:
            # Logging mode: nothing should actually train, but the optimizer
            # needs at least one parameter, hence this throw-away scalar.
            self.head.freeze()
            self.fake_para = nn.Parameter(torch.tensor([0.,]))

        if self.in_img:
            # Image-optimization mode: the image and a set of global editing
            # parameters are the only trainable weights.
            self.im_ori = cv2.imread(self.in_img)
            self.im = torch.tensor(self.im_ori, dtype=torch.float, device=self.device).permute(2,0,1)
            # Frozen downsampled copy of the original, used as the reference
            # for the high-frequency regularizer in forward().
            self.im_ref = self.im.clone().detach().unsqueeze(0)
            self.im_ref = nn.functional.interpolate(self.im_ref, (224, 336), mode="bilinear")

            self.im = nn.Parameter(self.im)
            self.head.freeze()
            # Editing knobs live in log-space (exp() applied in transform()),
            # so an initial value of 0 means "no change".
            self.gamma = nn.Parameter(torch.tensor([0.]))
            self.contrast = nn.Parameter(torch.tensor([0.]))
            self.brightness = nn.Parameter(torch.tensor([0.]))
            self.saturation = nn.Parameter(torch.tensor([0.]))
            # 1x1 conv weight initialized to the identity channel mix.
            self.channel_mix = nn.Parameter(torch.eye(3).view(3, 3, 1, 1))

        pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
        pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
        self.normalizer = lambda x: (x - pixel_mean) / pixel_std

        self.to(self.device)

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:

                * image: Tensor, image in (C, H, W) format.
                * label: Tensor, per-bin vote histogram (training mode).

        Returns:
            dict[str: Tensor]:
                mapping from a named loss to a tensor storing the loss. Used during training only.
        """
        if self.log:
            # Logging mode: print the predicted mean score and dump the image
            # named after it; return a no-op loss.
            image = batched_inputs[0]["image"].to(self.device).unsqueeze(0)
            feature = self.backbone(self.normalizer(image))[self.in_feature]
            pred = self.head(feature)

            # Compute the score once (the original evaluated it twice).
            mean_score = self.get_mean_single(pred[0])
            print(mean_score)
            im = image[0].detach().clamp(0, 255).permute(1, 2, 0).cpu().numpy()
            cv2.imwrite("val/{:.3f}.jpg".format(mean_score.cpu().numpy()), np.uint8(im))

            return {"loss": self.fake_para.pow(2)}
        if self.im is None:
            # Standard supervised training on (image, label-histogram) pairs.
            images = self.preprocess_image(batched_inputs)
            labels = torch.stack([x["label"].to(self.device) for x in batched_inputs]).float()

            feature = self.backbone(images.tensor)[self.in_feature]

            pred = self.head(feature)
            target = self.get_target(labels)
            # Weight each sample by its share of the batch's total vote count.
            weight = labels.sum(-1) / labels.sum()

            return self.losses(pred, target, weight)
        else:
            # Image-optimization mode. Boost gradients flowing into the image
            # so the raw pixels actually move under a normal learning rate.
            im = GradMultiplier(self.im.unsqueeze(0), 65535. / 255)

            transd_im = self.transform(im)

            im = self.normalizer(transd_im)
            im = nn.functional.interpolate(im, (224, 336), mode="bilinear")

            feature = self.backbone(im)[self.in_feature]
            pred = self.head(feature)

            # 3x3 binomial blur minus identity = high-pass filter; penalizing
            # its response on (edited - reference) keeps the edit close to the
            # original in the high-frequency band.
            filter_1d = torch.tensor([0.25, 0.5, 0.25], device=self.device).unsqueeze(0)

            filter_2d = filter_1d * filter_1d.t()
            filter_2d[1, 1] -= filter_2d.sum()
            # (Dropped the original's no-op .expand(1, 1, 3, 3).)
            filter_2d = filter_2d.view(1, 1, 3, 3)

            small_im = nn.functional.interpolate(self.im.unsqueeze(0), (224, 336), mode="bilinear")
            # Transpose N<->C so the single-channel filter runs per channel.
            diff = (small_im - self.im_ref).transpose(0, 1)
            diff = nn.functional.pad(diff, (0, 0, 1, 1), mode="replicate")
            high_freq = nn.functional.conv2d(diff, filter_2d)

            # Occasionally (~5% of iterations) snapshot the current edit and
            # an amplified diff map for inspection.
            if torch.rand((1,)) > 0.95:
                im = transd_im.squeeze(0).detach().clamp(0, 255).permute(1, 2, 0).cpu().numpy()
                diff = (small_im - self.im_ref) * 16 + 127
                diff = diff[0].detach().clamp(0, 255).permute(1, 2, 0).cpu().numpy()
                # Fixed filename typo ("otimized") and dropped the pointless
                # f-string prefixes.
                cv2.imwrite("opt/optimized.jpg", np.uint8(im))
                cv2.imwrite("opt/diff.jpg", np.uint8(diff))

            return {
                # Negative mean score: minimizing this maximizes the score.
                "loss": - self.get_mean_single(pred),
                "loss_fr": high_freq.pow(2).mean(),
                # Keep the channel-mixing matrix near identity.
                "loss_cr": nn.functional.mse_loss(
                    self.channel_mix.flatten(),
                    torch.eye(3, device=self.device).flatten(),
                    reduction="sum"
                ) * 16
            }

    def transform(self, im):
        """Apply the differentiable photo-editing pipeline to a batch image.

        All scalar knobs are exponentiated so the stored parameters are
        unconstrained while the effective factors stay positive.
        """
        t_im = adjust_gamma(im, self.gamma.exp())
        t_im = brightness(t_im, self.brightness.exp())
        t_im = nn.functional.conv2d(t_im, self.channel_mix)
        t_im = contrast(t_im, self.contrast.exp())
        t_im = saturation(t_im, self.saturation.exp())
        return t_im

    def losses(self, pred, target, weight):
        """Weighted cross entropy between predicted and target distributions."""
        return {
            "loss": cross_entropy(pred, target, weight).sum(),
        }

    @torch.no_grad()
    def get_target(self, labels):
        """Normalize label histograms into probability distributions.

        The epsilon guards against an all-zero histogram.
        """
        return labels / (labels.sum(-1, keepdim=True) + 1e-6)

    def get_mean_single(self, label):
        """Expected bin index (mean score in [0, 9]) of a 10-bin distribution."""
        return (torch.arange(10, dtype=torch.float, device=self.device) * label).sum() / label.sum()

    def preprocess_image(self, batched_inputs):
        """
        Normalize, pad and batch the input images.
        """
        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [self.normalizer(x) for x in images]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility)
        return images


class AseNETHead(nn.Module):
    """Small MLP head: global-average-pool a feature map, then emit a softmax
    distribution over 10 score bins.

    NOTE(review): ``fc2`` has 11 outputs but only the first 10 feed the
    softmax -- the last unit is unused; confirm the intent before changing it.
    """

    def __init__(self, input_dim):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, input_dim)
        self.fc2 = nn.Linear(input_dim, 11)

    def forward(self, x):
        pooled = nn.functional.adaptive_avg_pool2d(x, (1, 1)).flatten(1)
        hidden = nn.functional.relu(self.fc1(pooled))
        logits = self.fc2(hidden)[..., :10]
        return logits.softmax(-1)

    def freeze(self):
        """Disable gradient flow through every parameter of this head."""
        for param in self.parameters():
            param.requires_grad = False
