import torch
import torch.nn as nn
import cv2

from ..attack import Attack


class ATA(Attack):
    r"""
    Iterative Linf attack with an additional L2 penalty (weight ``lmd``) on
    the perturbation, plus a Grad-CAM-style attention-map helper.

    NOTE(review): the original docstring cited the BIM paper
    'Adversarial Examples in the Physical World' [https://arxiv.org/abs/1607.02533],
    but the ``lmd``-weighted L2 term and the attention-map helper do not match
    plain BIM — confirm the intended reference for "ATA".

    Distance Measure : Linf

    Arguments:
        model (nn.Module): model to attack. Must expose ``features(x)`` and
            ``logits(features)`` callables (used by :meth:`adv_loss` and
            :meth:`attention_map`).
        eps (float): maximum perturbation. (Default: 16/255)
        alpha (float): step size. (Default: 2/255)
        steps (int): number of attack iterations. (Default: 10)
        lmd (float): weight of the L2 perturbation penalty. (Default: 0.01)

    Shape:
        - images: :math:`(N, C, H, W)` where `N = number of batches`, `C = number of channels`,        `H = height` and `W = width`. It must have a range [0, 1].
        - labels: :math:`(N)` where each value :math:`y_i` is :math:`0 \leq y_i \leq` `number of labels`.
        - output: :math:`(N, C, H, W)`.

    Examples::
        >>> attack = torchattacks.ATA(model, eps=16/255, alpha=2/255, steps=10, lmd=0.01)
        >>> adv_images = attack(images, labels)
    """
    def __init__(self, model, eps=16/255, alpha=2/255, steps=10, lmd=0.01):
        super().__init__("ATA", model)
        self.eps = eps
        self.alpha = alpha
        self.steps = steps
        self.lmd = lmd
        self.model = model
        self._supported_mode = ['default', 'targeted']

    def forward(self, images, labels):
        r"""
        Overridden. Returns adversarial images projected onto the
        eps-ball around the originals, clipped to the valid [0, 1] range.
        """
        images = images.clone().detach().to(self.device)
        labels = labels.clone().detach().to(self.device)

        ori_images = images.clone().detach()
        # BUGFIX: the original computed an attention map here and resized it
        # with ``cv2.resize(atten_ori, ori_images.size, ...)`` — ``.size`` is
        # the bound method (not a size tuple) and ``cv2.resize`` cannot take a
        # torch tensor, so this crashed at runtime; the result was also never
        # used. The dead code (and a stray debug print) has been removed —
        # re-introduce it once the attention term is wired into the loss.

        for _ in range(self.steps):
            images.requires_grad = True

            loss1 = self.adv_loss(images, labels)
            # BUGFIX: the original called ``self.norm_loss(images, labels)``,
            # penalising ||labels - images|| instead of the perturbation.
            loss2 = self.norm_loss(ori_images, images)
            cost = loss1 + self.lmd * loss2

            # Update adversarial images
            grad = torch.autograd.grad(cost, images,
                                       retain_graph=False,
                                       create_graph=False)[0]

            adv_images = images + self.alpha * grad.sign()
            # Project back into the intersection of the eps-ball around the
            # originals and the valid image range (lower bound folded in via
            # the clamp on ``lower``, upper bound via the final clamp).
            lower = torch.clamp(ori_images - self.eps, min=0)
            upper = ori_images + self.eps
            projected = torch.min(torch.max(adv_images, lower), upper)
            images = torch.clamp(projected, max=1).detach()

        return images

    def adv_loss(self, images, labels, target_labels=None):
        """Cross-entropy classification loss; negated when targeted.

        Raises:
            ValueError: in targeted mode when ``target_labels`` is missing
                (the original silently crashed inside the loss instead).
        """
        criterion = nn.CrossEntropyLoss()
        features = self.model.features(images)
        logits = self.model.logits(features)
        if self._targeted:
            if target_labels is None:
                raise ValueError(
                    "target_labels must be provided for a targeted attack")
            return -criterion(logits, target_labels)
        return criterion(logits, labels)

    def attention_map(self, images, labels):
        """Grad-CAM-style attention map for the FIRST batch element only.

        Channel weights are the spatial means of d(logit[label])/d(features);
        the map is the ReLU of the weighted feature sum.
        NOTE(review): only ``images[0]`` / ``labels`` index 0 is used —
        presumably intended for batch size 1; confirm against callers.
        """
        features = self.model.features(images)
        logits = self.model.logits(features)
        loss_w = logits[0][labels]
        grad_w = torch.autograd.grad(loss_w, features,
                                     retain_graph=False,
                                     create_graph=False)[0]
        # Per-channel importance: mean gradient over the spatial dims.
        weights = torch.mean(grad_w[0, :, :, :], dim=(1, 2), keepdim=True)
        features_val = features[0, :, :, :]
        # Weighted sum over channels, then ReLU (vectorized form of the
        # original per-channel accumulation loop — same result).
        atten_x = torch.relu((weights * features_val).sum(dim=0))
        return atten_x

    def norm_loss(self, ori, adv):
        """L2 norm of the perturbation ``adv - ori``."""
        return torch.norm(adv - ori, 2)








