
import numpy as np
import torch
import copy
from torch.autograd import Variable

from .attack import Attack

class DEEPFOOL(Attack):
    """DeepFool attack (Moosavi-Dezfooli et al., 2016).

    Iteratively moves each sample toward its nearest linearized decision
    boundary until the prediction changes (untargeted) or matches a given
    target label (targeted), then applies a small overshoot.
    """

    def __init__(self, model=None, device=None, IsTargeted=None, config=None, nb_candidate=10):
        """
        @description: DeepFool attack
        @param {
            model: model under test
            device: device (GPU)
            IsTargeted: whether the attack is targeted
            config: attack hyper-parameters (overshoot, max_iter)
            nb_candidate: number of top classes considered as boundary candidates
        }
        @return: None
        """
        super(DEEPFOOL, self).__init__(model, device, IsTargeted)
        # NOTE: labels fed to this attack must start at 0, not 1.
        self.criterion = torch.nn.CrossEntropyLoss()
        self._parse_params(config)
        self.nb_candidate = nb_candidate
        # Valid pixel range the adversarial image is clamped to.
        self.clip_min = 0.0
        self.clip_max = 1.0

    def _parse_params(self, config):
        """
        @description: Read hyper-parameters from the config mapping.
        @param {
            overshoot: final perturbation is scaled by (1 + overshoot), default 0.02
            max_iter: maximum number of boundary-crossing iterations, default 50
        }
        @return: None
        """
        # Tolerate the config=None default from __init__ instead of crashing
        # on None.get(...).
        config = config or {}
        self.overshoot = float(config.get("overshoot", 0.02))
        self.max_iter = int(config.get("max_iter", 50))

    def generate(self, images, labels):
        """
        @description: Generate DeepFool adversarial examples.
        @param {
            images: original samples (batch tensor)
            labels: ground-truth labels (untargeted) or target labels (targeted)
        }
        @return: adv_xs{torch.Tensor} adversarial samples clamped to [clip_min, clip_max]
        """
        device = self.device
        targeted = self.IsTargeted

        x = images.to(device)
        labels = labels.to(device)
        model = self.model.to(device)
        # Forward once without grad just to discover the number of classes.
        with torch.no_grad():
            logits = model(x)
        self.nb_classes = logits.size(-1)
        assert self.nb_candidate <= self.nb_classes, 'nb_candidate should not be greater than nb_classes'

        adv_x = x.clone().requires_grad_()

        iteration = 0
        logits = model(adv_x)
        current = logits.argmax(dim=1)
        if current.size() == ():
            # 0-d prediction (un-batched sample): promote to a 1-element batch.
            current = torch.tensor([current])
        w = torch.squeeze(torch.zeros(x.size()[1:])).to(device)
        r_tot = torch.zeros(x.size()).to(device)  # accumulated perturbation
        original = current

        if not targeted:
            # Keep perturbing while any sample is still classified as its
            # original label.
            # BUGFIX: was `(current == original).any` — a bound method, which is
            # always truthy, so the loop never stopped early and always ran
            # max_iter iterations.
            while (current == original).any() and iteration < self.max_iter:
                predictions_val = logits.topk(self.nb_candidate)[0]
                # gradients: [batch_size, nb_candidate, *image_shape]
                gradients = torch.stack(jacobian(predictions_val, adv_x, self.nb_candidate), dim=1)
                with torch.no_grad():
                    for idx in range(x.size(0)):
                        pert = float('inf')
                        if current[idx] != original[idx]:
                            continue  # this sample is already misclassified
                        # Pick the candidate class whose linearized boundary
                        # is closest to the current point.
                        for k in range(1, self.nb_candidate):
                            w_k = gradients[idx, k, ...] - gradients[idx, 0, ...]
                            f_k = predictions_val[idx, k] - predictions_val[idx, 0]
                            # small epsilon avoids a zero step when f_k == 0
                            pert_k = (f_k.abs() + 0.00001) / w_k.view(-1).norm()
                            if pert_k < pert:
                                pert = pert_k
                                w = w_k

                        # Minimal step that crosses the chosen boundary.
                        r_i = pert * w / w.view(-1).norm()
                        r_tot[idx, ...] = r_tot[idx, ...] + r_i

                adv_x = torch.clamp(r_tot + x, self.clip_min, self.clip_max).requires_grad_()
                logits = model(adv_x)
                current = logits.argmax(dim=1)
                if current.size() == ():
                    current = torch.tensor([current])
                iteration = iteration + 1
        else:
            # Targeted: push each sample until it is classified as its target
            # label.
            # BUGFIX: same missing-parentheses `.any` bug as the untargeted
            # branch.
            while (current != labels).any() and iteration < self.max_iter:
                # Column 0: current top-1 logit; column 1: target-class logit.
                predictions_val, _class_indexs = logits.topk(1)
                indices = labels.unsqueeze(1)
                target_label_predict = logits.gather(1, indices)
                predictions_val = torch.cat((predictions_val, target_label_predict), dim=1)
                # BUGFIX: use a local candidate count instead of mutating
                # self.nb_candidate, which corrupted any later untargeted call
                # on the same attack instance.
                nb_candidate = 2
                gradients = torch.stack(jacobian(predictions_val, adv_x, nb_candidate), dim=1)
                with torch.no_grad():
                    for idx in range(x.size(0)):
                        pert = float('inf')
                        if current[idx] == labels[idx]:
                            continue  # already classified as the target
                        for k in range(1, nb_candidate):
                            w_k = gradients[idx, k, ...] - gradients[idx, 0, ...]
                            f_k = predictions_val[idx, k] - predictions_val[idx, 0]
                            pert_k = (f_k.abs() + 0.00001) / w_k.view(-1).norm()
                            if pert_k < pert:
                                pert = pert_k
                                w = w_k

                        r_i = pert * w / w.view(-1).norm()
                        r_tot[idx, ...] = r_tot[idx, ...] + r_i

                adv_x = torch.clamp(r_tot + x, self.clip_min, self.clip_max).requires_grad_()
                logits = model(adv_x)
                current = logits.argmax(dim=1)
                if current.size() == ():
                    current = torch.tensor([current])
                iteration = iteration + 1

        # Overshoot slightly past the boundary, then clamp to the valid range.
        adv_x = torch.clamp((1 + self.overshoot) * r_tot + x, self.clip_min, self.clip_max)
        return adv_x

def jacobian(predictions, x, nb_classes):
    """Return the gradient of each of the first `nb_classes` prediction
    columns with respect to `x`, as a list of tensors shaped like `x`.

    The graph is retained between calls so every column can be
    differentiated from the same forward pass.
    """
    def _column_grad(column):
        grad, = torch.autograd.grad(
            column, x,
            grad_outputs=torch.ones_like(column),
            retain_graph=True,
        )
        return grad

    return [_column_grad(predictions[:, class_ind]) for class_ind in range(nb_classes)]