import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd.gradcheck import zero_gradients

from attack.base_attack import BaseAttack

class Attack(BaseAttack):
    """DeepFool attack (Moosavi-Dezfooli et al., CVPR 2016).

    Iteratively finds a minimal perturbation that pushes a single image
    across the nearest (linearized) decision boundary of the model.
    """

    def __init__(self, model, config):
        # DeepFool is inherently untargeted and operates on one image at a time.
        super().__init__('DEEPFOOL', model, config, batch_size = 1, targeted = False, llc = False)

    @staticmethod
    def _zero_gradient(var):
        """Clear accumulated gradients on a leaf tensor.

        Drop-in replacement for torch.autograd.gradcheck.zero_gradients,
        which was removed in torch >= 1.9. NOTE(review): the now-stale
        top-of-file import of zero_gradients can be deleted once nothing
        else in the project uses it.
        """
        if var.grad is not None:
            var.grad.detach_()
            var.grad.zero_()

    def attack_batch(self, images):
        """Perturb a single image with DeepFool.

        Args:
            images: numpy array of shape (1, ...); pixel values are assumed
                to lie in [0, 1] — the clipping below relies on that range
                (TODO confirm against the data pipeline).

        Returns:
            (adv_image, perturbation, iteration): adv_image is the main
            DeepFool output; the other two values are consumed by the UAP
            attack.
        """
        assert images.shape[0] == 1, '只攻击单个图像'
        image = np.copy(images)
        var_image = torch.from_numpy(image).to(self.model.device).float()
        var_image.requires_grad = True

        self.model.eval()
        prediction = self.model(var_image)
        original = torch.max(prediction, 1)[1]
        current = original

        # Class indices sorted by descending logit; I[0, 0] is the original top class.
        I = np.argsort(prediction.data.cpu().numpy() * -1)
        # Generalized: the class count was previously hard-coded to 10.
        num_classes = prediction.shape[1]
        perturbation = np.zeros(image.shape, dtype=np.float32)
        iteration = 0

        while (original == current) and (iteration < self.config['max_iterations']):

            self._zero_gradient(var_image)

            self.model.eval()

            prediction_iter = self.model(var_image)
            current = torch.max(prediction_iter, 1)[1]
            # Gradient of the original top-class logit w.r.t. the current input.
            prediction_iter[0, I[0, 0]].backward(retain_graph=True)
            grad_original = np.copy(var_image.grad.data.cpu().numpy())

            # For every other class, linearize its decision boundary and keep
            # the closest one (smallest normalized logit gap).
            closest_dist = 1e10
            closest_grad = None
            for i in range(1, num_classes):
                self._zero_gradient(var_image)
                prediction_iter[0, I[0, i]].backward(retain_graph=True)
                grad_current = var_image.grad.data.cpu().numpy().copy()

                # w_k (gradient difference) and f_k (logit gap) in DeepFool notation.
                grad = grad_current - grad_original
                grad_f = (prediction_iter[0, I[0, i]] - prediction_iter[0, I[0, 0]]).detach().data.cpu().numpy()

                # Distance to the linearized boundary of class I[0, i];
                # 1e-15 guards against a zero-gradient division.
                dist_i = np.abs(grad_f) / (np.linalg.norm(grad.flatten()) + 1e-15)
                if dist_i < closest_dist:
                    closest_dist = dist_i
                    closest_grad = grad

            # Accumulate the minimal step; 1e-4 nudges just past the boundary.
            per_i = (closest_dist + 1e-4) * closest_grad / np.linalg.norm(closest_grad)
            perturbation = perturbation + per_i

            # Rebuild the leaf input from the accumulated (overshot) perturbation.
            temp_image = np.clip((1 + self.config['overshoot']) * perturbation + image, 0.0, 1.0)
            # .float() added: `image` may be float64, and the model expects the
            # same float32 input as the initial conversion above.
            var_image = torch.from_numpy(temp_image).to(self.model.device).float()
            var_image.requires_grad = True

            iteration += 1

        adv_image = np.clip(image + (1 + self.config['overshoot']) * perturbation, 0.0, 1.0)
        # adv_image is DeepFool's primary result; perturbation and iteration
        # are reused by the UAP attack.
        return adv_image, perturbation, iteration

    def attack(self, images, labels):
        """Run DeepFool independently on each image.

        `labels` is unused: the attack is untargeted and derives the source
        class from the model's own prediction.
        """
        adv_images = []

        print('DeepFool attack perturbs the images now ......\n')

        for i in range(len(images)):
            adv_image, _, _ = self.attack_batch(images=images[i: i + 1])
            adv_images.extend(adv_image)
        return np.array(adv_images)
