import numpy as np
import torch
import torch.nn.functional as F

from torch.autograd.gradcheck import zero_gradients
from attack.base_attack import BaseAttack

class Attack(BaseAttack):
    """JSMA (Jacobian-based Saliency Map Attack), Papernot et al., 2016.

    Targeted, single-image attack: each iteration picks the *pair* of input
    features with the highest saliency-map score (formulas 8-10 of the paper)
    and perturbs both by ``theta``, until the model predicts the target class,
    the feature budget (``gamma``) is exhausted, or the search space is empty.

    Config keys read here:
        theta: per-step perturbation added to each chosen feature; its sign
            decides whether pixel intensities are increased or decreased.
        gamma: maximum fraction of features allowed to be modified.

    NOTE(review): the module-level ``zero_gradients`` import is unused after
    this fix — it was removed from ``torch.autograd.gradcheck`` in
    PyTorch >= 1.9 and the import line should be deleted.
    """

    def __init__(self, model, config):
        super().__init__('JSMA', model, config, batch_size=1, targeted=True, llc=False)
        self.model = self.model.eval()

    def compute_jacobian(self, input):
        """Return the Jacobian d(output)/d(input) as a (num_classes, num_features) tensor.

        Runs one backward pass per output class, selecting each class with a
        one-hot gradient mask; ``input.grad`` is cleared between passes so the
        per-class gradients do not accumulate.
        """
        self.model.eval()
        output = self.model(input)

        num_features = int(np.prod(input.shape[1:]))
        num_classes = output.size(1)
        jacobian = torch.zeros([num_classes, num_features])
        mask = torch.zeros(output.size()).to(self.model.device)
        for i in range(num_classes):
            mask[:, i] = 1
            # BUG FIX: zero_gradients() was removed from PyTorch; clear the
            # accumulated gradient on the input directly instead.
            if input.grad is not None:
                input.grad.detach_()
                input.grad.zero_()
            output.backward(mask, retain_graph=True)
            # BUG FIX: read the public ``grad`` attribute, not private ``_grad``.
            jacobian[i] = input.grad.squeeze().view(-1, num_features).clone()
            mask[:, i] = 0  # reset the one-hot mask for the next class

        return jacobian.to(self.model.device)

    def saliency_map(self, jacobian, target_class, increasing, search_space, total_num_features):
        """Pick the two features with the highest joint saliency score.

        :param jacobian: (num_classes, num_features) forward derivatives.
        :param target_class: index (or length-1 array) of the target class.
        :param increasing: whether pixel intensities are increased or decreased.
        :param search_space: boolean mask of features still eligible for change.
        :param total_num_features: flattened input size.
        :return: a pair ``(p, q)`` of flat feature indices.
        """
        domain = torch.eq(search_space, 1).float()
        # the sum of all features' derivatives with respect to each class
        all_sum = torch.sum(jacobian, dim=0, keepdim=True)
        target_grad = jacobian[target_class]  # forward derivative of the target class
        others_grad = all_sum - target_grad  # sum of forward derivatives of other classes

        # coefficient used to blank out features outside the search domain
        if increasing:
            increase_coef = 2 * (torch.eq(domain, 0)).float().to(self.model.device)
        else:
            increase_coef = -1 * 2 * (torch.eq(domain, 0)).float().to(self.model.device)
        increase_coef = increase_coef.view(-1, total_num_features)

        # sum of the target forward derivative over every pair of features
        target_tmp = target_grad.clone()
        target_tmp -= increase_coef * torch.max(torch.abs(target_grad))
        alpha = target_tmp.view(-1, 1, total_num_features) + target_tmp.view(-1, total_num_features, 1)
        # sum of the other-classes forward derivative over every pair of features
        others_tmp = others_grad.clone()
        others_tmp += increase_coef * torch.max(torch.abs(others_grad))
        beta = others_tmp.view(-1, 1, total_num_features) + others_tmp.view(-1, total_num_features, 1)

        # zero out the diagonal, where a feature would pair with itself
        tmp = np.ones((total_num_features, total_num_features), int)
        np.fill_diagonal(tmp, 0)
        # bool is the modern mask dtype (uint8/.byte() masks are deprecated)
        zero_diagonal = torch.from_numpy(tmp).bool().to(self.model.device)

        # Per the saliency-map definition (formulas 8 and 9 in the paper),
        # elements that do not satisfy the sign requirement are blanked out.
        if increasing:
            mask1 = torch.gt(alpha, 0.0)
            mask2 = torch.lt(beta, 0.0)
        else:
            mask1 = torch.lt(alpha, 0.0)
            mask2 = torch.gt(beta, 0.0)
        # apply the combined mask to the saliency map
        mask = torch.mul(torch.mul(mask1, mask2), zero_diagonal.view_as(mask1))
        # multiplication according to formula 10 in the paper
        saliency_map = torch.mul(torch.mul(alpha, torch.abs(beta)), mask.float())
        # recover the two most significant pixels from the flat argmax
        max_value, max_idx = torch.max(saliency_map.view(-1, total_num_features * total_num_features), dim=1)
        p = max_idx // total_num_features
        q = max_idx % total_num_features
        return p, q

    def attack_batch(self, images, targets):
        """Run JSMA on a single image; returns the adversarial image as a numpy array."""
        assert images.shape[0] == 1, '只攻击单个图像'
        image = np.copy(images)
        var_image = torch.from_numpy(image).to(self.model.device).float()
        var_image.requires_grad = True

        increasing = self.config['theta'] > 0

        num_features = int(np.prod(image.shape[1:]))
        shape = var_image.size()

        # each iteration perturbs two features, hence the division by 2
        max_iters = int(np.ceil(num_features * self.config['gamma'] / 2.0))

        # masked search domain: pixels already at the top (or bottom) are skipped
        if increasing:
            search_domain = torch.lt(var_image, 0.99).to(self.model.device)
        else:
            search_domain = torch.gt(var_image, 0.01).to(self.model.device)
        search_domain = search_domain.view(num_features)

        self.model.eval().to(self.model.device)
        output = self.model(var_image)
        current = torch.max(output.data, 1)[1].cpu().numpy()

        iter = 0
        while (iter < max_iters) and (current[0] != targets[0]) and (search_domain.sum() != 0):
            jacobian = self.compute_jacobian(input=var_image)
            # saliency map yields the two most influential pixels
            p1, p2 = self.saliency_map(jacobian, targets, increasing, search_domain, num_features)

            # BUG FIX: in-place edits on a view of a leaf tensor that requires
            # grad raise a RuntimeError; perform the update under no_grad.
            with torch.no_grad():
                var_image_flatten = var_image.view(-1, num_features)
                var_image_flatten[0, p1] += self.config['theta']
                var_image_flatten[0, p2] += self.config['theta']
                new_sample = torch.clamp(var_image_flatten, min=0.0, max=1.0)

            # perturbed pixels leave the search domain
            search_domain[p1] = 0
            search_domain[p2] = 0
            # fresh leaf for the next Jacobian computation
            var_image = new_sample.view(shape).clone().detach().requires_grad_(True)

            output = self.model(var_image)
            current = torch.max(output.data, 1)[1].cpu().numpy()
            iter += 1

        adv_image = var_image.data.cpu().numpy()
        return adv_image

    def attack(self, images, targets):
        """Attack every (image, target) pair one at a time; returns a numpy array of adversarial images."""
        assert len(images) == len(targets)
        print('JSMA attack perturbs the samples now ...... \n')
        adv_images = []
        for idx in range(len(images)):
            adv_image = self.attack_batch(images=images[idx: idx + 1], targets=targets[idx: idx + 1])
            adv_images.extend(adv_image)
            # BUG FIX: report progress against the real total (was hard-coded 1000)
            print("Caculating in iteration {},total {}".format(idx + 1, len(images)))
        return np.array(adv_images)