import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd.gradcheck import zero_gradients


from attack.base_attack import BaseAttack
from attack.methods.DEEPFOOL import Attack as DEEPFOOL_Attack 

class Attack(BaseAttack):
    """Universal Adversarial Perturbation (UAP) attack.

    Iterates over a training loader and, whenever the current universal
    perturbation fails to change the model's prediction on an image, runs
    DeepFool on the perturbed image to find a minimal extra perturbation.
    The accumulated perturbation is projected back onto the L-inf ball of
    radius ``config['epsilon']`` after each update. The loop stops once the
    fooling rate on a held-out validation split reaches
    ``config['fooling_rate']`` or ``config['max_iter_universal']`` passes
    have been made.
    """

    def __init__(self, model, config):
        """
        :param model: target model wrapper; must expose ``get_train_valid_loader``,
                      ``eval``, ``device`` and be callable on input batches.
        :param config: dict with at least 'fooling_rate', 'max_iter_universal',
                       'max_iter_deepfool' and 'epsilon'.
        """
        super().__init__('UAP', model, config, batch_size=None, targeted=False, llc=False)
        # batch_size=1: UAP accumulates the perturbation one image at a time.
        self.train_dataset, self.valid_dataset = self.model.get_train_valid_loader(
            batch_size=1,
            valid_size=0.1,
            transform_train=True,
            shuffle=True)

    def projection_per(self, perturbation, eps):
        """Project *perturbation* onto the L-inf ball of radius *eps*.

        Element-wise clipping to [-eps, eps], which is exactly
        sign(p) * min(|p|, eps). Handles both torch tensors and numpy
        arrays: the original used np.sign/np.minimum, which silently
        converted a torch input to a numpy array and broke the later
        torch arithmetic that consumes the result.

        :param perturbation: the perturbation (torch.Tensor or np.ndarray)
        :param eps: the magnitude of the perturbation in L-inf norm
        :return: projected perturbation of the same type as the input
        """
        if torch.is_tensor(perturbation):
            return torch.clamp(perturbation, -eps, eps)
        return np.clip(perturbation, -eps, eps)

    def attack_batch(self, images):
        """Compute the universal perturbation from the training loader.

        *images* is unused here (kept for interface compatibility with the
        other attack classes); the perturbation is estimated on
        ``self.train_dataset`` and its fooling rate is measured on
        ``self.valid_dataset``.

        :return: universal perturbation tensor shaped like one input batch
        """
        print('Starting to compute the universal perturbation with the training dataset ......\n')

        iteration = 0
        ratio = 0.0
        # Python 3 fix: the original `iter(...).next()` is Python-2-only and
        # raises AttributeError; use the next() builtin instead.
        universal_per = torch.zeros(size=next(iter(self.train_dataset))[0].shape)

        # DeepFool keeps no state between calls — construct it once instead of
        # once per training image.
        deepfool = DEEPFOOL_Attack(model=self.model, config=self.config)

        while ratio < self.config['fooling_rate'] and iteration < self.config['max_iter_universal']:

            print('Computing in iteration: {}'.format(iteration))
            self.model.eval()

            for index, (image, label) in enumerate(self.train_dataset):

                original_predict = torch.max(self.model(image.to(self.model.device)), 1)[1]
                perturbed_image = torch.clamp(image + universal_per, 0.0, 1.0)
                current_predict = torch.max(self.model(perturbed_image.to(self.model.device)), 1)[1]

                # Prediction unchanged -> the universal perturbation does not
                # fool the model on this image yet; push it past the decision
                # boundary with DeepFool.
                if torch.equal(original_predict, current_predict):
                    _, deepfool_per, deepfool_iteration = deepfool.attack_batch(images=perturbed_image.numpy())
                    # Accept the update only if DeepFool actually converged
                    # before hitting its own iteration cap.
                    if deepfool_iteration < self.config['max_iter_deepfool'] - 1:
                        universal_per += torch.from_numpy(deepfool_per)
                        universal_per = self.projection_per(perturbation=universal_per, eps=self.config['epsilon'])

            iteration += 1

            print('Computing the fooling rate of the universal adversarial perturbation ......')

            success, total = 0.0, 0.0
            for index, (validation_image, label) in enumerate(self.valid_dataset):
                label = label.to(self.model.device)
                original_predict = torch.max(self.model(validation_image.to(self.model.device)), 1)[1]
                perturbed_validation_image = torch.clamp(validation_image + universal_per, 0.0, 1.0)
                current_predict = torch.max(self.model(perturbed_validation_image.to(self.model.device)), 1)[1]

                # Count only genuine fooling: prediction flipped AND the new
                # prediction is actually wrong.
                if original_predict != current_predict and current_predict != label:
                    success += 1
                total += 1
            # Guard against an empty validation split (division by zero).
            ratio = success / total if total else 0.0
            print('Current fooling rate is {}/{}={}\n'.format(success, total, ratio))
        return universal_per

    def attack(self, images, labels):
        """Apply the computed universal perturbation to a batch of images.

        :param images: numpy array of shape (N, C, H, W) with values in [0, 1]
        :param labels: unused; kept for interface compatibility
        :return: numpy array of adversarial images, clipped to [0, 1]
        """
        print(len(self.train_dataset), len(self.valid_dataset))
        universal_per = self.attack_batch(images)

        # Move the perturbation to numpy once so all arithmetic below stays in
        # numpy instead of implicitly mixing ndarray and tensor operands.
        if torch.is_tensor(universal_per):
            universal_per = universal_per.detach().cpu().numpy()

        # Broadcasting the (1, C, H, W) perturbation over the batch replaces
        # the original per-image loop with identical results.
        return np.clip(np.asarray(images) + universal_per, 0.0, 1.0)