import numpy as np
import torch
import torch.nn.functional as F

from attack.base_attack import BaseAttack

class Attack(BaseAttack):
    """EAD (Elastic-net Attacks to DNNs): a targeted attack that minimizes an
    elastic-net (L1 + L2) distortion plus a hinge misclassification loss,
    using ISTA/FISTA-style proximal updates and a per-image binary search
    over the loss trade-off constant c.
    """

    def __init__(self, model, config):
        """
        :param model: model under attack; must expose ``.device``, ``.eval()``
                      and be callable on image batches.
        :param config: dict with keys 'kappa', 'beta', 'init_const',
                       'learning_rate', 'second_layer_loop', 'max_iterations'.
        """
        super().__init__('EAD', model, config, batch_size = 100, targeted = True, llc = False)
        # EN=True selects the elastic-net (beta*L1 + L2 + c*hinge) decision
        # rule when ranking candidate adversarial images; False uses the
        # pure-L1 rule (beta*L1 + c*hinge).
        self.EN = True
        if self.EN:
            print("\nEN Decision Rule")
        else:
            print('\nL1 Decision Rule')

    def attack_batch(self, images, targets):
        """Craft targeted adversarial examples for one batch of images.

        :param images: numpy array of shape (batch_size, C, H, W) with pixels
                       in [0, 1] (4-D assumed — see the [1, 2, 3] axis lists
                       in the distance computations).
        :param targets: per-image target class indices (a 10-class problem is
                        assumed — see the np.eye(10) one-hot construction).
        :return: float32 numpy array of adversarial images; rows stay zero
                 for images where the attack never succeeded.
        """
        print("\n")

        # The caller must slice the data set into chunks of exactly
        # self.batch_size images (e.g. 1000 images -> 10 calls of 100);
        # batch_size itself is fixed in BaseAttack's constructor call above.
        assert len(images) == self.batch_size

        def attack_achieved(pre_softmax, target_class):
            # Success: the target logit still wins even after subtracting the
            # confidence margin kappa from it. Work on a copy so the caller's
            # score array is never mutated.
            adjusted = np.array(pre_softmax, copy=True)
            adjusted[target_class] -= self.config['kappa']
            return np.argmax(adjusted) == target_class

        def ISTA(new, old):
            # Iterative Shrinkage-Thresholding step: soft-threshold the
            # perturbation (new - old) by beta, then clip to valid pixels.
            with torch.no_grad():
                sub = new - old
                var_beta = torch.FloatTensor(np.ones(shape=sub.shape, dtype=float) * self.config['beta']).to(self.model.device)
                # Shrink every perturbation component towards zero by beta
                # (components smaller than beta in magnitude become zero).
                cropped_sub = torch.max(torch.abs(sub) - var_beta, torch.zeros(sub.shape, device=self.model.device)) * sub.sign().to(self.model.device)
                fist_new = old + cropped_sub
                return torch.clamp(input=fist_new, min=0.0, max=1.0)

        var_images = torch.from_numpy(images).to(self.model.device)
        var_images.requires_grad = True
        copy_images = np.copy(images)

        # Per-image bounds for the binary search over the trade-off constant c.
        const_origin = np.ones(shape = self.batch_size, dtype = float) * self.config['init_const']
        const_upper_bound = [1e10] * self.batch_size
        const_lower_bound = np.zeros(self.batch_size)

        # One-hot encode the target labels (10-class problem assumed).
        targets_in_one_hot = torch.from_numpy(
            np.eye(10)[np.asarray(targets)[:self.batch_size]]).to(self.model.device)

        # Best results found so far across all binary-search rounds.
        best_l2 = [1e10] * self.batch_size  # NOTE: stores the decision-rule loss, despite the name
        adv_images = np.zeros(var_images.size())
        current_prediction_class = [-1] * self.batch_size
        flag = [False] * self.batch_size

        self.model.eval()

        # The kappa margin tensor is loop-invariant: build it once.
        kappa_update = torch.FloatTensor([self.config['kappa']] * self.batch_size).to(self.model.device)

        for search_for_const in range(self.config['second_layer_loop']):

            slack_var_images = torch.from_numpy(copy_images).to(self.model.device)
            slack_var_images.requires_grad = True
            old_images = slack_var_images.clone()

            # Plain SGD on the slack variable (FISTA's extrapolated sequence).
            optimizer_old = torch.optim.SGD([slack_var_images], lr=self.config['learning_rate'])
            # Rebuilt every round because const_origin changes between rounds.
            var_const = torch.from_numpy(const_origin).to(self.model.device)
            print("\tsecond_layer_loop {}:".format(search_for_const))

            for iter_times in range(self.config['max_iterations']):
                # Optimize the slack images with the L2 + c * hinge loss.
                output_old = self.model(slack_var_images).to(self.model.device)
                l2dist_old = torch.sum((slack_var_images - var_images) ** 2, [1, 2, 3])
                # Hinge loss: (best non-target logit) - (target logit), floored at -kappa.
                target_loss_old = torch.max((output_old - 1e10 * targets_in_one_hot).max(1)[0] - (output_old * targets_in_one_hot).sum(1), -1 * kappa_update)
                const_loss_old = var_const * target_loss_old
                loss_old = l2dist_old.sum() + const_loss_old.sum()

                optimizer_old.zero_grad()
                loss_old.backward()
                optimizer_old.step()

                # FISTA update: proximal (ISTA) projection, then momentum
                # extrapolation with coefficient k/(k+3).
                new_images = ISTA(slack_var_images, var_images)
                slack_var_images.data = new_images.data + ((iter_times / (iter_times + 3.0)) * (new_images - old_images)).data
                old_images = new_images.clone()

                # Evaluate the candidate adversarial images.
                output = self.model(new_images)
                l1dist = torch.sum(torch.abs(new_images - var_images), [1, 2, 3])
                l2dist = torch.sum((new_images - var_images) ** 2, [1, 2, 3])
                target_loss = torch.max((output - 1e10 * targets_in_one_hot).max(1)[0] - (output * targets_in_one_hot).sum(1), -1 * kappa_update)

                if self.EN:
                    # Elastic-net decision rule: beta*L1 + L2 + c*hinge.
                    decision_loss = self.config['beta'] * l1dist + l2dist + var_const * target_loss
                else:
                    # Pure L1 decision rule: beta*L1 + c*hinge.
                    decision_loss = self.config['beta'] * l1dist + var_const * target_loss

                # Keep the lowest-loss successful example per image.
                for i, (dist, score, img) in enumerate(
                        zip(decision_loss.data.cpu().numpy(), output.data.cpu().numpy(), new_images.data.cpu().numpy())):
                    if dist < best_l2[i] and attack_achieved(score, targets[i]):
                        best_l2[i] = dist
                        current_prediction_class[i] = np.argmax(score)
                        adv_images[i] = img
                        flag[i] = True

            # Binary-search update of the per-image constant c.
            for i in range(self.batch_size):
                if current_prediction_class[i] == targets[i] and current_prediction_class[i] != -1:
                    # Attack succeeded: shrink c to favor smaller distortion.
                    const_upper_bound[i] = min(const_upper_bound[i], const_origin[i])
                    if const_upper_bound[i] < 1e10:
                        const_origin[i] = (const_lower_bound[i] + const_upper_bound[i]) / 2.0
                else:
                    # Attack failed: raise c to weight the hinge loss more.
                    const_lower_bound[i] = max(const_lower_bound[i], const_origin[i])
                    if const_upper_bound[i] < 1e10:
                        # BUG FIX: the original assigned the midpoint scalar to
                        # the whole const_origin array, clobbering the search
                        # state of every other image in the batch.
                        const_origin[i] = (const_lower_bound[i] + const_upper_bound[i]) / 2.0
                    else:
                        # No upper bound found yet: grow c geometrically.
                        const_origin[i] *= 10

        count = sum(1 for succeeded in flag if succeeded)
        print("Success: {} images, total {} images".format(count, self.batch_size))

        return np.array(adv_images).astype(np.float32)