import numpy as np
import torch
import torch.nn.functional as F

from attack.base_attack import BaseAttack

class Attack(BaseAttack):
    """Untargeted OPTMARGIN attack (Carlini&Wagner-style L2 optimization).

    The perturbation is optimized so the model is fooled not only at the
    perturbed image itself but also at the image shifted along a set of
    orthogonal noise directions, giving the adversarial example a decision
    margin in every sampled direction.
    """

    def __init__(self, model, config):
        super().__init__('OPTMARGIN', model, config, batch_size = 100, targeted = False, llc = False)

    def attack_batch(self, images, labels):
        """Attack one batch of images.

        Args:
            images: numpy array of shape (batch, channel, H, W); pixel values
                are assumed to lie in [lower_bound, upper_bound] from the
                config (the tanh/clamp steps below assume [0, 1] — TODO
                confirm the config matches).
            labels: ground-truth class indices, one per image. The one-hot
                encoding below hard-codes 10 classes.

        Returns:
            float32 numpy array of adversarial images. Rows remain all-zero
            for images where no adversarial example was found (original
            behaviour, kept for caller compatibility).
        """
        print("\n")

        if len(images) != self.batch_size:
            raise ValueError(
                "attack_batch expects exactly {} images, got {}".format(self.batch_size, len(images)))

        # --- Noise directions ------------------------------------------------
        # Draw random Gaussian columns and orthonormalize them with QR, then
        # rescale to the configured magnitude.  The LAST direction is zeroed
        # so the clean (noise-free) perturbed image is always among the
        # evaluated points — its prediction drives the success check below.
        channel, width, length = images.shape[1:]
        noise_raw = np.random.normal(
            scale=self.config['noise_magnitude'],
            size=(channel * length * width, self.config['noise_count'])).astype(np.float32)
        noise_unit_vector, _ = np.linalg.qr(noise_raw)

        assert noise_unit_vector.shape[1] == self.config['noise_count']

        noise_vector = noise_unit_vector * (1.0 / np.max(np.abs(noise_unit_vector))) * self.config['noise_magnitude']
        noise_vector = noise_vector.transpose((1, 0)).reshape(
            (self.config['noise_count'], channel, width, length))
        noise_vector[self.config['noise_count'] - 1] = 0  # last direction = no noise
        noise_vector = torch.from_numpy(noise_vector).to(self.model.device)
        noise_vector.requires_grad = False

        # --- Change of variables --------------------------------------------
        # Map pixels from [lower, upper] to (-inf, inf) via arctanh so the
        # optimization variable is unconstrained; the 0.9999 factor keeps the
        # argument strictly inside (-1, 1) and avoids arctanh(+-1) = +-inf.
        mid_point = (self.config['upper_bound'] + self.config['lower_bound']) * 0.5
        half_range = (self.config['upper_bound'] - self.config['lower_bound']) * 0.5
        arctanh_images = np.arctanh((images - mid_point) / half_range * 0.9999)
        var_images = torch.from_numpy(arctanh_images).to(self.model.device)
        var_images.requires_grad = True

        # --- Binary-search state for the loss-balancing constant c ----------
        const_origin = np.ones(shape=self.batch_size, dtype=float) * self.config['init_const']
        const_upper_bound = [1e10] * self.batch_size
        const_lower_bound = np.zeros(self.batch_size)

        # One-hot encoding of the true labels (10 classes hard-coded).
        targets_in_one_hot = torch.from_numpy(np.eye(10)[labels]).to(self.model.device)

        # Best result found so far, per sample.
        best_l2 = [1e10] * self.batch_size
        adv_images = np.zeros(var_images.size())
        current_prediction_class = [-1] * self.batch_size

        def un_targeted_attack_achieved(pre_softmax, true_class):
            # Success means some other class still wins after the true
            # class's logit is boosted by kappa, i.e. the example is
            # misclassified with at least a kappa margin.  Mutates the
            # (per-iteration numpy copy of the) score in place.
            pre_softmax[true_class] += self.config['kappa']
            return np.argmax(pre_softmax) != true_class

        self.model.eval()

        for search_for_const in range(self.config['second_layer_loop']):

            # Fresh perturbation variable for every constant.
            per = torch.zeros_like(var_images).float()
            per.requires_grad = True

            # Adam on the perturbation, default betas.
            optimizer = torch.optim.Adam([per], lr=self.config['learning_rate'])
            # Rebuilt every round on purpose: torch.from_numpy shares memory
            # with const_origin, which the binary search mutates below.
            var_const = torch.from_numpy(const_origin).to(self.model.device)
            print("\tsecond_layer_loop {}:".format(search_for_const))

            for iter_times in range(self.config['max_iterations']):
                # tanh maps the free variable back into [lower, upper].
                perturbed_images = torch.tanh(var_images + per) * half_range + mid_point
                perturbed_images = torch.clamp(perturbed_images, min=0.0, max=1.0)
                perturbed_images_plus_noises = perturbed_images[None, :, :, :, :] + noise_vector[:, None, :, :, :]
                perturbed_images_plus_noises = torch.clamp(perturbed_images_plus_noises, min=0.0, max=1.0)

                # Squared L2 distance between perturbed and original images.
                l2dist = torch.sum((perturbed_images - (torch.tanh(var_images) * half_range + mid_point)) ** 2, [1, 2, 3])

                loss = l2dist.clone()

                # Margin loss at every noise offset.  After the loop,
                # `prediction` holds the scores of the LAST (zero-noise)
                # offset, which is exactly what the success check uses.
                for i in range(self.config['noise_count']):
                    prediction = self.model(perturbed_images_plus_noises[i])
                    const_loss = torch.max(
                        (prediction * targets_in_one_hot).sum(1)
                        - (prediction - 1e10 * targets_in_one_hot).max(1)[0],
                        torch.ones(self.batch_size, device=self.model.device) * self.config['kappa'] * -1)
                    loss += var_const * const_loss

                loss = loss.sum()
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # Record any new per-sample best (smaller L2 AND misclassified).
                for i, (dist, score, img) in enumerate(
                        zip(l2dist.data.cpu().numpy(), prediction.data.cpu().numpy(), perturbed_images.data.cpu().numpy())):
                    if dist < best_l2[i] and un_targeted_attack_achieved(score, labels[i]):
                        best_l2[i] = dist
                        current_prediction_class[i] = np.argmax(score)
                        adv_images[i] = img

            # --- C&W-style binary search over the constant c -----------------
            for i in range(self.batch_size):
                # BUG FIX: the original tested `== labels[i]`, which can never
                # be true (current_prediction_class is only set on success,
                # i.e. when the prediction differs from the label), so the
                # success branch was dead and c only ever grew.  Untargeted
                # success is: prediction recorded AND different from the label.
                if current_prediction_class[i] != labels[i] and current_prediction_class[i] != -1:
                    # Attack succeeded with this c: tighten the upper bound
                    # and try a smaller constant.
                    const_upper_bound[i] = min(const_upper_bound[i], const_origin[i])
                    if const_upper_bound[i] < 1e10:
                        const_origin[i] = (const_lower_bound[i] + const_upper_bound[i]) / 2.0
                else:
                    # Attack failed: raise the lower bound and enlarge c.
                    const_lower_bound[i] = max(const_lower_bound[i], const_origin[i])
                    if const_upper_bound[i] < 1e10:
                        # BUG FIX: the original assigned to `const_origin`
                        # (rebinding the whole array to a scalar) instead of
                        # the i-th element, which would crash the next
                        # torch.from_numpy(const_origin) call.
                        const_origin[i] = (const_lower_bound[i] + const_upper_bound[i]) / 2.0
                    else:
                        const_origin[i] *= 10

        return np.array(adv_images).astype(np.float32)