from cgi import test
from distutils.command.config import config
from pickletools import optimize
from turtle import pensize
import torch
import numpy as np
import random
from EAattak.EvolutionAlgorithm import EA
from tqdm import tqdm, trange


class UAP:

    def __init__(self, dataer, neter, config):
        """Store the data provider, target network and attack configuration.

        Frequently used config entries (device, population size, epsilon
        budget and generation count) are cached as attributes, together with
        the flattened perturbation dimensionality reported by the dataer.
        """
        self.dataer = dataer
        self.neter = neter
        self.config = config
        # Cache commonly used hyper-parameters out of the config dict.
        self.device = config['device']
        self.population_size = config['population_size']
        self.epsilon = config['epsilon']
        self.generation = config['generation']
        # Flattened dimensionality of one perturbation individual.
        self.dim = dataer.get_dim()
    
    def generate_target_attck_universal_adversarial_perturbations(
            self, 
            epsilon, 
            norm_type, 
            method, 
            batch, 
            size, 
            mode,
            index=0,
        ):
        """Generate one targeted UAP for every ordered (source, target) class pair.

        Iterates over all ordered pairs of distinct classes and delegates to
        ``generate_source_target_universal_adversarial_perturbations``.

        Args:
            epsilon: perturbation budget.
            norm_type: norm used to bound the perturbation.
            method: search method forwarded to the evolutionary optimizer.
            batch: number of source samples used to evaluate a perturbation.
            size: minimum number of loader batches required.
            mode: 'independent' or 'cross' sampling of source samples.
            index: which batch of source samples to use (default 0). Added
                because the delegate requires it; the original call omitted
                it and always raised a TypeError.

        Returns:
            Dict mapping '<source>-<target>' keys to the best perturbation
            found for that class pair.
        """
        class_num = self.dataer.class_num
        BaseSample = {}
        for source_class in range(class_num):
            for target_class in range(class_num):
                if source_class == target_class:
                    continue
                key = '{}-{}'.format(source_class, target_class)
                BaseSample[key] = self.generate_source_target_universal_adversarial_perturbations(
                    source_class=source_class,
                    target_class=target_class,
                    epsilon=epsilon,
                    norm_type=norm_type,
                    method=method,
                    batch=batch,
                    index=index,
                    size=size,
                    mode=mode,
                )
        return BaseSample

    def generate_source_target_universal_adversarial_perturbations(
            self, 
            source_class, 
            target_class, 
            epsilon, 
            norm_type, 
            method, 
            batch, 
            index, 
            mode,
            size,
        ):
        """Search for a targeted UAP moving `source_class` samples to `target_class`.

        Args:
            source_class: class the clean samples are drawn from.
            target_class: class the perturbed samples should be scored as.
            epsilon: perturbation budget.
            norm_type: norm bounding the perturbation.
            method: search method forwarded to the EA optimizer.
            batch: number of samples used to evaluate one perturbation.
            index: which loader batch to use in 'independent' mode.
            mode: 'independent' (disjoint samples per batch) or 'cross'
                (samples may overlap; total usage capped by config['total_num']).
            size: minimum number of loader batches required ('independent').

        Returns:
            The best perturbation found, in the dataer's transferred format.

        Raises:
            Exception: if source == target, or the loader cannot supply
                enough samples.
            ValueError: if `mode` is not recognized.
        """
        if source_class == target_class:
            raise Exception('The source class is equal to target class')

        # Pre-work for the evaluation function: pick the clean samples that
        # every candidate perturbation will be evaluated against.
        if mode == 'independent':   # batches do not share samples
            loader = self.dataer.get_class_loader(source_class, isTrain=True, batch_size=batch)
            if len(loader) < size:
                raise Exception('Can not provide so many samples for generating BaseSamples')
            origin_samples = None
            for dex, (samples, labels) in enumerate(loader):
                if dex == index:
                    origin_samples = samples
                    if origin_samples.shape[0] != batch:
                        raise Exception('Can not provide sufficient samples for generating BaseSamples')
                    break
        elif mode == 'cross':   # samples may overlap; total usage capped by config
            loader = self.dataer.get_cross_class_loader(source_class, self.config['total_num'], isTrain=True, batch_size=batch)
            origin_samples, _ = next(iter(loader))
        else:
            raise ValueError('Unknown mode: {}'.format(mode))

        def evaluation(populations):
            # Fitness = mean predicted probability of the target class over
            # the selected clean batch, one value per candidate perturbation.
            fitness = []
            eval_perturbations = self.dataer.transfer(populations)
            for eval_perturbation in eval_perturbations:
                adv_samples = torch.clamp(origin_samples + eval_perturbation, min=0.0, max=1.0)
                predictions = self.neter.get_prediction_score(adv_samples)
                fitness.append(predictions.mean(dim=0)[target_class])
            return fitness

        optimizer = EA(
            dimesion=self.dataer.get_dim(),
            epsilon=epsilon,
            norm_type=norm_type,
            method=method,
            evaluation=evaluation,
            strategy='random',
            population_size=self.config['population_size'],
            config=self.config,
            optimizer='GA',
        )   # random initialization driven by epsilon, norm_type, method, batch, mode

        with trange(self.config['generation']) as t:
            for gen in t:
                t.set_description('Index {}, GEN {}'.format(index, gen))
                optimizer.step()
                maxValue = max(optimizer.fitness)
                t.set_postfix(probability='{:.2f}%'.format(maxValue * 100))

                # Early stop once the target-class probability is high enough.
                if (maxValue * 100) > 98.0:
                    print('the probability satisfies the goal, early stop...')
                    break

        # Return the fittest individual, converted to the dataer's format.
        maxValue_index = optimizer.fitness.index(max(optimizer.fitness))
        maxValueindividual = optimizer.populations[maxValue_index]
        perturbation = self.dataer.transfer(maxValueindividual)

        return perturbation
    
    def generate_untarget_universal_adversarial_perturbations(
            self, 
            source_class, 
            epsilon, 
            norm_type, 
            method, 
            batch, 
            index, 
            mode,
            size,
        ):
        """Search for an untargeted UAP pushing `source_class` samples off their class.

        Fitness is the negated mean probability of `source_class`, so the EA
        maximizes misclassification of the perturbed batch; the search stops
        early once the remaining source-class probability drops below 0.1%.

        Args:
            source_class: class the clean samples are drawn from.
            epsilon: perturbation budget.
            norm_type: norm bounding the perturbation.
            method: search method forwarded to the EA optimizer.
            batch: number of samples used to evaluate one perturbation.
            index: which loader batch (or window of correct samples, for
                ImageNet) to use in 'independent' mode.
            mode: 'independent' or 'cross'.
            size: minimum number of loader batches required (non-ImageNet).

        Returns:
            The best perturbation found, in the dataer's transferred format.

        Raises:
            Exception: if the loader cannot supply enough samples.
            ValueError: if `mode` is not recognized.
            NotImplementedError: for ImageNet 'cross' mode (previously a
                silent `pass` that later crashed on undefined `origin_samples`).
        """
        if self.config['dataset'] != 'ImageNet':

            # Pre-work for the evaluation function: select the clean batch.
            if mode == 'independent':   # batches do not share samples
                loader = self.dataer.get_class_loader(source_class, isTrain=True, batch_size=batch)
                if len(loader) < size:
                    raise Exception('Can not provide so many samples for generating BaseSamples')
                origin_samples = None
                for dex, (samples, labels) in enumerate(loader):
                    if dex == index:
                        origin_samples = samples
                        if origin_samples.shape[0] != batch:
                            raise Exception('Can not provide sufficient samples for generating BaseSamples')
                        break

            elif mode == 'cross':   # samples may overlap; total usage capped by config
                print('Cross generation way.')
                loader = self.dataer.get_cross_class_loader(source_class, self.config['total_num'], isTrain=True, batch_size=batch)
                origin_samples, _ = next(iter(loader))

            else:
                raise ValueError('Unknown mode: {}'.format(mode))

        else:

            print('Into ImageNet Data sampling....')

            # ImageNet loaders contain many misclassified samples; collect
            # only samples the clean network already classifies correctly,
            # one sample at a time.
            if mode == 'independent':

                loader = self.dataer.get_class_loader(source_class, isTrain=True, batch_size=1)
                origin_samples = []
                count = 0

                for sample, label in loader:

                    sample = sample.to(self.device)
                    label = label.to(self.device)

                    output = self.neter.net(sample)
                    _, pre = torch.max(output.data, 1)

                    # Skip samples the clean network gets wrong.
                    if pre != label:
                        continue

                    # Keep only samples falling in the `index`-th window of
                    # `batch` correctly classified samples.
                    if int(count // batch) == index:
                        origin_samples.append(sample)

                    if len(origin_samples) == batch:
                        break

                    count += 1

                origin_samples = torch.cat(origin_samples, dim=0).cpu()

                if origin_samples.shape[0] != batch:
                    raise Exception('the batch size is less than {}'.format(batch))

            elif mode == 'cross':
                # Was a silent `pass`, which left `origin_samples` undefined
                # and crashed later inside the evaluation closure.
                raise NotImplementedError('cross mode is not implemented for ImageNet')

            else:
                raise ValueError('Unknown mode: {}'.format(mode))

        def evaluation(populations):
            # Fitness = negated mean source-class probability per candidate,
            # so a higher fitness means stronger misclassification.
            fitness = []
            eval_perturbations = self.dataer.transfer(populations)
            for eval_perturbation in eval_perturbations:
                adv_samples = torch.clamp(origin_samples + eval_perturbation, min=0.0, max=1.0)
                predictions = self.neter.get_prediction_score(adv_samples)
                fitness.append(-predictions.mean(dim=0)[source_class])
            return fitness

        optimizer = EA(
            dimesion=self.dataer.get_dim(),
            epsilon=epsilon,
            norm_type=norm_type,
            method=method,
            evaluation=evaluation,
            strategy='random',
            population_size=self.config['population_size'],
            config=self.config,
            optimizer='GA',
        )   # random initialization driven by epsilon, norm_type, method, batch, mode

        with trange(self.config['generation']) as t:
            for gen in t:
                t.set_description('MUAPs index {}, GEN {}'.format(index, gen))
                optimizer.step()
                maxValue = max(optimizer.fitness)
                t.set_postfix(probability='{:.2f}%'.format(-1 * maxValue * 100))

                # Early stop once the source-class probability is nearly zero.
                if (-1 * maxValue * 100) < 0.1:
                    print('the probability satisfies the goal, early stop...')
                    break

        # Return the fittest individual, converted to the dataer's format.
        maxValue_index = optimizer.fitness.index(max(optimizer.fitness))
        maxValueindividual = optimizer.populations[maxValue_index]
        perturbation = self.dataer.transfer(maxValueindividual)

        return perturbation

    ## deprecated
    def generate_universal_adversarial_perturbation(self, target_index, optimizer='EA', norm_type='inf', sign=False):
        """
        EA: evolution algorithm
        DE: differential evolution
        生成通用对抗扰动，target_index指明要攻击生成的目标类，返回扰动
        """
        def fitness_function(perturbations, target, batch_size=8):
            return self.evaluation_1(perturbations, target, batch_size=8)
        ## 定义进化框架，差分进化框架，（2范数，无穷范数）模式，尝试生成扰动
        if sign:
            if optimizer == 'DE' :
                return self.sign_DE(target_index=target_index, fitness_function=fitness_function, norm_type=norm_type)
            else:
                return self.sign_EA(target_index=target_index, fitness_function=fitness_function, norm_type=norm_type)
        else:
            if optimizer == 'DE':
                return self.DE(target_index=target_index, fitness_function=fitness_function, norm_type=norm_type)
            else:
                return self.EA(target_index=target_index, fitness_function=fitness_function, norm_type=norm_type)

    ## deprecated
    def DE(self, target_index, fitness_function, norm_type='inf'):

        pass

    ## deprecated
    def sign_DE(self, target_index, fitness_function, norm_type='inf'):

        pass

    ## deprecated
    def EA(self, target_index, fitness_function, norm_type='inf'):
        """Hand-rolled genetic algorithm over flat perturbation vectors (deprecated).

        Evolves `self.population_size` individuals of dimension `self.dim`,
        clipped to the [-epsilon, epsilon] box, for `self.generation`
        generations, maximizing `fitness_function(population, target_index)`.

        NOTE(review): returns nothing — the final population is only passed
        to `self.test(population, 0, 9)` with hard-coded labels, and the
        `norm_type` parameter is never used.
        """
        self.alpha = self.config['alpha']

        ## init population, give fitness
        # Uniform initialization in [-epsilon, epsilon] per dimension.
        population = self.epsilon * (-1 + 2 * np.random.rand(self.population_size, self.dim))
        fitness = fitness_function(population, target_index)
        print(fitness)
        #loop
        gen = 0
        while gen < self.generation:        
            
            ## adptive alpha
            # Mutation strength decays towards 0.4 as generations progress.
            self.alpha = 0.4 + (self.alpha - 0.4) * (self.generation - gen) / self.generation

            ## judge
            ## subsequent improvement
            child_population = np.zeros_like(population)
            ## selection
            # Binary tournament: draw two distinct candidates, both different
            # from the slot's own index (i // 2); the fitter one is the parent.
            parents = np.zeros((self.population_size * 2, self.dim))
            for i in range(2 * self.population_size):
                candidate_1 = random.randint(0, self.population_size - 1)
                candidate_2 = random.randint(0, self.population_size - 1)
                while candidate_1 == (i // 2) or candidate_2 == (i // 2) or candidate_1 == candidate_2:
                    candidate_1 = random.randint(0, self.population_size - 1)
                    candidate_2 = random.randint(0, self.population_size - 1)
                if fitness[candidate_1] > fitness[candidate_2]:
                    parents[i] = population[candidate_1]
                else:
                    parents[i] = population[candidate_2]
            ## cross
            # Uniform crossover: each gene comes from either parent with p=0.5.
            for i in range(self.population_size):
                for j in range(self.dim):
                    p = random.random()
                    if p < 0.5:
                        child_population[i, j] = parents[2 * i, j]
                    else:
                        child_population[i, j] = parents[2 * i + 1, j]
            ## mutation
            # Each gene mutates with p=0.1 by a uniform step scaled by alpha.
            for i in range(self.population_size):
                for j in range(self.dim):
                    p  = random.random()
                    if p < 0.1:
                        child_population[i, j] += self.alpha * self.epsilon * (-1 + 2 * random.random())
            
            ## clip
            # Keep children inside the epsilon box constraint.
            child_population = np.clip(child_population, a_min=-self.epsilon, a_max=self.epsilon)
            ## evaluation
            child_fitness = fitness_function(child_population, target_index)

            # Elitist replacement: a child replaces its slot only if fitter.
            for i in range(self.population_size):
                if child_fitness[i] > fitness[i]:
                    population[i] = child_population[i]
                    fitness[i] = child_fitness[i]
            print('Generation: {}'.format(gen))
            print('Fitness')
            print(fitness)
            ## step 
            gen += 1
        
        self.test(population, 0, 9)

    ## deprecated
    def sign_EA(self, target_index, fitness_function, norm_type='inf'):

        pass
    
    ## deprecated    
    def transfer(self, populations):
        """
        transfer populations to image shape and tensor.
        """
        batch = populations.shape[0]
        image_shape = self.dataer.get_shape()
        shape = [batch, ]
        for shape_i in image_shape[1:]:
            shape.append(shape_i)
        perturbations = populations.reshape(shape)
        return torch.tensor(perturbations).to(dtype=torch.float)

    ## deprecated
    def evaluation_1(self, populations, target, batch_size=1):

        perturbations = self.transfer(populations)
        # loader = self.dataer.get_loader(batch_size=batch_size, isShuffle=False)
        loader = self.dataer.get_class_loader(class_index=9, batch_size=64, isTrain=True)

        clean_samples, lables = next(iter(loader))
        
        fitness = []
        for perturbation in perturbations:
            adv_samples = torch.clamp(clean_samples + perturbation, min=0.0, max=1.0)
            probability_output = self.neter.get_prediction_score(adv_samples)
 
            fitness.append(probability_output.mean(dim=0)[target])

        return fitness

    ## deprecated
    ## 每次花费一个batch的样本去生成统一扰动，然后sample_index去决定去生成第几个样本， 为目标攻击 
    def evaluation_2(self, populations, target, batch_size=1, sample_index=0):
        
        perturbations = self.transfer(populations)
        loader = self.dataer.get_class_loader(class_index=target, batch_size=batch_size)

        origin_samples = None
        if len(loader) < sample_index:
            raise Exception('Can not give so much base samples')
        for order, (samples, _) in enumerate(loader):
            if order == sample_index:
                origin_samples = samples
        
        fitness = []
        for perturbation in perturbations:
            adv_samples = torch.clamp(origin_samples + perturbation, min=0.0, max=1.0)
            probability_output = self.neter.get_prediction_score(adv_samples)
            fitness.append(probability_output.mean(dim=0)[target])
        
        return fitness

    ## deprecated
    def test(self, populations, origin_label, target):

        perturbations = self.transfer(populations)

        for index, perturbation in enumerate(perturbations):
            print('{}-th Perturbation fool rate: {:.2f}%'.format(index, self.neter.adversarial_test(perturbation, target, origin_label)))
            