import numpy as np
import torch
import torch.nn.functional as F

from attack.base_attack import BaseAttack
from torch.autograd import Variable
from attack.differential_evolution import differential_evolution

class Attack(BaseAttack):
	"""One Pixel Attack (OPA): a black-box adversarial attack that searches
	for a small number of pixel changes via differential evolution."""

	def __init__(self, model, config):
		# Untargeted, non-least-likely-class attack; evaluates in batches of 25.
		super().__init__('OPA', model, config, batch_size=25, targeted=False, llc=False)
		# Fixed-order loader so runs are comparable across configurations.
		self.test_dataset = self.model.get_test_loader(batch_size=self.batch_size, shuffle=False)

	def perturb_image(self, pixels, image):
		"""Apply candidate pixel perturbations to copies of `image`.

		Parameters
		----------
		pixels : np.ndarray
			One perturbation vector of length 5*k — (x, y, r, g, b) per
			perturbed pixel — or a 2-D array with one such vector per row.
		image : torch.Tensor
			Image tensor; one copy is made per candidate row.

		Returns
		-------
		torch.Tensor
			A batch with one perturbed image per candidate.
		"""
		# A single perturbation vector is wrapped so the code below can
		# always treat the input as a batch of candidates.
		if pixels.ndim < 2:
			pixels = np.array([pixels])

		images = image.repeat(len(pixels), 1, 1, 1)
		pixels = pixels.astype(int)

		for idx, candidate in enumerate(pixels):
			# FIX: use integer division — np.split expects an integer section
			# count; `len(candidate)/5` produced a float and only worked via
			# numpy's implicit int() coercion.
			for x_pos, y_pos, r, g, b in np.split(candidate, len(candidate) // 5):
				# Write the RGB value normalized with the CIFAR-10 per-channel
				# mean/std the model was trained with (means 0.4914/0.4822/0.4465,
				# stds 0.2023/0.1994/0.2010).
				# NOTE(review): x_pos/y_pos are not clipped here; assumes the
				# optimizer keeps them inside the image grid — confirm upstream.
				images[idx, 0, x_pos, y_pos] = (r / 255.0 - 0.4914) / 0.2023
				images[idx, 1, x_pos, y_pos] = (g / 255.0 - 0.4822) / 0.1994
				images[idx, 2, x_pos, y_pos] = (b / 255.0 - 0.4465) / 0.2010
		return images

	def predict_classes(self, pixels, image, target_class, minimize=True):
		"""Fitness function for differential evolution: the model's softmax
		confidence for `target_class` on each perturbed candidate, optionally
		inverted (1 - p) when the search should maximize that confidence."""
		with torch.no_grad():
			candidates = self.perturb_image(pixels, image.clone())
			batch = torch.Tensor(candidates).to(self.model.device)
			probs = F.softmax(self.model(batch), dim=1)
			target_probs = probs.data.cpu().numpy()[:, target_class]
			if minimize:
				return target_probs
			return 1 - target_probs

	def attack_success(self, pixel, image, target_class, targeted_attack=False, verbose=False):
		"""Early-stop callback for differential evolution.

		Returns True once the perturbed image is classified as desired —
		predicted == target_class for a targeted attack, predicted != the
		true class for an untargeted one — and False otherwise.
		"""
		attack_image = self.perturb_image(pixel, image.clone())
		var_images = torch.Tensor(attack_image).to(self.model.device)
		confidence = F.softmax(self.model(var_images), dim=1).data.cpu().numpy()[0]
		predicted_class = np.argmax(confidence)

		if verbose:
			# FIX: the original mixed %-style and str.format and printed the
			# literal placeholder "Confidence: %.2f".
			print("Confidence: %.2f" % confidence[target_class])
		if (targeted_attack and predicted_class == target_class) or (
				not targeted_attack and predicted_class != target_class):
			return True
		# FIX: explicit False instead of an implicit None fall-through.
		return False

	def attack_once(self, image, label, labels=None, pixels=1, maxiter=75, popsize=400, verbose=False):
		"""Search for a `pixels`-pixel perturbation of one image with
		differential evolution.

		Returns a tuple (perturbed images on the model device, 1/0 success
		flag, winning perturbation vector as ints — or [None] on failure).
		"""
		with torch.no_grad():
			targeted_attack = labels is not None
			target_class = labels if targeted_attack else label

			# Per-pixel search space: x, y coordinates then R, G, B values.
			# NOTE(review): with an upper bound of 32, an integer cast of a
			# boundary sample would index row/column 32 — confirm the DE
			# implementation keeps samples strictly inside the grid.
			bounds = [(0, 32), (0, 32), (0, 255), (0, 255), (0, 255)] * pixels

			# Population multiplier: floor-divide so popsize caps the total.
			popmul = max(1, popsize // len(bounds))

			def predict_fn(candidates):
				return self.predict_classes(candidates, image, target_class, labels is None)

			def callback_fn(candidate, convergence):
				return self.attack_success(candidate, image, target_class, targeted_attack, verbose)

			# Random initial population: uniform positions, colour channels
			# drawn around mid-grey via normal(128, 127).
			inits = np.zeros([popmul * len(bounds), len(bounds)])
			for member in inits:
				for p in range(pixels):
					base = p * 5
					member[base + 0] = np.random.random() * 32
					member[base + 1] = np.random.random() * 32
					member[base + 2] = np.random.normal(128, 127)
					member[base + 3] = np.random.normal(128, 127)
					member[base + 4] = np.random.normal(128, 127)

			attack_result = differential_evolution(
				predict_fn, bounds, maxiter=maxiter, popsize=popmul,
				recombination=1, atol=-1, callback=callback_fn,
				polish=False, init=inits)

			# Re-evaluate the best candidate and decide success.
			attack_image = self.perturb_image(attack_result.x, image)
			attack_var_images = torch.Tensor(attack_image).to(self.model.device)
			predicted_probs = F.softmax(self.model(attack_var_images), dim=1).data.cpu().numpy()[0]
			predicted_class = np.argmax(predicted_probs)

			succeeded = (not targeted_attack and predicted_class != label) or (
				targeted_attack and predicted_class == target_class)
			if succeeded:
				return attack_var_images, 1, attack_result.x.astype(int)
			return attack_var_images, 0, [None]

	def attack_batch(self, targeted=False, verbose=False):
		"""Attack correctly-classified test images drawn from the loader.

		Returns (list of adversarial image tensors, final success rate).
		"""
		print('OPA attack perturbs the images now ......\n')

		iteration = 0  # renamed from `iter`, which shadowed the builtin
		adv_images = []
		# FIX: the counters must accumulate over the whole run. The original
		# reset `correct`/`success` inside the loop, so the reported success
		# rate was always per-image and `correct == 100` could never break.
		correct = 0.0
		success = 0.0
		success_rate = 0.0  # FIX: defined even if no image passes the filter

		with torch.no_grad():
			for index, (images, labels) in enumerate(self.test_dataset):
				iteration += 1

				var_images = torch.Tensor(images).to(self.model.device)
				prior_probs = F.softmax(self.model(var_images), dim=1)
				_, indices = torch.max(prior_probs, 1)

				# Only attack images the model already classifies correctly.
				if labels[0] != indices.data.cpu()[0]:
					continue

				correct += 1
				labels = labels.numpy()
				# Targeted mode tries every class except the true one below.
				targets = [None] if not targeted else range(10)

				for target_class in targets:
					if targeted and target_class == labels[0]:
						continue

					adv_image, flag, x = self.attack_once(
						images, labels[0], target_class,
						pixels=self.config['pixels'],
						maxiter=self.config['maxiter'],
						popsize=self.config['popsize'],
						verbose=verbose)
					adv_images.extend(adv_image)
					success += flag
					if targeted:
						# 9 target classes are attempted per correct image.
						success_rate = float(success) / (9 * correct)
					else:
						success_rate = float(success) / correct
						if flag == 1:
							print("Iteration %d :Success rate: %.4f (%d/%d) [(x,y) = (%d,%d) and (R,G,B)=(%d,%d,%d)]" % (
								iteration, success_rate, success, correct, x[0], x[1], x[2], x[3], x[4]))

				# Stop once 100 correctly-classified images have been attacked.
				if correct == 100:
					break

			return adv_images, success_rate

	def attack(self, images, labels):
		"""Entry point: run the untargeted OPA over the test loader.

		`images`/`labels` are accepted for interface compatibility with the
		base class but the attack draws its data from `self.test_dataset`.
		Returns the adversarial images as a numpy array.
		"""
		print(len(self.test_dataset))
		with torch.no_grad():
			adv_images, result = self.attack_batch(targeted=False, verbose=False)
			# FIX: the original applied .format() to a %-style string and
			# printed the literal placeholder "Final success rate: %.2f".
			print("Final success rate: %.2f" % result)
			return np.array(adv_images)