import torch
import torchvision
from Models import densenet
from Models import resnet
from Models import vgg
import torchvision.transforms as T
import matplotlib.pyplot as plt
import numpy as np
import Util
import tqdm
import argparse
"""
'densenet121', 'resnet50', 'vgg_bn'
"""
# Command-line configuration for the transfer attack.
parser = argparse.ArgumentParser(description="input the model: ")
parser.add_argument('--white_model',type=str,default='resnet50',help='生成对抗样本的模型',required=False)
parser.add_argument('--black_model',type=str,default='vgg_bn',help='攻击的模型',required=False)
# BUG FIX: eps is a fractional perturbation budget (default 0.08). With
# type=int, any explicit CLI value such as "--eps 0.08" raises ValueError,
# and "--eps 1" silently becomes the integer 1. It must parse as float.
parser.add_argument('--eps',type=float,default=0.08,required=False)
# k: number of attack iterations (the step size is eps / k).
parser.add_argument('--k',type=int,default=10,required=False)
arg = parser.parse_args()
# Disable cudnn autotuning for reproducible timings/behavior across runs.
torch.backends.cudnn.benchmark = False



class Hook():
    """Forward-hook callback that records every (input, output) pair it sees.

    An instance is registered via ``module.register_forward_hook(hook)``;
    after a forward pass, ``hook.output[0]`` holds the first captured
    activation.
    """

    def __init__(self):
        # Captured tensors, oldest first, one entry per forward pass.
        self.input = list()
        self.output = list()

    def __call__(self, module, In, out):
        # Signature follows torch's forward-hook convention: (module, input, output).
        self.input += [In]
        self.output += [out]


class Attck():
    """Feature-space targeted attack with momentum updates.

    Pushes each source image's intermediate-layer activation toward the
    activation of the furthest target-class image from a library.

    NOTE(review): class name is spelled "Attck" (sic); kept as-is because
    callers reference it by this name.
    """

    def __init__(self, eps=arg.eps, k=arg.k,target_library=None):
        """
        Args:
            eps: perturbation budget (spread over k iterations as eps / k)
            k: number of attack iterations
            target_library: library of target-class images (targeted attack)
        """
        self.eps = eps
        self.k = k
        self.library = target_library


    def generate(self,model,model_name,src,tar):
        """
        Craft adversarial examples by minimizing the L2 distance between each
        sample's hooked activation and a fixed target activation.

        Args:
            model: white-box model whose gradients drive the attack.
            model_name (object): selects the hooked layer ('densenet121',
                'resnet50'; anything else uses the vgg branch).
            src: source image batch, (n, c, h, w).
            tar: target labels used to pick candidates from the library.
        Returns:
            Adversarial batch with the same shape as src, clipped to [0, 1].
            NOTE(review): src appears to be normalized upstream (see the
            Normalize transform in main), so the [0, 1] clip may not match
            the normalized value range — confirm intended.
        """
        model.eval()
        # Fresh copy of the input that we can differentiate w.r.t.
        adv = torch.Tensor(src.cpu()).to(src.device)
        adv.requires_grad = True
        # Per-iteration step size.
        alpha = self.eps / self.k
        momentum = torch.zeros(src.shape).to(src.device)
        with torch.no_grad():
            # Chosen once up front: the furthest target-class library image
            # and its activation, cached as the optimization target.
            tgt_furthest = self.__get_target(model, model_name,adv, target_label=tar)
            tgt_activation = self.__fearture_hook(model,model_name,tgt_furthest)

        for i in range(self.k):
            model.zero_grad()
            # Per-sample L2 distance between current and target activations.
            loss = torch.norm((self.__fearture_hook(model,model_name,adv)-tgt_activation).reshape(tgt_activation.size(0),-1),
                              p=2,
                              dim=1)
            # grad_outputs=ones backpropagates the sum of the per-sample losses
            # in a single backward pass.
            grad = torch.autograd.grad(loss, adv,grad_outputs=torch.ones(loss.size(0)).to(src.device),retain_graph=False, create_graph=False)[0]
            # Momentum accumulation. NOTE(review): the L1 norm is taken over the
            # whole batch rather than per sample — confirm this is intended.
            momentum = momentum + grad / torch.norm(grad, p=1)
            # Descend (minus sign) to shrink the distance to the target activation.
            adv = torch.clip(adv - alpha * torch.sign(momentum), min=0, max=1)

        return adv

    def __fearture_hook(self,model,model_name,img):
        """
        Run a forward pass and capture one intermediate layer's activation.

        Args:
            img: image batch to forward through the model.
        Returns: the hooked layer's output for this batch.
        # Per the paper, this layer gives the best results.
        """
        hook = Hook()
        # Pick the layer to hook based on the architecture name; the fallback
        # branch covers the vgg model.
        if model_name == "densenet121":
            hander = model.features.denseblock4.denselayer14.register_forward_hook(hook)
        elif model_name == "resnet50":
            hander = model.layer1.register_forward_hook(hook)
        else:
            hander = model.avgpool.register_forward_hook(hook)

        model.eval()
        # Forward pass only to trigger the hook; the logits are discarded.
        _ = model(img)
        hander.remove()

        return hook.output[0]

    def __get_target(self,model,model_name,ori_img,target_label:torch.Tensor):
        """
        Return, for each sample, the library image of its target class whose
        hooked feature lies furthest (L2) from the sample's own feature.
        (Batched slicing is used here because it is faster.)

        Args:
            model:  model
            ori_img:  n , c , w ,h
            target_label:  n,1

        Returns: the selected target images, one per sample (n, c, w, h)

        """
        lib = self.library
        # One stack of candidate images per requested target label; library
        # keys are the string form of the label index.
        images = [lib[str(i)] for i in target_label.cpu().numpy().tolist()]
        images = torch.Tensor(images).to(ori_img.device)
        images = torch.squeeze(images,dim=2)
        # images (batch,100,c,w,h)
        q = self.__fearture_hook(model,model_name,ori_img)
        #batch ,c, w,h
        batch_size = images.size(0)
        # Flatten the candidate dimension into the batch for one forward pass.
        image = torch.cat([images[i].squeeze(dim=0) for i in range(batch_size)],dim=0)
        #100*batch_size ,c, w,h
        p = self.__fearture_hook(model,model_name,image)
        """
        the first 100 is the first batch's output
        100*batch_size ,c, w,h
        """
        q = q.reshape(q.shape[0],1,-1)
        #batch num -1
        p = p.reshape(q.shape[0],images.shape[1],-1)
        # Per-sample distances to every candidate; keep the furthest one.
        diff = q-p
        distance = torch.norm(diff,dim=2)
        best = torch.argmax(distance,dim=1)
        tmp = images[range(batch_size), best]
        return tmp



# CIFAR-10 class names, indexed by the integer label produced by the dataset.
classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')



def get_module(name):
    """Construct one of the supported CIFAR-10 classifiers and load its weights.

    Args:
        name: one of 'densenet121', 'resnet50', 'vgg_bn'.

    Returns:
        The model in eval() mode, with weights restored from the 'net' entry
        of the matching checkpoint under ./Models/state_dicts/.
    """
    assert name in ['densenet121', 'resnet50', 'vgg_bn']
    # Only the architecture construction differs per branch; the checkpoint
    # load / eval sequence is shared below (previously triplicated).
    if name == 'densenet121':
        model = densenet.densenet121()
        ckpt_path = "./Models/state_dicts/densenet121.pth"
    elif name == 'resnet50':
        model = resnet.resnet50()
        ckpt_path = "./Models/state_dicts/resnet50.pth"
    else:
        # 'vgg_bn' maps to VGG-19 with batch norm; its checkpoint is vgg19.pth.
        model = vgg.vgg19_bn(pretrained=False)
        ckpt_path = "./Models/state_dicts/vgg19.pth"
    # map_location=None keeps torch's default device mapping, as before.
    state_dict = torch.load(ckpt_path, map_location=None)
    model.load_state_dict(state_dict['net'])
    model.eval()
    return model

def imshow(img,ax,label):
    """Un-normalize a CIFAR-10 image tensor and draw it on *ax* with a title.

    Args:
        img: (3, H, W) tensor normalized with mean (0.4914, 0.4822, 0.4465)
             and std (0.2471, 0.2435, 0.2616) — the transform used in main.
        ax: matplotlib axes to draw on.
        label: numpy array of integer class labels, rendered as class names.
    """
    img = img.detach().cpu().numpy()
    # Invert the per-channel normalization: x * std + mean.
    # BUG FIX: the green and blue channels were previously denormalized from
    # img[0] (the already-denormalized red channel) instead of their own data.
    img[0] = img[0] * 0.2471 + 0.4914
    img[1] = img[1] * 0.2435 + 0.4822
    img[2] = img[2] * 0.2616 + 0.4465
    # matplotlib expects channels-last (H, W, 3).
    ax.imshow(np.transpose(img, (1, 2, 0)))
    # BUG FIX: astype() returns a new array; the result was being discarded.
    label = label.astype(np.int16)
    ax.set_title(f'{[classes[i]for i in label]}')


# Two stacked axes: clean images are drawn on ax1, adversarial ones on ax2.
fig,(ax1,ax2) = plt.subplots(2,1)


# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0"if torch.cuda.is_available()else "cpu")
if __name__ == '__main__':
    # CIFAR-10 normalization used at training time; the attack operates on
    # images in this normalized space.
    transform = T.Compose([T.ToTensor(),T.Normalize(mean=(0.4914, 0.4822, 0.4465),std = (0.2471, 0.2435, 0.2616))])

    # White-box model: gradients are taken from it to craft adversarial examples.
    white = get_module(arg.white_model)
    # Black-box model: only queried on the finished adversarial examples.
    black = get_module(arg.black_model)

    black = black.to(device)
    white = white.to(device)

    batch = 5
    testset = torchvision.datasets.CIFAR10(root='./data', train=False,download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch, shuffle=True, num_workers=0,drop_last = True)
    # Library of candidate target-class images for the targeted attack.
    target_images = Util.Savetar(path_file='./library/library.json')
    attack = Attck(eps=arg.eps,k=arg.k,target_library=target_images)

    uTR = 0
    error_rate = 0
    tTR = 0
    tSuc = 0
    dtTR = 0
    # BUG FIX: total should be the number of batches; len(testset)/batch is a
    # float, whereas len(testloader) is exact (and respects drop_last).
    attack_bar = tqdm.tqdm(enumerate(testloader),total=len(testloader),ncols=100,unit='batch_image',unit_scale=True)
    for i,(imag,label) in attack_bar:
        imag = imag.to(device)
        label = label.to(device)

        # Targeted attack: the target class is the true label shifted by one.
        adv = attack.generate(white,arg.white_model,imag,(label+1)%10)

        out_source = white(adv)
        out_target = black(adv)

        adv_target_label = torch.argmax(out_target,dim=1)
        adv_source_label = torch.argmax(out_source,dim=1)

        # Visualize the first batch only: clean grid on top, adversarial below.
        if i == 0:
            imshow(torchvision.utils.make_grid(imag), ax1,label.cpu().numpy())
            imshow(torchvision.utils.make_grid(adv), ax2,adv_target_label.cpu().numpy())
            plt.savefig('./attack1.jpg')

        # Metrics:
        #   error_rate: fraction of adversarial examples misclassified by fb (black)
        #   uTR:        untargeted transfer rate (fools both fw and fb)
        #   tTR:        targeted transfer rate, conditioned on white-box success
        #   tSuc:       rate at which fb classifies the example as the target label
        error_rate += torch.sum(label != adv_target_label).cpu().numpy().item()

        target_result = label != adv_target_label
        source_result = label != adv_source_label
        uTR += torch.sum((target_result==1)&(source_result==1)).cpu().numpy().item()

        target_adv_label = (label + 1) % 10
        # dtTR counts white-box targeted successes — the denominator for tTR.
        dtTR += torch.sum(adv_source_label == target_adv_label).cpu().numpy().item()

        tTR += torch.sum((adv_target_label==target_adv_label)&(adv_source_label == target_adv_label)).cpu().numpy().item()

        tSuc += torch.sum(adv_target_label == target_adv_label).cpu().numpy().item()


    # BUG FIX: guard against ZeroDivisionError when the white-box targeted
    # attack never succeeds (dtTR == 0); report 0.0 in that case.
    tTR = tTR / dtTR if dtTR else 0.0
    error_rate /= len(testset)
    tSuc /= len(testset)
    uTR /= len(testset)
    print(f'error_rate = {error_rate}\n'
          f'tTR = {tTR}\n'
          f'tSuc = {tSuc}\n'
          f'uTR = {uTR}')


    plt.show()
