"""
将对抗样本做一些变换，因为一些防御方法会做大量的变换来抵消对抗样本带来的影响

    resize, padding
    旋转，裁剪，平移，翻转
    FFT变换至频率域，将一些系数置0，再转换回空间域
    加噪声、降噪声 --> 这条好像还比较鸡肋hh
    JPEG压缩
    像素值范围调整[0,255]->[8,200]
    转换至其他颜色空间，修改了以后再转换回来
    直方图均衡化


进行本地数据增强
1、PGD，FGSM
2、旋转90，180度
3、数值区间压缩
4、JPEG压缩


考虑以下几种情况：
同一图像，做以上变换
不同图像，做以上变换

是把变换集中在一张图像上，还是一张图像分别做变换呢？

先把目前存在的可能性做一个排列组合出来，到时候依次尝试这些可能性，调参可能对结果有影响，但是这种级别的调整，参数设置的影响大概率是微乎其微，可以每种可能性尝试1-3次调参

实际上只要预估出阿里这次比赛，测试集中所包含的对抗样本的种类即可吧？不需要做多详尽的分析
"""
import torch
import random
import cv2
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
import torch.nn.functional as F
from test import load_model
from numpy import linalg as LA
import math
import time

# Run on GPU when available; the attack functions below default to this module-level device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Per-channel mean/std normalization applied before feeding models
# (these look like the standard CIFAR-10 statistics — TODO confirm against the training pipeline).
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
# Shared classification loss used by both FGSM and PGD attacks.
loss_func = nn.CrossEntropyLoss()


def cross_entropy(outputs, smooth_labels):
    """Batch-mean KL divergence between soft targets and predicted log-probs.

    Despite the name, this computes KL(smooth_labels || softmax(outputs)).
    It differs from true cross entropy only by the target entropy, which is
    constant w.r.t. `outputs`, so gradients are the same.
    """
    log_probs = F.log_softmax(outputs, dim=1)
    divergence = nn.KLDivLoss(reduction='batchmean')
    return divergence(log_probs, smooth_labels)


def fgsm_attack(models, image, label, epsilon=5/255, order=2, device=device, clip_min=0, clip_max=1):
    """One-step FGSM attack averaged over an ensemble of models.

    Args:
        models: iterable of callable classifiers producing logits of shape (1, C).
        image: input tensor in [0, 1]; batch dimension expected at dim 0.
        label: target distribution / one-hot vector; argmax gives the hard class.
        epsilon: perturbation budget.
        order: np.inf for sign step, 2 for per-sample L2-normalized step.
        device: device to run the attack on.
        clip_min / clip_max: output value range; pass both as None to disable clipping.

    Returns:
        Adversarial image tensor, same shape as `image`.
    """
    assert isinstance(image, torch.Tensor)
    assert isinstance(label, torch.Tensor)
    assert order in [np.inf, 2]

    X_fgsm = torch.clone(image).to(device)
    X_fgsm = normalize(X_fgsm)
    # .float() works on CPU and GPU alike; the original torch.cuda.FloatTensor
    # cast crashed on CPU-only machines.
    X_fgsm = X_fgsm.float()
    X_fgsm.requires_grad = True

    cost = torch.tensor(0.0, device=device, dtype=torch.double)
    for model in models:
        outputs = model(X_fgsm)
        model.zero_grad()
        # label is assumed one-hot / smoothed; recover the hard class index
        # and add the batch dimension CrossEntropyLoss expects.
        targets = label.argmax(dim=-1)
        targets = targets.unsqueeze(0)
        cost = cost + loss_func(outputs, targets)
    cost.backward()

    if order == np.inf:
        d = epsilon * X_fgsm.grad.data.sign()
    elif order == 2:
        gradient = X_fgsm.grad.data
        # Normalize each sample's gradient to unit L2 norm, then scale by epsilon.
        # clamp_min guards against division by zero on an all-zero gradient
        # (the old numpy version would produce NaNs there).
        flat_norms = gradient.view(gradient.shape[0], -1).norm(p=2, dim=1).clamp_min(1e-12)
        d = gradient / flat_norms.view(-1, *([1] * (gradient.dim() - 1))) * epsilon
    else:
        raise ValueError('Other p norms may need other algorithms')

    # NOTE(review): the step is computed in normalized space but applied to the
    # raw image; for sign/L2-normalized steps only the scale per channel differs.
    x_adv = image + d

    if clip_max is None and clip_min is None:
        clip_max = np.inf
        clip_min = -np.inf

    x_adv = torch.clamp(x_adv, clip_min, clip_max)

    return x_adv


def pgd_attack(models, image, label, eps=5 / 255, alpha=1 / 255, iters=10):
    """Iterative PGD attack (random start) averaged over an ensemble of models.

    Args:
        models: non-empty iterable of callable classifiers producing logits (1, C).
        image: input tensor in [0, 1]; batch dimension at dim 0.
        label: target distribution / one-hot vector; argmax gives the hard class.
        eps: L_inf perturbation budget.
        alpha: per-iteration step size.
        iters: number of gradient steps.

    Returns:
        Adversarial image tensor in [0, 1], same shape and dtype as `image`.
    """
    assert isinstance(image, torch.Tensor)
    assert isinstance(label, torch.Tensor)
    assert len(models) > 0

    adv_image = torch.clone(image).to(device)
    # Random start inside the eps-ball. torch-native init keeps the image's
    # dtype (np.random.uniform silently promoted everything to float64).
    # TODO: also try low-frequency / structured noise initialization.
    noise = torch.empty_like(adv_image).uniform_(-eps, eps)
    adv_image = torch.clamp(adv_image + noise, 0, 1.0)

    for _ in range(iters):
        cal_img = normalize(adv_image)
        cal_img = cal_img.float()  # works on CPU and GPU
        cal_img.requires_grad = True

        cost = torch.tensor(0.0, device=device, dtype=torch.double)
        for model in models:
            outputs = model(cal_img)
            model.zero_grad()
            targets = label.argmax(dim=-1)
            targets = targets.unsqueeze(0)
            cost = cost + loss_func(outputs, targets)
        cost.backward()

        # Ascent step on the loss, then project back into the eps-ball
        # around the clean image and re-clip to the valid pixel range.
        adv_image = adv_image + alpha * cal_img.grad.sign()
        eta = torch.clamp(adv_image - image, min=-eps, max=eps)
        adv_image = torch.clamp(image + eta, min=0, max=1).detach_()

    return adv_image


if __name__ == '__main__':
    print('start enhance data!')
    start = time.time()
    models = []

    resnet50 = load_model('F:\\AliTianChi\\ali_8_attack\\resnet50.pth.tar', ResTag=True)
    resnet50.eval()
    models.append(resnet50)

    densenet121 = load_model('F:\\AliTianChi\\ali_8_attack\\densenet121.pth.tar', ResTag=False)
    densenet121.eval()
    models.append(densenet121)

    images = np.load('data.npy')
    labels = np.load('label.npy')
    print(images.shape)
    enhance_images = []
    transform_train = transforms.Compose([
        transforms.ToTensor(),
    ])
    for i in range(images.shape[0]):
        img = images[i]
        label = labels[i]
        # Everything below works on torch tensors with a batch dimension.
        img = transform_train(img)
        img = torch.unsqueeze(img, 0)
        img = img.to(device)
        label = torch.tensor(label)
        label = label.to(device)
        # Randomly and independently apply each augmentation, so a single
        # image may receive zero, one, or both transforms.
        if random.random() > 0.5:
            print(f'i={i} PGD 攻击')
            img = pgd_attack(models=models, image=img, label=label)
        if random.random() > 0.5:
            print(f'i={i} 旋转攻击，主要是旋转90度')
            angle = 90 * math.pi / 180
            theta = torch.tensor([
                [math.cos(angle), math.sin(-angle), 0],
                [math.sin(angle), math.cos(angle), 0]
            ], dtype=torch.float, device=device)
            grid = F.affine_grid(theta.unsqueeze(0), img.size())
            # Bug fix: .float() instead of torch.cuda.FloatTensor (CPU-safe).
            img = img.float()
            # Bug fix: the rotated result was assigned to an unused `output`
            # variable, so the rotation previously had no effect.
            img = F.grid_sample(img, grid)

        # Back to HWC uint8-range numpy for saving.
        img = img[0]
        img = img * 255
        img = img.cpu().detach().numpy()
        img = img.transpose(1, 2, 0)
        enhance_images.append(img)

    enhance_images = np.array(enhance_images)
    # Bug fix: previously saved `images` (the untouched inputs), discarding
    # all of the augmentation work above.
    np.save('data_enhancement_1.npy', enhance_images)
    print(f'cost time={time.time()-start}s')
