import torch
import torchvision
import random
import cv2
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
import torch.nn.functional as F
from numpy import linalg as LA
import math
import time
import sys
from PIL import Image
import torch.utils.data as data
# from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
from pytorch_msssim import MS_SSIM
# from torchvision.transforms.autoaugment import AutoAugmentPolicy
from scipy.fftpack import dct, idct
from torchvision.transforms import functional as ttf
import sys
sys.path.append('../data_enhance')
from test_data_expand import EnhanceType
# from data_enhance.test_data_expand import EnhanceType

# Reproducibility: fix every RNG seed and force deterministic cuDNN kernels.
seed = 11037
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# CIFAR-10 per-channel mean/std normalization (applied before model inference).
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
# Pads 32x32 -> 34x34; presumably so MS-SSIM with win_size=3 has enough
# spatial extent at every scale -- TODO confirm.
pad = transforms.Pad(1)
rotation = transforms.RandomRotation(degrees=(5, 45))
erasing = transforms.RandomErasing(p=0.8, scale=(0.03, 0.1))
gauss = transforms.GaussianBlur(kernel_size=3)
loss_func = nn.CrossEntropyLoss()
# Per-sample MS-SSIM (size_average=False) used to gate adversarial image quality.
ms_ssim_eval = MS_SSIM(data_range=1.0, win_size=3, size_average=False)


# def show_img(img):
#     img = img[0]
#     img = img.cpu().detach().numpy()
#     img = img.transpose(1, 2, 0)
#     plt.imshow(img)
#     plt.show()

# Load pretrained CIFAR-10 models, source: https://github.com/chenyaofo/pytorch-cifar-models
def get_models():
    """Download five pretrained CIFAR-10 classifiers via torch.hub.

    Each model is moved to the module-level ``device`` and switched to
    eval mode (inference only: freezes dropout / batch-norm statistics).

    Returns:
        list: [resnet32, vgg13_bn, mobilenetv2_x0_75, shufflenetv2_x1_0,
        repvgg_a0], all in eval() mode on ``device``.
    """
    # One loop instead of five copy-pasted stanzas; order is preserved.
    model_names = [
        "cifar10_resnet32",
        "cifar10_vgg13_bn",
        "cifar10_mobilenetv2_x0_75",
        "cifar10_shufflenetv2_x1_0",
        "cifar10_repvgg_a0",
    ]
    models = []
    for name in model_names:
        model = torch.hub.load("chenyaofo/pytorch-cifar-models", name, pretrained=True)
        model = model.to(device)
        model.eval()
        models.append(model)
    return models


# Sample noise from a Gaussian distribution and convert it to low-frequency noise.
def sample_gaussian_torch(image_size, dct_ratio=1 / 8):
    """Draw low-frequency Gaussian noise of the given (N, C, H, W) size.

    Standard-normal noise is written into the top-left (low-frequency) corner
    of the DCT domain, then mapped back to the spatial domain with a separable
    2-D inverse DCT, yielding spatially smooth noise.

    Args:
        image_size: target tensor shape, (N, C, H, W).
        dct_ratio: fraction of each spatial axis filled with noise in the
            DCT domain; values < 1.0 trigger the inverse transform.

    Returns:
        torch.FloatTensor of shape ``image_size`` on the active device.
    """
    # Resolve the device locally so the function does not depend on module state.
    dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    x = torch.zeros(image_size)
    fill_size = int(image_size[-1] * dct_ratio)
    x[:, :, :fill_size, :fill_size] = torch.randn(x.size(0), x.size(1), fill_size, fill_size)
    if dct_ratio < 1.0:
        # Inverse DCT along W (axis=3) then H (axis=2) spreads the
        # low-frequency coefficients into smooth spatial noise.
        x = torch.tensor(idct(idct(x.numpy(), axis=3, norm='ortho'), axis=2, norm='ortho'), dtype=torch.float32, device=dev)
    else:
        # Bug fix: this path used to return a CPU tensor while the transform
        # path returned a tensor on the active device.
        x = x.to(dev)
    return x


# PGD (Projected Gradient Descent) attack
def pgd_attack(models, image, label, eps=8 / 255, alpha=1 / 255, iters=20, noise_type=0):
    """Iterated gradient attack maximising the summed cross-entropy loss over
    an ensemble of models.

    Args:
        models: iterable of classifiers in eval mode on ``device``.
        image: clean input batch with values in [0, 1].
        label: ground-truth class indices for ``image``.
        eps: L-inf budget for the total perturbation.
        alpha: per-iteration step size.
        iters: number of gradient steps.
        noise_type: 0 -> uniform init noise; otherwise low-frequency noise
            from ``sample_gaussian_torch``.

    Returns:
        Adversarial batch clamped to [0, 1] and within ``eps`` of ``image``.
    """

    adv_image = torch.clone(image).to(device)
    # Noise initialisation
    if noise_type == 0:  # NOTE(review): originally labelled "Gaussian", but np.random.uniform gives uniform noise
        noise = np.random.uniform(-eps, eps, adv_image.shape)
        noise = torch.tensor(noise, dtype=torch.float32, device=device)
    else:  # low-frequency noise
        noise = sample_gaussian_torch(image_size=image.shape)

    adv_image = adv_image + noise
    adv_image = torch.clamp(adv_image, 0, 1.0)

    for i in range(iters):
        # Gradients are taken w.r.t. the normalized image fed to the models.
        cal_img = normalize(adv_image)
        cal_img.requires_grad = True

        # Sum the loss over every model so the step fools the whole ensemble.
        cost = torch.tensor(0, device=device, dtype=torch.float32)
        for model in models:
            outputs = model(cal_img)
            model.zero_grad()
            cost += loss_func(outputs, label)
        cost.backward()

        # Ascend the loss, then project back into the eps-ball and [0, 1].
        adv_image = adv_image + alpha * cal_img.grad.sign()
        eta = torch.clamp(adv_image - image, min=-eps, max=eps)
        adv_image = torch.clamp(image + eta, min=0, max=1).detach_()

    return adv_image


# Split the image into blocks and shuffle their positions.
def RFL(img):
    """Random block shuffling.

    Splits a (N, C, 32, 32) batch into a 4x4 grid of 8x8 tiles and
    reassembles the tiles in a random order (one permutation per call,
    shared across the whole batch).

    Args:
        img: image batch; spatial size must be 32x32.

    Returns:
        Tensor of the same shape with shuffled 8x8 tiles, on the same
        device and with the same dtype as ``img``.
    """
    block_list = []
    for w in range(4):
        for h in range(4):
            block_list.append(img[:, :, w*8:(w+1)*8, h*8:(h+1)*8])
    random.shuffle(block_list)
    # Fix: allocate on the input's device/dtype instead of the module-level
    # ``device`` global, so the result stays wherever the caller's data lives.
    shuffle_img = torch.zeros(size=img.shape, device=img.device, dtype=img.dtype)
    for w in range(4):
        for h in range(4):
            shuffle_img[:, :, w*8:(w+1)*8, h*8:(h+1)*8] = block_list[w*4+h]
    return shuffle_img


# Apply mix_up augmentation to an image batch.
def mix_up(images, labels, enhance_labels):
    """Blend each image with another image of a different class and append a
    matching soft label.

    For every sample i, a partner j with a different label is searched for
    starting at a random index; the two images are blended with a proportion
    drawn from [0.1, 0.45] and an unnormalized two-hot soft label is appended
    to ``enhance_labels``.

    Bug fixes vs. the original:
      * a soft label is now appended for EVERY sample, even when no partner
        with a different label is found — previously the caller stored the
        image but no label, misaligning images and labels;
      * the random search start is bounded by the actual batch size instead
        of the hard-coded 32 (the last batch can be smaller than 64).

    Args:
        images: (N, C, H, W) batch in [0, 1]; modified in place.
        labels: (N,) tensor of class indices.
        enhance_labels: list that soft labels (np.ndarray of length 10) are
            appended to.

    Returns:
        (images, enhance_labels): the mixed batch and the extended label list.
    """
    batch_size = images.shape[0]
    for i in range(batch_size):
        # Random search start so partners vary between calls.
        random_idx = random.randint(0, batch_size)
        mixed = False
        for j in range(random_idx, batch_size):
            if labels[j] == labels[i]:
                continue
            mix_up_proportion = random.uniform(0.1, 0.45)
            # Blend the two images.
            images[i] = (1 - mix_up_proportion) * images[i] + mix_up_proportion * images[j]
            # Build the corresponding two-hot soft label.
            soft_label = np.zeros(10)
            soft_label[labels[i].item()] += (1 - mix_up_proportion)  # an unnormalized soft label vector
            soft_label[labels[j].item()] += mix_up_proportion
            enhance_labels.append(soft_label)
            mixed = True
            break
        if not mixed:
            # No partner found: keep the image unchanged but still emit a
            # one-hot label so images and labels stay aligned downstream.
            soft_label = np.zeros(10)
            soft_label[labels[i].item()] = 1.0
            enhance_labels.append(soft_label)
    return images, enhance_labels


def check_pred(models, ori_imgs:torch.Tensor, advs:torch.Tensor, labels:torch.Tensor, enhance_imgs, enhance_labels):
    """Triage adversarial candidates by visual quality and ensemble accuracy.

    Candidates that fool every model (and, in the per-sample pass, keep
    MS-SSIM >= 0.75) are accepted into ``enhance_imgs``/``enhance_labels``
    as HWC arrays scaled to [0, 255] with unnormalized soft labels.
    Candidates still recognised by some model — or replaced by the original
    image when quality is too low — are returned for another round.

    Returns:
        (imgs_to_continue, labels_to_continue, enhance_imgs, enhance_labels)
    """
    # Per-sample MS-SSIM between originals and candidates (both padded).
    ms_ssim_scores = ms_ssim_eval(pad(ori_imgs), pad(advs))
    # Collect indices of samples still classified correctly by ANY model.
    adv_nor = normalize(advs)
    index_of_true = set()
    for model in models:
        outputs = model(adv_nor)
        _, index = torch.topk(outputs, 1)
        index = index.view(-1)
        dif = index - labels
        a = (dif == 0).nonzero().view(-1).cpu().numpy().tolist()
        index_of_true.update(a)
    # Every model misclassifies every sample: accept the whole batch as-is.
    if len(index_of_true) == 0:
        for shape_count in range(advs.shape[0]):
            img = advs[shape_count]
            img = img * 255
            img = img.cpu().detach().numpy()
            img = img.transpose(1, 2, 0)  # CHW -> HWC for the saved dataset
            enhance_imgs.append(img)
            soft_label = np.zeros(10)
            # NOTE(review): random magnitude in [0, 10) — deliberately
            # unnormalized; confirm the downstream trainer expects this scale.
            soft_label[labels[shape_count].item()] += random.uniform(0, 10)  # an unnormalized soft label vector
            enhance_labels.append(soft_label)
        return [], [], enhance_imgs, enhance_labels

    # Some model still answers correctly: keep those samples for further
    # enhancement, and — subject to the MS-SSIM floor — accept the fooled ones.
    img_need_continue_enhance = []
    label_need_continue_enhance = []
    for shape_count in range(advs.shape[0]):
        img = advs[shape_count]
        ms_ssim_score = ms_ssim_scores[shape_count]
        # Unacceptable image quality: fall back to the untouched original.
        if ms_ssim_score < 0.75:
            img = ori_imgs[shape_count]
            img_need_continue_enhance.append(img.cpu().numpy())
            label_need_continue_enhance.append(labels[shape_count].item())
            continue
        # MS-SSIM acceptable: decide whether this sample needs more work.
        if shape_count in index_of_true:
            img_need_continue_enhance.append(img.cpu().numpy())
            label_need_continue_enhance.append(labels[shape_count].item())
        else:
            img = img * 255
            img = img.cpu().detach().numpy()
            img = img.transpose(1, 2, 0)
            enhance_imgs.append(img)
            soft_label = np.zeros(10)
            soft_label[labels[shape_count].item()] += random.uniform(0, 10)  # an unnormalized soft label vector
            enhance_labels.append(soft_label)

    img_need_continue_enhance = np.asarray(img_need_continue_enhance)
    img_need_continue_enhance = torch.tensor(img_need_continue_enhance, device=device)
    label_need_continue_enhance = np.asarray(label_need_continue_enhance)
    label_need_continue_enhance = torch.tensor(label_need_continue_enhance, device=device)
    # Return (imgs still to enhance, their labels, finished imgs, finished labels).
    return img_need_continue_enhance, label_need_continue_enhance, enhance_imgs, enhance_labels


# Generate the augmented dataset.
def gen_dataset(dataloader, enhance_imgs, enhance_labels, enhance_count, break_limit=None):
    """Iterate ``dataloader`` and append (image, soft label) pairs.

    Images are stored as HWC float arrays scaled to [0, 255]; each label is
    a length-10 soft-label vector. NOTE(review): every augmentation branch
    below is commented out, so currently each batch passes through unchanged
    (``advs = images``) while ``op`` is still drawn to keep the RNG stream.

    Args:
        dataloader: yields (images, labels) batches.
        enhance_imgs: list extended with HWC image arrays.
        enhance_labels: list extended with length-10 soft labels.
        enhance_count: per-augmentation-type counters (indexed like EnhanceType).
        break_limit: optional maximum number of batches to process.

    Returns:
        (enhance_imgs, enhance_labels, enhance_count)
    """
    for i, (images, labels) in enumerate(dataloader):
        # Optional early stop after break_limit batches.
        if break_limit is not None and i >= break_limit:
            break
        images = images.to(device)
        labels = labels.to(device)

        # Soft labels are computed during augmentation itself (mix_up case).
        s_tag = False

        # op would select one augmentation branch; all branches are disabled.
        op = random.uniform(0, 1)
        advs = images
        # if op <= 2:
        #     advs = images
        #     enhance_count[0] += 1
        # elif 0.25 < op <= 0.325:
        #     eps = random.randint(5, 15)
        #     advs = pgd_attack(models, images, labels, eps=eps/255.0)
        #     enhance_count[1] += 1
        # elif 0.325 < op <= 0.4:
        #     eps = random.randint(5, 15)
        #     advs = pgd_attack(models, images, labels, eps=eps/255.0, noise_type=1)
        #     enhance_count[2] += 1
        # elif 0.4 < op <= 0.45:
        #     advs = rotation(images)
        #     enhance_count[3] += 1
        # # elif 0.3 < op <= 0.35:
        # #     advs = pgd_attack(models, images, labels)
        # #     enhance_count[4] += 1
        # # elif 0.3 < op <= 0.35:
        # #     advs = pgd_attack(models, images, labels)
        # #     enhance_count[5] += 1
        # elif 0.45 < op <= 0.5:
        #     advs = gauss(images)
        #     enhance_count[6] += 1
        # elif 0.5 < op <= 0.55:
        #     advs = ttf.invert(images)
        #     enhance_count[7] += 1
        # elif 0.55 < op <= 0.6:
        #     imgs = torch.clone(images)
        #     imgs = imgs * 255
        #     imgs = torch.tensor(imgs.cpu().numpy(), dtype=torch.uint8, device=device)
        #     advs = ttf.equalize(imgs) / 255.0
        #     enhance_count[8] += 1
        # # elif 0.3 < op <= 0.35:
        # #     advs = pgd_attack(models, images, labels)
        # #     enhance_count[9] += 1
        # elif 0.6 < op <= 0.65:
        #     brightness_factor = random.uniform(1.2, 1.7)
        #     advs = ttf.adjust_brightness(images, brightness_factor=brightness_factor)
        #     enhance_count[10] += 1
        # elif 0.65 < op <= 0.675:
        #     brightness_factor = random.uniform(0.3, 0.8)
        #     advs = ttf.adjust_brightness(images, brightness_factor=brightness_factor)
        #     enhance_count[11] += 1
        # elif 0.675 < op <= 0.7:
        #     hue_factor = random.uniform(-0.3, 0.3)
        #     advs = ttf.adjust_hue(images, hue_factor=hue_factor)
        #     enhance_count[12] += 1
        # elif 0.7 < op <= 0.75:
        #     saturation_factor = random.uniform(0.5, 5)
        #     advs = ttf.adjust_saturation(images, saturation_factor=saturation_factor)
        #     enhance_count[13] += 1
        # elif 0.75 < op <= 0.8:
        #     sharpness_factor = random.uniform(0.5, 5)
        #     advs = ttf.adjust_sharpness(images, sharpness_factor=sharpness_factor)
        #     enhance_count[14] += 1
        # elif 0.8 < op <= 0.85:
        #     threshold = random.uniform(0.3, 0.7)
        #     advs = ttf.solarize(images, threshold=threshold)
        #     enhance_count[15] += 1
        # # elif 0.3 < op <= 0.35:
        # #     advs = pgd_attack(models, images, labels)
        # #     enhance_count[16] += 1
        # elif 0.85 < op <= 0.9:
        #     deg = random.randint(-45, 45)
        #     advs = ttf.affine(images, angle=0.0, translate=[0, 0], scale=1.0, shear=[math.degrees(deg), 0.0], fill=[0.0])
        #     enhance_count[17] += 1
        # elif 0.9 < op <= 0.95:
        #     deg = random.randint(-45, 45)
        #     advs = ttf.affine(images, angle=0.0, translate=[0, 0], scale=1.0, shear=[0.0, math.degrees(deg)], fill=[0.0])
        #     enhance_count[18] += 1
        # else:
        #     advs, enhance_labels = mix_up(images, labels, enhance_labels)
        #     s_tag = True
        #     enhance_count[19] += 1

        for shape_count in range(advs.shape[0]):
            img = advs[shape_count]
            img = img * 255
            img = img.cpu().detach().numpy()
            img = img.transpose(1, 2, 0)  # CHW -> HWC for the saved dataset
            enhance_imgs.append(img)
            # Soft label was already produced during augmentation (mix_up).
            if s_tag:
                continue
            soft_label = np.zeros(10)
            soft_label[labels[shape_count].item()] += random.uniform(0, 10)  # an unnormalized soft label vector
            enhance_labels.append(soft_label)

        print(f'i={i} enhance_imgs={len(enhance_imgs)} '
              f'enhance_labels={len(enhance_labels)} '
              f'enhance_count={enhance_count}')
    return enhance_imgs, enhance_labels, enhance_count


class AddPepperNoise(object):
    """Salt-and-pepper noise transform for PIL images.

    Args:
        snr (float): noise-level knob. NOTE(review): as implemented, a pixel
            is blacked out with probability snr/2, whitened with probability
            snr/2, and left untouched with probability 1 - snr — so a HIGHER
            snr means MORE noise, the opposite of what "Signal Noise Rate"
            suggests. Confirm before tuning.
        p (float): probability of applying the transform at all.
    """

    def __init__(self, snr=0.5, p=0.5):
        assert isinstance(snr, float) and (isinstance(p, float))    # 2020 07 26 or --> and
        self.snr = snr
        self.p = p

    def __call__(self, img):
        """
        Args:
            img (PIL Image): PIL Image
        Returns:
            PIL Image: PIL image.
        """
        if random.uniform(0, 1) < self.p:
            img_ = np.array(img).copy()
            # Assumes an HWC colour image; a 2-D grayscale array would fail here.
            h, w, c = img_.shape
            signal_pct = self.snr
            noise_pct = (1 - self.snr)
            # mask values: 0 -> black pixel, 1 -> white pixel, 2 -> keep original.
            mask = np.random.choice((0, 1, 2), size=(h, w, 1), p=[signal_pct/2., signal_pct/2., noise_pct])
            mask = np.repeat(mask, c, axis=2)
            img_[mask == 0] = 0   # pepper noise (black); original comment mislabelled this as salt
            img_[mask == 1] = 255     # salt noise (white); original comment mislabelled this as pepper
            return Image.fromarray(img_.astype('uint8')).convert('RGB')
        else:
            return img


class Gaussian_noise(object):
    """Additive Gaussian noise transform for PIL images.

    The image is scaled to [0, 1], Gaussian noise with the configured mean
    and standard deviation is added, the result is clipped back to [0, 1]
    and rescaled to an 8-bit RGB image.

    Args:
        mean: mean of the noise, in normalised [0, 1] intensity units.
        sigma: standard deviation of the noise, same units.
    """

    def __init__(self, mean=0.0, sigma=1):
        self.mean = mean
        self.sigma = sigma

    def __call__(self, img):
        """Return a noisy RGB copy of ``img`` (PIL Image in, PIL Image out)."""
        # Normalise pixel intensities to [0, 1] before adding noise.
        pixels = np.array(img).copy() / 255.0
        # Draw per-pixel Gaussian noise and overlay it on the image.
        pixels = pixels + np.random.normal(self.mean, self.sigma, pixels.shape)
        # Clip out-of-range values, then rescale back to 8-bit.
        pixels = np.clip(pixels, 0, 1)
        eight_bit = np.uint8(pixels * 255)
        return Image.fromarray(eight_bit).convert('RGB')


if __name__ == '__main__':
    # Intent (translated): use the models to pick images that stay visually
    # close to the originals yet are misclassified by many models — arguably
    # "hard" samples.
    """
    用模型挑选图片，能够保证和原图的质量相似，但是又可以令众多模型识别错误的样本，可以认为是难样本？
    """
    print('执行本地数据增强脚本！\n开始加载预训练模型...')
    # models = get_models()
    # NOTE(review): pretrained-model loading is disabled — the script runs
    # with an empty ensemble, so no adversarial filtering can take place.
    models = []
    print(f'成功加载 {len(models)} 个预训练模型！')

    # Random choice between the two PIL-level noise transforms (unused below).
    trans = transforms.RandomChoice([
        AddPepperNoise(),
        Gaussian_noise(),
    ])
    # NOTE(review): the original comment said "use the test set", and indeed
    # train=False below — although the variable is named transform_train.
    transform_train = transforms.Compose([
        AddPepperNoise(),
        transforms.ToTensor(),
    ])
    dataset = torchvision.datasets.CIFAR10(root='/home/gaotiegang01/liuhao/dataset/CIFAR10', train=False, download=False, transform=transform_train)
    dataloader = DataLoader(dataset=dataset, num_workers=2, batch_size=64, shuffle=False)
    start = time.time()
    enhance_imgs = []
    enhance_labels = []
    print('开始处理数据！')
    # Initialise the per-augmentation-type counters (indexed like EnhanceType).
    enhance_count = []
    for jj in range(len(EnhanceType)):
        enhance_count.append(0)

    print('开始处理一部分原始训练集数据！')
    enhance_imgs, enhance_labels, enhance_count = gen_dataset(dataloader=dataloader, enhance_imgs=enhance_imgs,
                                                              enhance_labels=enhance_labels, enhance_count=enhance_count,
                                                              # break_limit=625
                                                              )

    # Second pass: same split with Gaussian noise instead of salt & pepper.
    transform_test = transforms.Compose([
        Gaussian_noise(),
        transforms.ToTensor(),
    ])
    test_dataset = torchvision.datasets.CIFAR10(root='/home/gaotiegang01/liuhao/dataset/CIFAR10', train=False, download=False, transform=transform_test)
    test_dataloader = DataLoader(dataset=test_dataset, num_workers=2, batch_size=64, shuffle=False)

    print('一部分原始训练集数据处理完毕！开始处理测试集的数据！')
    enhance_imgs, enhance_labels, enhance_count = gen_dataset(dataloader=test_dataloader, enhance_imgs=enhance_imgs,
                                                              enhance_labels=enhance_labels, enhance_count=enhance_count)

    # Persist the augmented dataset: uint8 HWC images plus soft labels.
    data_select = np.asarray(enhance_imgs, dtype=np.uint8)
    label_select = np.asarray(enhance_labels)

    print(data_select.shape)
    print(label_select.shape)
    np.save('data.npy', data_select)
    np.save('label.npy', label_select)
    print(f'cost_time={time.time() - start}s')
    exit()


