'''
自监督攻击, 攻击干净样本生成对抗样本用于训练净化网络purify
Self supervised attack is purely based on maximizing the perceptual feature difference.最大化感知特征差异
'''
# 重写图像保存的实现

import os
import sys
sys.path.append('../')
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torchvision.utils import save_image
import torchvision
import numpy as np
import random
import re
from PIL import Image
from torch.utils.data import Dataset
import time

def get_hms(seconds):
    """Break a duration in seconds into an (hours, minutes, seconds) tuple."""
    hours = seconds // 3600
    remainder = seconds % 3600
    return hours, remainder // 60, remainder % 60

# 通过设置随机种子可使训练结果可复现。
def set_random_seed(seed):
    """Seed every RNG this script uses (Python, NumPy, torch CPU and CUDA).

    Keeps training/attack runs reproducible; the CUDA seeders are no-ops
    on CPU-only machines.
    """
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)

# Compiled once at import time instead of on every sort_key call.
_RE_DIGITS = re.compile(r'(\d+)')

def sort_key(s):
    """Natural-sort key: compare embedded digit runs numerically.

    Splitting with a capturing group yields alternating text/digit pieces
    (digits at the odd indices); converting those to int makes
    "img2.png" sort before "img10.png".
    """
    pieces = _RE_DIGITS.split(s)
    pieces[1::2] = [int(d) for d in pieces[1::2]]
    return pieces

class AdvImageDataset(Dataset):
    """Dataset over a directory of clean images.

    Yields ``(image, filename)`` pairs, where ``image`` is the transformed
    RGB image and ``filename`` is its basename (used to name the saved
    adversarial counterpart).

    Args:
        img_dirs: directory to load clean images from.
        transform: transform applied to each loaded PIL image.
    """

    def __init__(self, img_dirs, transform):
        self.img_dirs = img_dirs
        self.img_names = self.__get_imgnames__()
        self.transform = transform

    def __get_imgnames__(self):
        # Natural-sort the directory listing so item order is stable
        # across runs and numbered files come out in numeric order.
        names = sorted(os.listdir(self.img_dirs), key=sort_key)
        return [os.path.join(self.img_dirs, name) for name in names]

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        path = self.img_names[idx]
        img = Image.open(path).convert('RGB')
        if self.transform:
            img = self.transform(img)
        return img, os.path.basename(path)

#################################################################
# 特征提取
# VGG [42] network pretrained on ImageNet.
# 浅层特征抽取网络，提取浅层特征。
# 低分辨率图像进入后会经过一个卷积+RELU函数，将输入通道数调整为64??
class FeatureExtractor_VGG(nn.Module):
    """Perceptual feature extractor: the first `ssp_layer` feature layers
    of an ImageNet-pretrained VGG-16, in eval mode."""

    def __init__(self, ssp_layer):
        super(FeatureExtractor_VGG, self).__init__()
        # Other backbone models can be swapped in here.
        backbone = torchvision.models.vgg16(pretrained=True)
        layers = list(backbone.features)[:ssp_layer]
        self.feature_extractor = nn.Sequential(*layers).eval()

    def forward(self, img):
        return self.feature_extractor(img)

# ResNet50
class FeatureExtractor_ResNet50(nn.Module):
    """Perceptual feature extractor: the first `ssp_layer` child modules
    of an ImageNet-pretrained ResNet-50, in eval mode."""

    def __init__(self, ssp_layer):
        super(FeatureExtractor_ResNet50, self).__init__()
        backbone = torchvision.models.resnet50(pretrained=True)
        stages = list(backbone.children())[:ssp_layer]
        self.feature_extractor = nn.Sequential(*stages).eval()

    def forward(self, img):
        return self.feature_extractor(img)

# SqueezeNet
class FeatureExtractor_SqueezeNet(nn.Module):
    """Perceptual feature extractor: the first `ssp_layer` feature layers
    of an ImageNet-pretrained SqueezeNet 1.1, in eval mode."""

    def __init__(self, ssp_layer):
        super(FeatureExtractor_SqueezeNet, self).__init__()
        backbone = torchvision.models.squeezenet1_1(pretrained=True)
        layers = list(backbone.features)[:ssp_layer]
        self.feature_extractor = nn.Sequential(*layers).eval()

    def forward(self, img):
        return self.feature_extractor(img)

# AlexNet
class FeatureExtractor_AlexNet(nn.Module):
    """Perceptual feature extractor: the first `ssp_layer` feature layers
    of an ImageNet-pretrained AlexNet, in eval mode."""

    def __init__(self, ssp_layer):
        super(FeatureExtractor_AlexNet, self).__init__()
        backbone = torchvision.models.alexnet(pretrained=True)
        layers = list(backbone.features)[:ssp_layer]
        self.feature_extractor = nn.Sequential(*layers).eval()

    def forward(self, img):
        return self.feature_extractor(img)

# Command-line options for the SSP attack.
parser = argparse.ArgumentParser(description='SSP Attack') # create the parser
parser.add_argument('--sourcedir', default='MS-COCO/pick_training_resize480') # default='clean_imgs'
parser.add_argument('--targetdir', default='MS-COCO/ssp_pick_training_resize480_models4_parallel_roundweightloss_dim_pgd_momentum_iters_random_normTrue')
parser.add_argument('--batch_size', type=int, default=16, help='Batch size')
parser.add_argument('--eps', type=int, default= 16,  help ='perturbation budget')
parser.add_argument('--step_size', type=float, default=0.01, help='Step size')
parser.add_argument('--iters', type=int, default=100, help='Number of SSP Iterations')

parser.add_argument('--decay', type=float, default=1.0, help='momentum factor')
parser.add_argument('--resize_rate', type=float, default=0.9, help='resize factor used in input diversity')
parser.add_argument('--diversity_prob', type=float, default=0.5, help='the probability of applying input diversity')
# NOTE(review): argparse's type=bool treats any non-empty string (even
# "False") as True; only the default is reliable from the command line.
parser.add_argument('--random_start', type=bool, default=True, help='using random initialization of delta')

parser.add_argument('--ssp_layer_VGG', type=int, default=16, help='VGG layer that is going to be used in SSP')
parser.add_argument('--ssp_layer_ResNet', type=int, default=6, help='ResNet layer that is going to be used in SSP')
parser.add_argument('--ssp_layer_SqueezeNet', type=int, default=5, help='SqueezeNet layer that is going to be used in SSP')
parser.add_argument('--ssp_layer_AlexNet', type=int, default=10, help='AlexNet layer that is going to be used in SSP')

args = parser.parse_args() # bind the options declared above into a namespace
print(args)

# Prepare the CSV-style loss log, written next to the output directory.
# NOTE(review): f stays open for the whole run and is never explicitly closed;
# every print below uses flush=True so nothing is lost on abnormal exit.
f = open(args.targetdir + '.txt', 'w')
print(args, file=f, flush=True)
start_time = time.time()
# get_hms is fed a raw epoch timestamp here, so the printed h:m:s is a huge
# hour count; only differences between timestamps are meaningful durations.
print('| start_time: %d:%02d:%02d' % (get_hms(start_time)), file=f, flush=True)
print("batch,iter,loss_VGG,loss_ResNet,loss_SqueezeNet,loss_AlexNet,loss_total", file=f, flush=True)

set_random_seed(0) # fix all RNG seeds so the run is reproducible

# Run on GPU when available, otherwise CPU; models and tensors are moved
# onto this device with .to(device) below.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')

os.makedirs(args.targetdir, exist_ok=True)

# Frozen pretrained networks that define the perceptual feature spaces
# whose distance the attack maximizes.
feature_extractor_VGG = FeatureExtractor_VGG(args.ssp_layer_VGG).to(device)
feature_extractor_ResNet50 = FeatureExtractor_ResNet50(args.ssp_layer_ResNet).to(device)
feature_extractor_SqueezeNet = FeatureExtractor_SqueezeNet(args.ssp_layer_SqueezeNet).to(device)
feature_extractor_AlexNet = FeatureExtractor_AlexNet(args.ssp_layer_AlexNet).to(device)

criterion = torch.nn.MSELoss().to(device)
eps = args.eps / 255 # perturbation budget rescaled to [0, 1] pixel range (e.g. 16/255)
step = args.step_size # PGD step size
decay = args.decay # momentum factor
resize_rate = args.resize_rate # resize factor for input diversity
diversity_prob = args.diversity_prob # probability of applying input diversity
random_start = args.random_start # whether to randomly initialize delta

# Images are only converted to tensors here; ImageNet normalization is
# applied later, per attack step, by normalize().
trans = transforms.Compose([
        transforms.ToTensor()
    ])
dataset = AdvImageDataset(img_dirs=args.sourcedir,
                        transform=trans)
data_loader = torch.utils.data.DataLoader(
    dataset,                     # dataset to draw batches from
    batch_size=args.batch_size,  # samples per batch
    shuffle=False, 
    num_workers=4,               # loader subprocesses (0 = load in main process)
    pin_memory=True)             # copy tensors into CUDA pinned memory before returning

# ImageNet normalization constants: image = (image - mean) / std.
mean = [0.485, 0.456, 0.406]  # per-channel dataset mean (RGB)
std = [0.229, 0.224, 0.225]   # per-channel dataset std (RGB)

# Broadcastable (1, 3, 1, 1) views built once so normalize() is a single
# vectorized expression instead of three per-channel in-place assignments.
_mean_t = torch.tensor(mean).view(1, 3, 1, 1)
_std_t = torch.tensor(std).view(1, 3, 1, 1)

def normalize(t):
    """Return an ImageNet-normalized copy of a (N, 3, H, W) tensor.

    Unlike the previous in-place version, the input tensor is left
    untouched; every call site in this file passes `x.clone()` and uses
    the return value, so the produced values are identical.
    """
    return (t - _mean_t.to(t.device)) / _std_t.to(t.device)

def input_diversity(x, resize_rate=None, diversity_prob=None):
    """Randomly resize-and-pad a batch (DI input-diversity transform).

    With probability `diversity_prob` the batch is rescaled to a random
    square size and zero-padded at a random offset back onto a fixed
    canvas; otherwise the input is returned unchanged. Assumes square
    images (only x.shape[-1] is inspected).

    Args:
        x: image batch of shape (N, C, H, W).
        resize_rate: scale factor; when None (the original call signature)
            the module-level value set from argparse is used. Must differ
            from 1.0 so the random size range is non-empty.
        diversity_prob: probability of applying the transform; when None
            the module-level value is used.
    """
    if resize_rate is None:
        resize_rate = globals()["resize_rate"]
    if diversity_prob is None:
        diversity_prob = globals()["diversity_prob"]

    img_size = x.shape[-1]
    img_resize = int(img_size * resize_rate)

    # For resize_rate < 1 we shrink inside the original canvas, so swap the
    # bounds: the random size lives in [small, original).
    if resize_rate < 1:
        img_size, img_resize = img_resize, x.shape[-1]

    # Use plain Python ints throughout: the original passed 0-d int tensors
    # as F.interpolate's `size` and as pad widths, which newer torch
    # versions reject. One randint draw per value keeps the RNG stream
    # identical to the original.
    rnd = int(torch.randint(low=img_size, high=img_resize, size=(1,), dtype=torch.int32).item())
    rescaled = F.interpolate(x, size=[rnd, rnd], mode='bilinear', align_corners=False)

    # rnd < img_resize, so both remainders are >= 1 and randint is valid.
    h_rem = img_resize - rnd
    w_rem = img_resize - rnd
    pad_top = int(torch.randint(low=0, high=h_rem, size=(1,), dtype=torch.int32).item())
    pad_left = int(torch.randint(low=0, high=w_rem, size=(1,), dtype=torch.int32).item())
    padded = F.pad(rescaled,
                   [pad_left, w_rem - pad_left, pad_top, h_rem - pad_top],
                   value=0)

    return padded if torch.rand(1).item() < diversity_prob else x


for i, (img, img_name) in enumerate(data_loader):
    # Generate adversarial examples for this batch of clean images.
    img = img.to(device)

    # MI-FGSM-style momentum buffer and the adversarial image, both the
    # same shape as the clean batch.
    momentum = torch.zeros_like(img).detach().to(device)
    adv = img.clone().detach()
    
    if random_start:
        # PGD random start: begin at a uniformly random point inside the
        # eps-ball, clamped back to valid pixel range [0, 1].
        adv = adv + torch.empty_like(adv).uniform_(-eps, eps)
        adv = torch.clamp(adv, min=0, max=1).detach()

    # NOTE(review): the per-batch iteration count is drawn uniformly from
    # [1, 200] and overrides --iters; presumably intentional, to vary the
    # attack strength across batches — confirm with the author.
    iters = random.randint(1, 200)
    print('Number of SSP Iterations:', iters)
    for t in range(iters):
        # SSP + PGD step: maximize the feature-space MSE between adv and
        # the clean image across all four extractors.
        adv.requires_grad = True
        
        # The adversarial branch goes through input diversity; the clean
        # reference branch does not.
        loss_VGG = criterion(feature_extractor_VGG(input_diversity(normalize(adv.clone()))), feature_extractor_VGG(normalize(img.clone().detach())))
        loss_ResNet = criterion(feature_extractor_ResNet50(input_diversity(normalize(adv.clone()))), feature_extractor_ResNet50(normalize(img.clone().detach())))
        loss_SqueezeNet = criterion(feature_extractor_SqueezeNet(input_diversity(normalize(adv.clone()))), feature_extractor_SqueezeNet(normalize(img.clone().detach())))
        loss_AlexNet = criterion(feature_extractor_AlexNet(input_diversity(normalize(adv.clone()))), feature_extractor_AlexNet(normalize(img.clone().detach())))
     
        # Rounded ratio weights bring each model's loss to roughly the
        # VGG loss's scale before summing.
        loss_ResNet_weight = torch.round(loss_VGG / loss_ResNet)
        loss_SqueezeNet_weight = torch.round(loss_VGG / loss_SqueezeNet)
        loss_AlexNet_weight = torch.round(loss_VGG / loss_AlexNet)
        loss = 1 * loss_VGG + loss_ResNet_weight * loss_ResNet + loss_SqueezeNet_weight * loss_SqueezeNet + loss_AlexNet_weight * loss_AlexNet
        loss.backward() # backpropagate to get d(loss)/d(adv)

        # Momentum update: L1-normalize the gradient per sample, then
        # accumulate with decay factor.
        grad = adv.grad
        grad = grad / torch.mean(torch.abs(grad), dim=(1,2,3), keepdim=True)
        grad = grad + momentum * decay
        momentum = grad
        # Ascent step (maximizing the perceptual loss).
        adv.data = adv.data + step * grad.sign()

        # Project back into the eps-ball around the clean image and the
        # valid pixel range [0, 1].
        delta = torch.clamp(adv.data - img.data, min=-eps, max=eps)
        adv.data = torch.clamp(img.data + delta, min=0, max=1).detach()
        adv.grad.data.zero_() # clear the accumulated gradient before the next step
       
        # Append one CSV row per iteration to the log file.
        print("{},{},{},{},{},{},{}".format(i, t, 
                                            (loss_VGG).cpu().detach().numpy(), 
                                            (loss_ResNet_weight * loss_ResNet).cpu().detach().numpy(), 
                                            (loss_SqueezeNet_weight * loss_SqueezeNet).cpu().detach().numpy(), 
                                            (loss_AlexNet_weight * loss_AlexNet).cpu().detach().numpy(), 
                                            loss.cpu().detach().numpy()), file=f, flush=True)

    # Save each adversarial image under its original filename.
    for img_index in range(img.size()[0]):
        adv_name = img_name[img_index]
        adv_path = os.path.join(args.targetdir, adv_name)
        save_image(adv[img_index], adv_path, nrow=1, normalize=False)
    
    # NOTE(review): overstates the count on the final partial batch.
    print('Number of processed images:', args.batch_size*(i+1))

# Final timing summary. get_hms converts raw epoch seconds, so the absolute
# end "timestamp" reads as a huge hour count; only the difference on the
# last line is a meaningful duration. The log file f is left to be closed
# on interpreter exit.
end_time = time.time()
print('| end_time: %d:%02d:%02d' % get_hms(end_time), file=f, flush=True)
print('| The time used: %d:%02d:%02d' % (get_hms(end_time - start_time)), file=f, flush=True)
