import numpy as np
import json
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision.utils
from torchvision import models
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import os

# Select the CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the ImageNet class-index file: maps "0".."999" -> [wnid, human_label].
with open("./data/imagenet_class_index.json") as f:
    class_idx = json.load(f)
# idx2label[k] is the human-readable name of ImageNet class k.
idx2label = [class_idx[str(k)][1] for k in range(len(class_idx))]

# Image preprocessing: Inception v3 expects 299x299 inputs; ToTensor also
# rescales pixel values into [0, 1], the range pgd_attack clamps to.
transform = transforms.Compose([
    transforms.Resize((299, 299)),
    transforms.ToTensor(),  # [0, 255] -> [0, 1]
])

# Custom dataset loader that remaps folder-derived labels to a caller-supplied
# label list.
def image_folder_custom_label(root, transform, custom_label):
    """Build an ImageFolder whose targets index into *custom_label*.

    ImageFolder assigns labels by sorted sub-directory name; here each
    directory name is looked up in *custom_label* so targets become indices
    into that list (e.g. ImageNet class indices).

    Args:
        root: dataset root directory (one sub-directory per class).
        transform: image transform applied to every sample.
        custom_label: list of class names; every directory name under
            *root* must appear in it.

    Returns:
        The configured ImageFolder dataset.
    """
    # Build the dataset once; the original constructed it twice just to
    # read .classes, and used the global idx2label instead of the
    # custom_label parameter when overriding classes/class_to_idx.
    data = dsets.ImageFolder(root=root, transform=transform)
    folder_classes = data.classes  # sorted directory names
    # target_transform is applied lazily in __getitem__, so setting it
    # after construction is equivalent to passing it to the constructor.
    data.target_transform = lambda x: custom_label.index(folder_classes[x])
    data.classes = custom_label
    data.class_to_idx = {label: i for i, label in enumerate(custom_label)}

    return data

# Load the evaluation images, remapping folder labels to ImageNet indices.
normal_data = image_folder_custom_label(root='./data/imagenet', transform=transform, custom_label=idx2label)
# batch_size=1: samples are attacked, printed, and visualized one at a time.
normal_loader = Data.DataLoader(normal_data, batch_size=1, shuffle=False)

# Visualization helper.
def imshow(img, title):
    """Display a (C, H, W) image tensor with matplotlib under *title*."""
    plt.figure(figsize=(5, 15))
    # matplotlib expects channels-last (H, W, C).
    arr = img.numpy()
    plt.imshow(np.transpose(arr, (1, 2, 0)))
    plt.title(title)
    plt.show()

# Load Inception v3 pretrained on ImageNet and switch to evaluation mode
# (disables dropout/batch-norm updates and the auxiliary training output,
# giving deterministic single-tensor predictions).
model = models.inception_v3(weights='IMAGENET1K_V1').to(device)
model.eval()

# PGD attack implementation.
def pgd_attack(model, images, labels, eps=0.3, alpha=2/255, iters=40):
    """Generate untargeted adversarial examples with PGD (Madry et al.).

    Iteratively ascends the cross-entropy loss by alpha * sign(grad),
    projecting back into the L-inf eps-ball around the originals and into
    the valid pixel range [0, 1] after every step.

    Args:
        model: classifier returning logits for a batch of images.
        images: input batch with values in [0, 1]; not modified in place.
        labels: ground-truth class indices.
        eps: L-inf radius of the allowed perturbation.
        alpha: per-iteration step size.
        iters: number of PGD iterations.

    Returns:
        Adversarial images (detached), clamped to [0, 1] and within eps of
        the originals, on the same device as *images*.
    """
    # Derive the device from the input instead of relying on a module-level
    # global, so the function is self-contained.
    device = images.device
    labels = labels.to(device)
    criterion = nn.CrossEntropyLoss()
    # Work on a detached copy so the caller's tensor never has
    # requires_grad flipped on it as a side effect.
    ori_images = images.detach()
    images = ori_images.clone()

    for _ in range(iters):
        images.requires_grad = True
        outputs = model(images)

        model.zero_grad()
        cost = criterion(outputs, labels)
        cost.backward()

        # Ascend the loss, then project back into the eps-ball and clamp
        # to the valid pixel range; detach so the next iteration starts a
        # fresh graph.
        adv_images = images + alpha * images.grad.sign()
        eta = torch.clamp(adv_images - ori_images, min=-eps, max=eps)
        images = torch.clamp(ori_images + eta, min=0, max=1).detach_()

    return images

# Run the attack over the dataset, printing predictions before/after each
# attack and the final robust accuracy; adversarial images are saved to disk.
output_dir = './adversarial_samples'
os.makedirs(output_dir, exist_ok=True)

correct = 0
total = 0

for i, (images, labels) in enumerate(normal_loader):
    images, labels = images.to(device), labels.to(device)

    # Clean prediction: plain inference, so skip autograd graph building.
    with torch.no_grad():
        outputs = model(images)
    _, pre = torch.max(outputs, 1)
    pre_label = idx2label[pre.item()]  # human-readable predicted class
    print(f"Original Prediction (Before Attack): {pre_label}")

    # Craft the adversarial example (PGD needs gradients internally).
    adv_images = pgd_attack(model, images, labels)

    # Prediction on the adversarial example.
    with torch.no_grad():
        outputs = model(adv_images)
    _, adv_pre = torch.max(outputs, 1)
    adv_label = idx2label[adv_pre.item()]  # class predicted after the attack

    print(f"Adversarial Prediction (After Attack): {adv_label}")

    # Visualize and save the adversarial sample.
    imshow(torchvision.utils.make_grid(adv_images.cpu().data, normalize=True), [adv_label])
    torchvision.utils.save_image(adv_images, f"{output_dir}/adv_image_{i}.png")

    # Accuracy bookkeeping; .item() avoids accumulating 0-d tensors in
    # `correct` (the original summed tensors across the whole run).
    total += 1
    correct += (adv_pre == labels).sum().item()

# Guard against an empty dataset so the summary never divides by zero.
if total > 0:
    print('Accuracy after attack: {:.2f}%'.format(100 * float(correct) / total))
else:
    print('Accuracy after attack: n/a (no samples)')
