"""
Inference Script Version Apr 17th 2023


"""
import glob
from dataclasses import dataclass

import numpy as np
from scipy.special import kl_div
from torchvision import transforms, models
from skimage.metrics import structural_similarity as ssim
from torchvision.datasets import ImageFolder
from torch.nn.functional import cosine_similarity

from tools import *
from unet import *
from ddpm import *

dataset_folder_path = "att"  # replace with the actual dataset folder path

# Preprocessing applied to every image before it is fed to the networks.
transform = transforms.Compose([
    transforms.Resize((32, 32)),  # resize image to 32x32
    transforms.ToTensor(),  # convert PIL image to a [0, 1] tensor
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # standard ImageNet normalization constants
])
@dataclass
class BaseConfig:
    # Global run configuration: device, dataset, and log/checkpoint locations.
    # NOTE: attributes carry no type annotations, so @dataclass registers no
    # fields — these are plain shared class attributes evaluated at import time.
    DEVICE = get_default_device()
    DATASET = ImageFolder(dataset_folder_path, transform=transform)  # "MNIST", "Cifar-10", "Cifar-100", "Flowers"

    # Path to log inference images and save checkpoints
    root = "./Logs_Checkpoints"
    os.makedirs(root, exist_ok=True)  # side effect at class creation: ensure the log root exists

    # Current log and checkpoint directory.
    # by default start from "version_0", in training, given a value to a new name folder
    log_folder = "version_0"  # in inference: specify a folder name to load; by default the latest version is used
    checkpoint_name = "ddpm.tar"


@dataclass
class TrainingConfig:
    # Diffusion/training hyper-parameters (shared class attributes; see
    # BaseConfig for the annotation-less @dataclass caveat).
    TIMESTEPS = 1000  # Define number of diffusion timesteps
    IMG_SHAPE = (3, 32, 32) #if BaseConfig.DATASET == "MNIST" else (3, 32, 32)
    NUM_EPOCHS = 2500
    BATCH_SIZE = 128
    LR = 2e-4  # learning rate

    NUM_WORKERS = 2 if str(BaseConfig.DEVICE) != "cpu" else 0  # 0 on cpu device


@dataclass
class ModelConfig:  # setting up attention unet
    # UNet architecture hyper-parameters.
    BASE_CH = 64  # 64, 128, 256, 512
    BASE_CH_MULT = (1, 2, 4, 8)  # 32, 16, 8, 4
    APPLY_ATTENTION = (False, False, True, False)  # attention enabled only at the third level
    DROPOUT_RATE = 0.1
    TIME_EMB_MULT = 2  # 128


# Diffusion schedule/settings shared by the sampler.
sd = Diffusion_setting(num_diffusion_timesteps=TrainingConfig.TIMESTEPS,
                       img_shape=TrainingConfig.IMG_SHAPE, device=BaseConfig.DEVICE)

generate_video = False  # forwarded to inference(); controls video generation there

# test
# Resolve the log and checkpoint directories for inference mode.
log_dir, checkpoint_dir = setup_log_directory(config=BaseConfig(), inference=True)

# Diffusion UNet built from ModelConfig (architecture must match the checkpoint).
model = UNet(
    input_channels=TrainingConfig.IMG_SHAPE[0],
    output_channels=TrainingConfig.IMG_SHAPE[0],
    base_channels=ModelConfig.BASE_CH,
    base_channels_multiples=ModelConfig.BASE_CH_MULT,
    apply_attention=ModelConfig.APPLY_ATTENTION,
    dropout_rate=ModelConfig.DROPOUT_RATE,
    time_multiple=ModelConfig.TIME_EMB_MULT,
)

# Load trained weights; the trailing positional False is strict=False, so
# missing/unexpected checkpoint keys are tolerated silently.
model.load_state_dict(torch.load(os.path.join(checkpoint_dir, BaseConfig.checkpoint_name), map_location='cpu')["model"], False)
model.to(BaseConfig.DEVICE)

import torch.nn.functional as F
def get_distribution(model, img):
    """Return the mean of the model's raw outputs (logits) over the batch dim.

    Despite the name, this returns mean *logits*, not a softmax distribution:
    the original implementation also computed ``F.softmax(outputs, dim=1)``
    but discarded the result, so that dead computation has been removed.

    Parameters
    ----------
    model : callable mapping an input batch to per-class outputs (B, C).
    img : input tensor batch accepted by ``model``.

    Returns
    -------
    Tensor of shape (C,): per-class mean of the raw outputs over the batch.
    """
    with torch.no_grad():  # inference only; no autograd bookkeeping needed
        outputs = model(img)
        class_mean = torch.mean(outputs, dim=0)
    return class_mean


# 定义ResNet-50模型
# ResNet-50 classifier with its final fully-connected layer resized so the
# network outputs ``num_classes`` logits.
class ResNet50(nn.Module):
    def __init__(self, num_classes=10):
        """Build a non-pretrained ResNet-50 with a ``num_classes``-way head."""
        super(ResNet50, self).__init__()
        # BUG FIX: this file imports ``models`` from torchvision but never
        # imports ``torchvision`` itself, so ``torchvision.models.resnet50``
        # raised NameError on instantiation. Use the imported alias instead.
        self.resnet50 = models.resnet50(pretrained=False)
        # Replace the classification head to match the target dataset.
        self.resnet50.fc = nn.Linear(self.resnet50.fc.in_features, num_classes)

    def forward(self, x):
        """Forward pass: delegate directly to the wrapped ResNet-50."""
        return self.resnet50(x)



# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# modelr = ResNet50().to(device)
# modelr.load_state_dict(torch.load("./resnet/resnet101_cifar10_2.pth"))
# modelr.eval()
# Build MobileNetV2 with pretrained backbone weights, replace its classifier
# head with a 10-way linear layer, then load fine-tuned weights from disk
# (presumably CIFAR-10, judging by the checkpoint filename).
net = models.mobilenet_v2(pretrained=True)
num_ftrs = net.classifier[1].in_features
net.classifier[1] = nn.Linear(num_ftrs, 10)
net.load_state_dict(torch.load("./checkpoints/mobilenetv2_cifar10_1.pth"))

# Select the device, move the classifier onto it, and switch to eval mode.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = net.to(device)
net.eval()

def get_last_layer_output(model, img):
    """Return the probability-weighted mean class index for one sample.

    Runs ``model`` on ``img`` in eval mode, softmaxes the first sample's
    logits, and returns ``sum_i i * p_i`` — the expected class index under
    the predicted distribution.

    Parameters
    ----------
    model : nn.Module mapping an image batch to class logits of shape (B, C).
    img : input batch; only the first sample's distribution is used.

    Returns
    -------
    0-dim tensor holding the expected class index.
    """
    # Put the model in evaluation mode (disables dropout etc.).
    model.eval()

    # Run inference without autograd.
    with torch.no_grad():
        output = model(img)

    # Per-sample class probability distributions.
    probabilities = torch.softmax(output, dim=1)

    # BUG FIX: torch.arange defaults to int64 on the CPU; matmul with float
    # (possibly CUDA) probabilities raises a dtype/device mismatch. Build the
    # index vector with the same device and dtype as the probabilities.
    indices = torch.arange(
        probabilities.shape[1],
        device=probabilities.device,
        dtype=probabilities.dtype,
    )
    class_means = indices @ probabilities[0]

    return class_means

def calc_ssim(img1_path, img2_path):
    """Compute the SSIM score between two image files.

    Both images are converted to grayscale and the second image is resized
    to the first image's dimensions before comparison.

    Parameters
    ----------
    img1_path, img2_path : paths to the two image files.

    Returns
    -------
    float SSIM score in [-1, 1] (1 means identical).
    """
    img1 = Image.open(img1_path).convert('L')
    img2 = Image.open(img2_path).convert('L')
    img2 = img2.resize(img1.size)
    # BUG FIX: the original converted to numpy arrays twice on consecutive
    # identical lines; the redundant (no-op) duplicate has been removed.
    img1, img2 = np.array(img1), np.array(img2)
    # Grayscale pixel values span 0-255, hence data_range=255. If the images
    # were floats in [0, 1], data_range would have to be 1 instead.
    ssim_score = ssim(img1, img2, data_range=255)
    return ssim_score

def bhattacharyya_distance(p, q):
    """Bhattacharyya distance between two probability distributions.

    Computed as ``-ln(BC)`` where ``BC = sum(sqrt(p * q))`` is the
    Bhattacharyya coefficient; the distance is 0 for identical distributions
    and grows as they diverge.
    """
    coefficient = (p * q).sqrt().sum()
    return -coefficient.log()

# Adversarial-example detection loop: for each attacked PNG, purify it with
# the diffusion model, then compare the classifier's output distribution on
# the original vs. the purified image via the Bhattacharyya distance.
a=0  # count of images flagged as adversarial
num=1000  # cap on the number of images processed
loop = 0  # images processed so far
png_files = glob.glob(f'../diffusion/attack/att_mb_pgd/*.png')

for png_file in png_files:

    img_adv = Image.open(png_file)
    img_adv = transform(img_adv).unsqueeze(0).to(device)
    # inference(model, sd,png_file, img_shape=TrainingConfig.IMG_SHAPE, num_images=1, timesteps=150, nrow=1,
    #           log_dir=log_dir, generate_video=generate_video, device=BaseConfig.DEVICE)
    # Purify the (potentially adversarial) image via diffusion; returns the
    # path of the generated image on disk.
    save_path = inference(model, sd,png_file, img_shape=TrainingConfig.IMG_SHAPE, num_images=1, timesteps=200, nrow=1,
               log_dir=log_dir, generate_video=generate_video, device=BaseConfig.DEVICE)

    generated_image = Image.open(save_path)
    generated_image = transform(generated_image).unsqueeze(0).to(device)

    # Mean classifier outputs for original vs. purified image (printed below).
    mean1=get_distribution(net,img_adv)
    mean2=get_distribution(net,generated_image)

    # Softmax class distributions for the Bhattacharyya comparison.
    out_adv = F.softmax(net(img_adv),dim=1)
    out_gen = F.softmax(net(generated_image),dim=1)
    distance = torch.sum(torch.abs(mean1 - mean2))  # L1 distance between mean outputs (diagnostic only)
    bd = bhattacharyya_distance(out_adv, out_gen)
    loop +=1
    print(bd.item())
    print(mean1,mean2)
    print(distance)
    # Flag as adversarial when the distributions diverge beyond the threshold.
    if bd.item()>0.1 :
        a = a + 1
    print(a)
    if loop>num:
        break
print("检测到对抗样本数量为",a,"个")  # "Number of adversarial examples detected: <a>"








