# Test the model's outputs and run adversarial attacks against it.
# import packages
import torch
import torchvision.models as models
import torch.nn as nn
import torchvision.transforms as transforms

import foolbox as fb
from foolbox.attacks.boundary_attack import BoundaryAttack
from foolbox.attacks.base import get_is_adversarial
from foolbox.criteria import Criterion
from typing import TypeVar, Any
from abc import ABC, abstractmethod
import eagerpy as ep
from torchvision import transforms as T

import sys
sys.path.append('../')
from ml_gcn_model.util import *
from data.data_voc import Voc2007Classification
from ml_liw_model.models import Inceptionv3Rank
# custom functions

class TargetedMisclassificationML(Criterion):
    """Multi-label targeted criterion for foolbox attacks.

    A perturbed input is considered adversarial when the thresholded
    multi-label prediction matches the target label vector on EVERY class.

    Args:
        target_classes: Binary (0/1) label tensor of shape
            ``(batch, num_classes)``.
    """

    def __init__(self, target_classes: Any) -> None:
        super().__init__()
        self.target_classes: ep.Tensor = ep.astensor(target_classes)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.target_classes!r})"

    def __call__(self, perturbed: Any, outputs: Any) -> Any:
        # FIX: the original annotated these parameters as ``T``, but ``T`` was
        # never a TypeVar here — ``from torchvision import transforms as T``
        # shadowed it, so the annotation pointed at a module. Use ``Any``.
        outputs_, restore_type = ep.astensor_(outputs)
        del perturbed, outputs
        # Threshold scores at 0.5 to get a 0/1 predicted label matrix.
        classes = (outputs_ >= 0.5) + 0

        assert classes.shape == self.target_classes.shape
        # Adversarial only if every label matches the target exactly.
        is_adv = ep.all(classes == self.target_classes, axis=1)
        return restore_type(is_adv)

def boundary_atk(model, input, tar_sample, tar_label, device="cuda", batch_size=32, epsilon=0.5, steps=500):
    """Run a targeted Boundary Attack on a batch of inputs.

    Args:
        model: A ``torch.nn.Module`` (wrapped automatically) or a foolbox model.
        input: Batch of clean images to perturb. (Name shadows the builtin but
            is kept for caller compatibility.)
        tar_sample: Starting points — samples already classified as the target.
        tar_label: Binary target label matrix ``(batch, num_classes)``.
        device: Device string for the attack.
        batch_size: Kept for backward compatibility; no longer used to count
            successes (the count now comes from ``is_adv`` directly).
        epsilon: Perturbation budget for the epsilon-limited result.
        steps: Number of Boundary Attack steps.

    Returns:
        Tuple ``(pert, n_success)`` where ``pert`` is the best adversarial
        batch found and ``n_success`` is an ``int`` count of samples whose
        epsilon-limited perturbation is still adversarial.
    """
    if isinstance(model, nn.Module):
        model = fb.models.pytorch.PyTorchModel(model=model, bounds=(0, 1), device=device)
    criterion = TargetedMisclassificationML(target_classes=tar_label.to(device))
    attack = BoundaryAttack(steps=steps)
    # pert is the best adversarial found (possibly a large perturbation);
    # lmt_pert is clipped to the epsilon ball but may not be adversarial.
    pert, lmt_pert, _ = attack(model=model, inputs=input.to(device), criterion=criterion,
                               starting_points=tar_sample.to(device), epsilons=epsilon)
    is_adversarial = get_is_adversarial(criterion, model)
    is_adv = is_adversarial(lmt_pert)
    # BUG FIX: count successes directly. The old ``batch_size - failed`` form
    # returned a 0-dim tensor when any sample failed and an int otherwise,
    # and miscounted whenever the actual batch was smaller than batch_size.
    n_success = int(is_adv.sum().item())
    return pert, n_success

# --- Script setup: load the pretrained Inception-v3 multi-label model. ---
device = torch.device("cuda" if (torch.cuda.is_available()) else "cpu")
print(device)
num_classes = 20  # PASCAL VOC 2007 has 20 object categories
torchmodel = models.inception_v3(pretrained=True)
torchmodel.eval()
# BUG FIX: the torchvision attribute is ``aux_logits`` (plural). The original
# set ``aux_logit``, which silently created a new, unused attribute.
torchmodel.aux_logits = False
# Freeze the backbone — this script only runs inference/attacks.
for param in torchmodel.parameters():
    param.requires_grad = False

torchmodel = Inceptionv3Rank(torchmodel, num_classes)
torchmodel.load_state_dict(torch.load("../checkpoint/mlliw/voc2007/model_best.pth.tar", map_location=device))
torchmodel.eval()
torchmodel.to(device)

# NOTE(review): path mentions NUSWIDE but the dataset below is VOC2007 — confirm.
data = '../data/NUSWIDE'
img_size = 448
workers = 4
batch_size = 32


# --- Data pipeline and attack loop. ---
test_dataset = Voc2007Classification(data, 'test')
data_transforms = transforms.Compose([
    Warp(img_size),
    transforms.ToTensor(),
])
test_dataset.transform = data_transforms
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          num_workers=workers)
num_classes = 20
target_sample = []
target_label = []
suc_it = 0
# Batch 0 provides the targets; batches 1..10 are attacked against them.
for i, (img, label) in enumerate(test_loader):
    if i > 0:
        print(f"iter {i} attack start")
        pert, suc = boundary_atk(torchmodel, img[0], target_sample, target_label,
                                 device=device, batch_size=batch_size)
        # BUG FIX: coerce to int — boundary_atk could return a 0-dim tensor,
        # which would make the final summary print a tensor repr.
        suc_it += int(suc)
        if i > 9:
            break
    else:
        # First batch becomes the target: keep its samples and the model's
        # thresholded predictions as the target label matrix.
        target_sample = img[0]
        output = torchmodel(target_sample.to(device))
        target_label = (output >= 0.5) + 0
        del output

# BUG FIX: corrected message typo ("succes" -> "success").
print(f"success {suc_it} of {10*batch_size}")