import numpy as np
import torch

from util.seed import set_seed
from raw.get_model import get_model
from attack.get_attack import get_attack
from defense.get_defense import get_defense

class SecurityEvaluation:
    """Evaluate how well a defense restores correct predictions on
    adversarial examples produced by a given attack."""

    def __init__(self, raw_model, attack, attack_suffix = ''):
        """Load the pre-attack ground truth and the pre-computed adversarial data.

        Args:
            raw_model: model wrapper; only ``model_name`` is used here, to
                locate the data directories.
            attack: attack object; ``targeted`` / ``llc`` select which
                target-label file to load, ``name`` locates the adversarial data.
            attack_suffix: optional suffix distinguishing variants of the same
                attack's output directory.
        """
        self.attack = attack

        # Pre-attack data: ground-truth labels and, for targeted attacks,
        # the labels the attack tries to force the model into.
        base_dir = 'data/candidate_data/' + raw_model.model_name + '/'
        self.labels = np.load(base_dir + 'labels.npy')
        if attack.targeted:
            if attack.llc:
                # presumably "least-likely class" targets — TODO confirm
                self.targets = np.load(base_dir + 'llcs.npy')
            else:
                self.targets = np.load(base_dir + 'targets.npy')
        else:
            self.targets = None

        # Post-attack data: adversarial images and the raw model's
        # predictions on them.
        base_dir = 'data/adversarial_data/' + raw_model.model_name + '_' + attack.name + attack_suffix + '/'
        self.adv_images = np.load(base_dir + 'images.npy')
        self.adv_labels = np.load(base_dir + 'labels.npy')

    def defense_predict(self, defense, defense_suffix = ''):
        """Run the defended model on the stored adversarial images.

        Args:
            defense: defense wrapper exposing ``load_enhanced_state``,
                ``transform_input``, ``predict`` and ``model``.
            defense_suffix: optional suffix selecting which enhanced
                weights to load.

        Returns:
            numpy.ndarray of predicted class labels, one per adversarial image.
        """
        defense.load_enhanced_state(suffix = defense_suffix) # load the defended network's weights

        defense.model.eval() # inference mode

        var_images = torch.from_numpy(self.adv_images.copy()) # torch.Tensor copy of the images
        var_images = defense.transform_input(var_images) # defense-specific input preprocessing

        var_out = defense.predict(var_images.to(defense.model.device)) # forward pass
        return torch.argmax(var_out, dim = 1).detach().cpu().numpy() # class labels as numpy.ndarray

    def get_accuracy(self, defense_labels):
        """Count adversarial examples that fooled the raw model and, of
        those, how many the defense classified correctly.

        Args:
            defense_labels: per-sample predictions of the defended model.

        Returns:
            Tuple ``(defense_success, attack_affected)`` of plain ints.
        """
        # Vectorized equivalent of the per-index comparison; slice the
        # stored arrays so all operands align with defense_labels.
        n = len(defense_labels)
        defense_labels = np.asarray(defense_labels)[:n]
        labels = np.asarray(self.labels)[:n]
        adv_labels = np.asarray(self.adv_labels)[:n]

        if self.attack.targeted:
            # Attack succeeded iff the raw model predicted the attack's target.
            affected = adv_labels == np.asarray(self.targets)[:n]
        else:
            # Attack succeeded iff the raw model's prediction became wrong.
            affected = adv_labels != labels

        # Defense succeeded where it recovered the true label on an
        # attack-affected sample.
        recovered = affected & (defense_labels == labels)
        return int(recovered.sum()), int(affected.sum())

def main(model_name, attack_name, defense_name_list, attack_config, defense_config_list, attack_suffix = '', defense_suffix_list = None):
    """Evaluate a list of defenses against one attack on one model.

    Args:
        model_name: name of the raw model to load.
        attack_name: name of the attack whose pre-computed adversarial data is used.
        defense_name_list: defenses to evaluate; parallel to
            defense_config_list (and defense_suffix_list, if given).
        attack_config: configuration passed to the attack constructor.
        defense_config_list: one config object per defense.
        attack_suffix: optional suffix of the adversarial-data directory.
        defense_suffix_list: optional per-defense weight-file suffixes;
            defaults to '' for every defense.

    Returns:
        List of dicts with keys 'defenseSuccess', 'attackAffected' and
        'accuracy', one per defense, in input order.
    """
    # Fix the random seed for reproducibility.
    set_seed()

    # Load the raw (undefended) model.
    raw_model = get_model(model_name)

    # Load the attack.
    attack = get_attack(attack_name, raw_model, attack_config)

    # Load the evaluation data for this model/attack pair.
    ev = SecurityEvaluation(raw_model, attack, attack_suffix = attack_suffix)

    # Normalize the suffix list so the parallel lists can be zipped.
    if defense_suffix_list is None:
        defense_suffix_list = [''] * len(defense_name_list)

    # Evaluate each defense.
    result_list = []
    for defense_name, defense_config, defense_suffix in zip(
            defense_name_list, defense_config_list, defense_suffix_list):
        defense = get_defense(defense_name, model_name, defense_config)
        labels = ev.defense_predict(defense, defense_suffix = defense_suffix)
        defense_success, attack_affected = ev.get_accuracy(labels)
        # Guard against division by zero when the attack fooled no samples.
        accuracy = defense_success / attack_affected if attack_affected else 0.0

        print('For defense `{}`, accuracy = {:.0f}/{:.0f} = {:.1f}%\n'.format(
            defense_name, defense_success, attack_affected, accuracy * 100))

        result_list.append({
            'defenseSuccess': defense_success,
            'attackAffected': attack_affected,
            'accuracy': accuracy
        })

    return result_list
