import random
import torch
import yaml
import cv2
import sys
import numpy as np
import argparse
from torch.utils.data import DataLoader

from attack.fgsm import FGSM
from attack.pgd import PGD
from attack.bim import BIM
from attack.cw2 import CW2
from attack.autopgd import AutoPGD
from corAttack.corrupt import CORRUPT
from sysAttack.system import SYSTEM

from Evaluation.acac import ACAC
from Evaluation.actc import ACTC
from Evaluation.acc import ACC
from Evaluation.asr import ASR
from Evaluation.bd import BD
from Evaluation.eni import ENI
from Evaluation.sns import SNS
from Evaluation.nsense import NSense
from Evaluation.mpath import MPath
from Evaluation.ncoverage import Ncoverage

from dataset import MyDataset
from select_imagenetmodel import load_imagenet_model

import os

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


def str2bool(v):
    """Parse a command-line flag value into a bool.

    Accepts real bools unchanged; otherwise matches common textual
    spellings case-insensitively ('yes'/'no', 'true'/'false', 't'/'f',
    'y'/'n', '1'/'0').

    Raises:
        argparse.ArgumentTypeError: for any unrecognised value, so
            argparse can report a clean usage error.
    """
    if isinstance(v, bool):
        return v
    # Lower-case once instead of once per comparison branch.
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    # Fixed the stray '!' after the period in the original message.
    raise argparse.ArgumentTypeError('Boolean value expected.')


def collect_args():
    """Parse the command-line options controlling model, attack and metric.

    Returns:
        argparse.Namespace with fields: model, attack, is_targeted,
        target_class, batch_size, generate_adv, evaluation_method.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--model', type=str, default='resnet50')
    arg_parser.add_argument('--attack', type=str, default='bim')
    arg_parser.add_argument('--is_targeted', type=str2bool, default=False)
    arg_parser.add_argument('--target_class', type=int, default=0)
    arg_parser.add_argument('--batch_size', type=int, default=16)
    arg_parser.add_argument('--generate_adv', type=str2bool, default=False)
    arg_parser.add_argument("--evaluation_method", type=str, default="ASR")
    return arg_parser.parse_args()


def load_dataloader(batch_size):
    """Build a DataLoader over the local ImageNet subset.

    Args:
        batch_size: number of samples per batch.

    Returns:
        (dataloader, dataset length) tuple.
    """
    images_dir = 'dataset/imagenet/images'
    meta_file = 'dataset/imagenet/meta.txt'
    dataset = MyDataset(images_dir, meta_file)
    loader = DataLoader(dataset, batch_size=batch_size)
    return loader, len(dataset)


def load_method(model, device, attack, is_targeted, config):
    """Instantiate the attack object named by `attack`.

    fgsm/pgd/cw2/bim share one constructor signature; autopgd, corAttack
    and sysAttack refuse targeted mode and exit the process. An unknown
    attack name also exits with a message.
    """
    # Attacks sharing the (model, device, is_targeted, config) signature.
    uniform_attacks = {'fgsm': FGSM, 'pgd': PGD, 'cw2': CW2, 'bim': BIM}
    if attack in uniform_attacks:
        return uniform_attacks[attack](model, device, is_targeted, config)
    if attack == 'autopgd':
        if is_targeted:
            print("Autopgd only supports no target attacks, Please try again!")
            sys.exit(0)
        return AutoPGD(model, device, is_targeted, config)
    if attack == 'corAttack':
        if is_targeted:
            print("CorAttack only supports no target attacks, Please try again!")
            sys.exit(0)
        return CORRUPT(config)
    if attack == 'sysAttack':
        if is_targeted:
            print("sysAttack only supports no target attacks, Please try again!")
            sys.exit(0)
        return SYSTEM(
            root_dir='dataset/imagenet/images',
            meta_file='dataset/imagenet/meta.txt',
            config=config,
        )
    print("No such attack method, please try again!")
    sys.exit(0)


def load_config(attack):
    """Read and parse config/<attack>.yaml into a dict of parameters."""
    path = 'config/' + attack + '.yaml'
    with open(path, encoding='utf-8') as handle:
        return yaml.load(handle.read(), Loader=yaml.FullLoader)


# Build the evaluation-metric object for the requested method name.
def get_evaluation(method, outputs_origin, outputs_adv, model, device):
    """Instantiate the metric named by `method`, or exit on unknown names.

    Comments note the direction in which each metric indicates a stronger
    attack, per the original author's annotations.
    """
    builders = {
        'ACC': lambda: ACC(outputs_origin, outputs_adv, device),
        'ASR': lambda: ASR(outputs_origin, outputs_adv, device),
        # Average confidence on the adversarial class — higher = stronger attack.
        'ACAC': lambda: ACAC(outputs_origin, outputs_adv, device),
        # Average confidence on the true class — lower = stronger attack.
        'ACTC': lambda: ACTC(outputs_origin, outputs_adv, device),
        # Maximum boundary distance — higher = stronger attack.
        'BD': lambda: BD(outputs_origin, outputs_adv, device, model),
        # Neuron uncertainty — lower = less perceptible attack.
        'NU': lambda: ENI(outputs_origin, outputs_adv, device),
        'SNS': lambda: SNS(outputs_origin, outputs_adv, device, model),
        # Neuron sensitivity — higher = stronger attack.
        'NSense': lambda: NSense(model, device),
        # Vulnerable-path coverage — higher = stronger attack.
        'MPath': lambda: MPath(model, device),
        # Neuron coverage — higher = stronger attack.
        'NCoverage': lambda: Ncoverage(outputs_origin, outputs_adv, device),
    }
    builder = builders.get(method)
    if builder is None:
        print("No such evaluation method, please try again!")
        sys.exit(0)
    return builder()


def main(args):
    """Run one attack with one evaluation metric over (at most) the first
    5 batches of the dataset, print per-batch predictions, and return the
    batch-size-weighted mean metric value as a string.

    Side effects: may write adversarial/original images of the first batch
    under image_adv/ when args.generate_adv is set.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)

    # Load the model for evaluation on the chosen device.
    model = load_imagenet_model(args.model, device)

    model = model.eval()
    model = model.to(device)

    # Load the data as tensors.
    train_dataloader, total_num = load_dataloader(args.batch_size)

    # Read the attack's hyper-parameter config file.
    config = load_config(args.attack)

    # Build the attack object.
    attacker = load_method(model, device, args.attack, args.is_targeted, config)

    eva = []    # per-batch metric values
    idx = 0     # batch counter
    total = 0   # total number of samples processed
    for data in train_dataloader:
        # Generate adversarial examples for this batch.
        image, label = data
        if args.is_targeted:
            target_class = args.target_class
            target = [target_class for i in range(len(label))]
            target = torch.tensor(target)
            target_preds = target
            image_adv = attacker.generate(image, target)
        else:
            target_preds = label
            image_adv = attacker.generate(image, label)

        # Save adversarial (and original) images of the first batch only.
        if args.generate_adv and idx == 0:
            if os.path.exists('image_adv') == False:
                os.mkdir('image_adv')
            path = r'image_adv/' + args.attack + '/'
            if os.path.exists(path) == False:
                os.mkdir(path)
            path1 = r'image_adv/ori/'
            if os.path.exists(path1) == False:
                os.mkdir(path1)
            # NOTE(review): iterates args.batch_size, not len(image) — would
            # IndexError if the first batch is smaller than batch_size; confirm.
            for i in range(args.batch_size):
                image_save = image_adv[i]
                # Assumes CHW float images in [0, 1] — scaled by 255 on write.
                image_save = image_save.cpu().detach().numpy().transpose((1, 2, 0))
                image_out = cv2.cvtColor(image_save, cv2.COLOR_RGB2BGR)
                cv2.imwrite(
                    path + "image_" + args.attack + "_" + str(i) + ".jpg",
                    image_out * 255,
                )
                image_save = image[i]
                image_save = image_save.cpu().detach().numpy().transpose((1, 2, 0))
                image_out = cv2.cvtColor(image_save, cv2.COLOR_RGB2BGR)
                cv2.imwrite(path1 + "image_ori_" + str(i) + ".jpg", image_out * 255)

        image = image.to(device)
        image_adv = image_adv.to(device)

        # Predict on clean and adversarial inputs separately.
        image_out = model(image)
        image_out = image_out.cpu().detach().numpy()
        image_out_adv = model(image_adv)
        image_out_adv = image_out_adv.cpu().detach().numpy()
        adv_preds = np.argmax(image_out_adv, axis=1)  # predicted class of the adversarial samples

        # Report per-batch results.
        print("第", idx, "个batch")
        print("True Label:", label.cpu().detach().numpy())
        print("predicted class:", np.argmax(image_out, axis=1))
        print("adversarial class:", np.argmax(image_out_adv, axis=1))

        # Back to tensors on the device for the evaluation classes.
        image_out = torch.from_numpy(image_out).to(device)
        image_out_adv = torch.from_numpy(image_out_adv).to(device)
        adv_preds = torch.from_numpy(adv_preds).to(device)

        # Run the evaluation metric on this batch.
        evaluation = get_evaluation(
            args.evaluation_method, image_out, image_out_adv, model, device
        )
        value = evaluation.evaluate(
            image_adv, image, label, adv_preds, target_preds, args.is_targeted
        )

        eva.append(value)
        idx += 1
        total += len(label)
        if idx == 5:
            break

    # Weighted average of the per-batch values: every batch weighs
    # batch_size except a possibly-smaller final batch.
    last_batch_size = total % args.batch_size
    coeffi = [args.batch_size for i in range(idx - 1)]
    if last_batch_size == 0:
        coeffi.append(args.batch_size)
    else:
        coeffi.append(last_batch_size)
    coeffi = np.array(coeffi)
    value = np.sum(eva * coeffi) / total
    print(args.evaluation_method + ": " + str(value))
    return str(value)


#### for test all set


DEVICE_NAME = "2080Ti"  # GPU tag appended to every result-CSV filename
EXP_DIR = './record'    # directory where result tables are written

def deal_all_attack(args):
    """Run every adversarial attack x evaluation metric for each model and
    write one CSV of results per model under EXP_DIR.

    Mutates args.model / args.attack / args.evaluation_method in place
    before each main() call.
    """
    os.makedirs(EXP_DIR, exist_ok=True)
    # fmt: off
    models = ['resnet50','vgg16','resnext26-m']
    evaluation_methods = ["ACC", "ASR", "ACAC", "ACTC", "BD", "NU", "NSense", "MPath", "NCoverage"]
    attack_methods = ['fgsm', 'bim', 'pgd']
    for model in models:
        headline = "none," + ",".join(evaluation_methods)
        table_name = model + '_adversarial_' + DEVICE_NAME + '.csv'
        # `with` guarantees the CSV is closed even if main() raises
        # (the original left the handle open on exceptions).
        with open(os.path.join(EXP_DIR, table_name), 'w') as f_table:
            f_table.write(headline + '\n')
            f_table.flush()
            for attack in attack_methods:
                attack_line = attack.upper()
                for evaluation_method in evaluation_methods:
                    args.evaluation_method = evaluation_method
                    args.model = model
                    args.attack = attack
                    value = main(args)
                    attack_line += ',' + value
                f_table.write(attack_line + '\n')
                # Flush after each attack row so partial progress survives a crash.
                f_table.flush()


def modify_corrupt_config(attack, corruption_name):
    """Rewrite config/<attack>.yaml with `corruption_name` set.

    Loads the existing YAML, replaces the 'corruption_name' key, and
    dumps the file back in place.
    """
    yaml_path = r'config/' + attack + '.yaml'
    # `with` already closes the file — the original's explicit f.close()
    # inside the with-block was redundant and has been removed.
    with open(yaml_path, encoding='utf-8') as f:
        config = yaml.load(f.read(), Loader=yaml.FullLoader)
    config['corruption_name'] = corruption_name
    with open(yaml_path, 'w') as file:
        yaml.dump(config, file)


def modify_system_config(attack, decoder_type, resize_type):
    """Rewrite config/<attack>.yaml with decoder/resize settings.

    Loads the existing YAML, replaces 'decoder_type' and 'resize_type',
    and dumps the file back in place.
    """
    yaml_path = r'config/' + attack + '.yaml'
    # `with` already closes the file — the original's explicit close()
    # calls inside the with-blocks were redundant and have been removed.
    with open(yaml_path, encoding='utf-8') as f:
        config = yaml.load(f.read(), Loader=yaml.FullLoader)
    config['decoder_type'] = decoder_type
    config['resize_type'] = resize_type
    with open(yaml_path, 'w') as file:
        yaml.dump(config, file)

def deal_all_corrupt(args):
    """Evaluate every corruption type for each model and write per-model
    CSVs under EXP_DIR. Each corruption is applied by rewriting the
    corAttack config file before calling main()."""
    # fmt: off
    models = ['resnet50','vgg16','resnext26-m']
    evaluation_methods = ["ACC"]#, "ASR", "ACAC", "ACTC", "BD", "NU", "NSense", "MPath", "NCoverage"]
    category_attack = 'corAttack'
    corruption_name = "gaussian_noise, shot_noise, impulse_noise,defocus_blur, glass_blur,zoom_blur,frost, fog, brightness, contrast,elastic_transform, pixelate, jpeg_compression,speckle_noise, gaussian_blur, spatter, saturate"
    # corruption_name = "spatter"
    attack_methods = corruption_name.replace(" ", "").split(',')
    print(f"corrupt is {attack_methods}")
    for model in models:
        headline = "none," + ",".join(evaluation_methods)
        table_name = model + '_corAttack_' + DEVICE_NAME + '.csv'
        # `with` guarantees the CSV is closed even if main() raises
        # (the original left the handle open on exceptions).
        with open(os.path.join(EXP_DIR, table_name), 'w') as f_table:
            f_table.write(headline + '\n')
            f_table.flush()
            for attack in attack_methods:
                print(f"attack is {attack}")
                # Point the corAttack config at this corruption before running.
                modify_corrupt_config(category_attack, corruption_name=attack)
                attack_line = attack
                for evaluation_method in evaluation_methods:
                    args.evaluation_method = evaluation_method
                    args.model = model
                    args.attack = category_attack
                    value = main(args)
                    attack_line += ',' + value
                f_table.write(attack_line + '\n')
                # Flush per row so partial progress survives a crash.
                f_table.flush()
        
def deal_all_system(args):
    """Evaluate every decoder/resize system-attack combination for each
    model and write per-model CSVs under EXP_DIR.

    attack_methods, decoder_types and resize_types are parallel lists:
    index i of the latter two gives the config values for attack i.
    """
    # fmt: off
    models = ['resnet50','vgg16','resnext26-m']
    evaluation_methods = ["ACC", "ASR", "ACAC", "ACTC", "BD", "NU", "NSense", "MPath", "NCoverage"]
    category_attack = 'sysAttack'
    decoder_type = ['pil', 'opencv', 'ffmpeg']
    resize_type = [
        'pil-bilinear',
        'pil-nearest',
        'pil-box',
        'pil-hamming',
        'pil-cubic',
        'pil-lanczos',
        'opencv-nearest',
        'opencv-bilinear',
        'opencv-area',
        'opencv-cubic',
        'opencv-lanczos',
    ]

    # First the 11 resize variants (decoder fixed per library), then the
    # 3 decoder variants (resize fixed to pil-bilinear).
    attack_methods = resize_type + decoder_type
    decoder_types = ['pil'] * 6 + ['opencv'] * 5 + decoder_type
    resize_types = [
        'pil-bilinear',
        'pil-nearest',
        'pil-box',
        'pil-hamming',
        'pil-cubic',
        'pil-lanczos',
        'opencv-nearest',
        'opencv-bilinear',
        'opencv-area',
        'opencv-cubic',
        'opencv-lanczos',
        'pil-bilinear',
        'pil-bilinear',
        'pil-bilinear',
    ]
    print(f"systemAttack is {attack_methods}")
    for model in models:
        headline = "none," + ",".join(evaluation_methods)
        table_name = model + '_sysAttack_' + DEVICE_NAME + '.csv'
        # `with` guarantees the CSV is closed even if main() raises
        # (the original left the handle open on exceptions).
        with open(os.path.join(EXP_DIR, table_name), 'w') as f_table:
            f_table.write(headline + '\n')
            f_table.flush()
            for i, attack in enumerate(attack_methods):
                # Point the sysAttack config at this combination before running.
                modify_system_config(
                    category_attack,
                    decoder_type=decoder_types[i],
                    resize_type=resize_types[i],
                )
                attack_line = attack
                for evaluation_method in evaluation_methods:
                    args.evaluation_method = evaluation_method
                    args.model = model
                    args.attack = category_attack
                    value = main(args)
                    attack_line += ',' + value
                f_table.write(attack_line + '\n')
                # Flush per row so partial progress survives a crash.
                f_table.flush()

def setup_seed(seed):
    """Seed torch (CPU + all GPUs), numpy and random for reproducibility,
    and force deterministic cuDNN kernels.

    Fixed the original's non-standard 5-space indentation to 4 spaces.
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    # Deterministic convolution algorithms (may be slower, but repeatable).
    torch.backends.cudnn.deterministic = True

if __name__ == '__main__':
    # Fixed seed so repeated runs produce the same attack results.
    setup_seed(2333)
    # Command-line entry point: parse flags, then run the batch sweeps.
    args = collect_args()
    # deal_all_attack(args)
    deal_all_corrupt(args)
    deal_all_system(args)
