from Normalize import Normalize, Blur, Normalize_TF
import timm
import scipy.stats as st
from attack_method import *
from loader import ImageNet, CVPR_GAME
from torch.nn import DataParallel
import random
import logging
from pytorch_grad_cam import GuidedBackpropReLUModel, BaselineModel, DeconvReLUModel, GothroughReLUModel
from pytorch_grad_cam.utils.image import show_cam_on_image, \
    deprocess_image, \
    preprocess_image

# ---------------------------------------------------------------------------
# Command-line configuration for the attack run.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--input_csv', type=str,
                    default='./data/input_dir/dev.csv', help='CSV file describing the input images.')
parser.add_argument('--input_dir', type=str,
                    default='./data/input_dir/images/', help='Input directory with images.')
parser.add_argument('--output_dir', type=str,
                    default='./data/output_dir/', help='Output directory for adversarial images.')
# NOTE(review): type=float only applies to values given on the command line;
# the defaults are full per-channel ImageNet arrays, so passing --mean/--std
# on the CLI would produce a scalar instead — confirm before overriding.
parser.add_argument('--mean', type=float,
                    default=np.array([0.485, 0.456, 0.406]), help='mean.')
parser.add_argument('--std', type=float,
                    default=np.array([0.229, 0.224, 0.225]), help='std.')

parser.add_argument("--max_epsilon", type=float, default=16.0,
                    help="Maximum size of adversarial perturbation.")

parser.add_argument("--num_iter_set", type=int, default=10,
                    help="Number of iterations.")

parser.add_argument("--image_width", type=int, default=500,
                    help="Width of each input images.")
parser.add_argument("--image_height", type=int, default=500,
                    help="Height of each input images.")
# parser.add_argument("--image_resize", type=int, default=[560, 620, 680, 740, 800], help="Height of each input images.")
parser.add_argument("--image_resize", type=int, default=560,
                    help="Upper bound of the random resize used by input diversity.")

parser.add_argument("--batch_size", type=int, default=2,
                    help="How many images process at one time.")

parser.add_argument("--momentum", type=float, default=1.0, help="Momentum")
parser.add_argument("--amplification", type=float,
                    default=1.5, help="To amplify the step size.")
parser.add_argument("--prob", type=float, default=0.7,
                    help="probability of using diverse inputs.")

parser.add_argument("--soft_label", type=float, default=1.0,
                    help="label smoothing.")
parser.add_argument("--threshold", nargs="+", type=float, default=[0.7],
                    help="guided bp threshold.")
parser.add_argument("--gpu-id", type=str, default="2")
parser.add_argument("--manualSeed", type=int, default=8)
# Attack-variant switches: -M momentum (MI), -D diverse inputs (DI),
# -T translation-invariant kernel (TI), -P projection kernel (PI).
parser.add_argument("-M", action="store_true")
parser.add_argument("-D", action="store_true")
parser.add_argument("-T", action="store_true")
parser.add_argument("-P", action="store_true")
parser.add_argument("--baseline", action="store_true")
parser.add_argument("--store_image", action="store_true")
parser.add_argument("--debug_mode", action="store_true")
parser.add_argument("--ssm", action="store_true")
parser.add_argument("-k", type=int, default=8)

opt = parser.parse_args()

def setup_seed(seed):
    """Seed every RNG in use (stdlib random, numpy, torch CPU and CUDA)
    and request deterministic cuDNN kernels for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True


def input_diversity(input_tensor):
    """DI-FGSM input diversity: with probability ``opt.prob``, randomly
    rescale the batch to ``rnd`` in [image_width, image_resize) and zero-pad
    it back to image_resize x image_resize; otherwise return the input
    unchanged.

    Note: torch.randint's upper bound is exclusive, so ``rnd`` never reaches
    ``opt.image_resize`` and at least one row/column of padding is applied.
    """
    rnd = torch.randint(opt.image_width, opt.image_resize, ())
    rescaled = F.interpolate(
        input_tensor, size=[rnd, rnd], mode='bilinear', align_corners=True)
    h_rem = opt.image_resize - rnd
    w_rem = opt.image_resize - rnd
    # Split the remaining pixels randomly between the two sides of each axis.
    pad_top = torch.randint(0, h_rem, ())
    pad_bottom = h_rem - pad_top
    pad_left = torch.randint(0, w_rem, ())
    pad_right = w_rem - pad_left
    pad_list = (pad_left, pad_right, pad_top, pad_bottom)
    # Fix: pad_list was previously built but unused (the tuple literal was
    # repeated); also use F.interpolate consistently with the rest of the file.
    padded = nn.ConstantPad2d(pad_list, 0.)(rescaled)
    padded = F.interpolate(
        padded, [opt.image_resize, opt.image_resize])
    return padded if torch.rand(()) < opt.prob else input_tensor


def ensemble_input_diversity(input_tensor, idx):
    """Input-diversity transform with an ensemble of target canvas sizes.

    ``idx`` selects one of four canvas sizes; the batch is randomly rescaled
    to ``rnd`` in [image_width, canvas), zero-padded up to canvas x canvas,
    then resized back to (image_width, image_height). Unlike
    ``input_diversity`` this always applies the transform.
    """
    # Canvas sizes; the older set was [560, 620, 680, 740, 800].
    # Fix: the list was previously repeated three times inline.
    sizes = [575, 650, 725, 800]
    canvas = sizes[idx]
    rnd = torch.randint(opt.image_width, canvas, ())
    rescaled = F.interpolate(
        input_tensor, size=[rnd, rnd], mode='bilinear', align_corners=True)
    h_rem = canvas - rnd
    w_rem = canvas - rnd
    # Split the padding randomly between the two sides of each axis.
    pad_top = torch.randint(0, h_rem, ())
    pad_bottom = h_rem - pad_top
    pad_left = torch.randint(0, w_rem, ())
    pad_right = w_rem - pad_left
    pad_list = (pad_left, pad_right, pad_top, pad_bottom)
    padded = nn.ConstantPad2d(
        pad_list, 0.)(rescaled)
    # Resize back to the model's native input resolution.
    padded = F.interpolate(
        padded, [opt.image_width, opt.image_height], mode='bilinear')
    return padded


# Runtime configuration executed at import time.
# NOTE(review): benchmark=True lets cuDNN autotune kernels per input size,
# which can undo the determinism requested in setup_seed — confirm intended.
torch.backends.cudnn.benchmark = True
transforms = T.Compose([T.ToTensor()])  # PIL image -> float tensor in [0, 1]

# gpu_id == '-1' means CPU-only; otherwise restrict visible CUDA devices.
use_cuda = True if opt.gpu_id != '-1' else False
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_id

# Random seed
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)
setup_seed(opt.manualSeed)


def gkern(kernlen=15, nsig=3):
    """Build a 2-D Gaussian kernel of shape (kernlen, kernlen).

    The 1-D normal pdf is sampled at ``kernlen`` evenly spaced points on
    [-nsig, nsig]; its self-product gives the 2-D kernel, normalised so the
    entries sum to 1.
    """
    samples = st.norm.pdf(np.linspace(-nsig, nsig, kernlen))
    kern2d = samples[:, None] * samples[None, :]
    return kern2d / kern2d.sum()


# Kernels built once at import time for the optional gradient-smoothing steps.
gaussian_kernel, stack_kern, kern_size = None, None, None
if opt.T:
    # TI-FGSM: depthwise 5x5 Gaussian, one copy per RGB channel, shaped
    # (3, 1, 5, 5) to match F.conv2d(..., padding=(2, 2), groups=3) in graph().
    kernel_size = 5
    kernel = gkern(kernel_size, 3).astype(np.float32)
    gaussian_kernel = np.stack([kernel, kernel, kernel])
    gaussian_kernel = np.expand_dims(gaussian_kernel, 1)
    gaussian_kernel = torch.from_numpy(gaussian_kernel).cuda()

if opt.P:
    # project_kern comes from attack_method (PI-FGSM projection kernel);
    # NOTE(review): currently only consumed by the disabled block in graph().
    stack_kern, kern_size = project_kern(3)


def clip_by_tensor(t, t_min, t_max):
    result = (t >= t_min).float() * t + (t < t_min).float() * t_min
    result = (result <= t_max).float() * result + \
        (result > t_max).float() * t_max
    return result


def show_noise_in_iter(noise, i, debug_dir, name):
    """Log per-image noise norms (l1 / l2 / linf) at iteration ``i`` and
    dump each noise map into ``debug_dir`` as an image for inspection."""
    for m, sample in enumerate(noise):
        flat = sample.detach().permute(1, 2, 0).cpu().numpy()
        l2 = np.sqrt(np.sum(flat ** 2))
        l1 = np.sum(np.abs(flat))
        linf = np.max(np.abs(flat))
        stem = name[m].split('.')[0]
        logging.info("\npic@{}, iter@{}, l2 = {}  inf = {}  l1 = {}  ".format(
            stem, i, l2, linf, l1))
        save_img(debug_dir + '/{}_noise_iter_{}'.format(stem, i),
                 deprocess_image(flat), split_channel=True)


def graph(x, gt, x_min, x_max, mulforward=1, debug_dir=None, name=None, **models):
    """Craft adversarial examples for batch ``x`` with iterative FGSM variants.

    x          -- clean images (B, C, H, W), cloned onto the GPU here.
    gt         -- ground-truth labels; the cross-entropy w.r.t. them is maximized.
    x_min/x_max-- per-pixel clipping bounds of the epsilon ball.
    mulforward -- number of replicated copies per image; gradients of the
                  copies are summed before the sign step.
    debug_dir, name -- when both are set, per-iteration noise stats/images
                  are dumped via show_noise_in_iter.
    models     -- must contain the surrogate under key 'res50'.
    Returns the adversarial batch (one image per original input, detached).
    """
    eps = opt.max_epsilon / 255.0
    num_iter = opt.num_iter_set
    alpha = eps / num_iter  # per-iteration step size
    # alpha_beta = alpha * opt.amplification

    res = models['res50']

    adv = x.clone().cuda()
    batch_size = adv.shape[0]
    image_shape = adv.shape[1:]

    # Replicate each image (and its label) mulforward times along the batch
    # dimension so one forward pass evaluates all copies at once.
    if mulforward > 1:
        adv = adv.tile(1, mulforward, 1, 1).reshape(-1, *image_shape)
        gt = gt[:, None].tile(mulforward).reshape(-1)
    adv.requires_grad = True
    # amplification = 0.0
    grad = torch.zeros(adv.shape).cuda()  # momentum accumulator (MI-FGSM)
    pre_grad = torch.zeros(adv.shape).cuda()  # previous-step raw gradient, added to the input below
    mask = None
    for i in range(num_iter):
        soft_label = 1.0
        if opt.D:
            # DI variant: average the loss over four differently sized
            # resize/pad transforms of (adv + previous gradient).
            output1 = res.forward(ensemble_input_diversity(adv + pre_grad, 0))
            loss1 = F.cross_entropy(output1 * soft_label, gt, reduction="none")

            output3 = res.forward(ensemble_input_diversity(adv + pre_grad, 1))
            loss3 = F.cross_entropy(output3 * soft_label, gt, reduction="none")

            output4 = res.forward(ensemble_input_diversity(adv + pre_grad, 2))
            loss4 = F.cross_entropy(output4 * soft_label, gt, reduction="none")

            output5 = res.forward(ensemble_input_diversity(adv + pre_grad, 3))
            loss5 = F.cross_entropy(output5 * soft_label, gt, reduction="none")

            loss = (loss1 + loss3 + loss4 + loss5) / 4.0
        else:
            output = res.forward(adv)
            loss = F.cross_entropy(output * soft_label, gt, reduction="none")

        # Rescale so the mean over the enlarged batch matches a sum over
        # the mulforward copies of each image.
        loss *= mulforward
        loss.mean().backward()
        noise = adv.grad.data.clone()
        pre_grad = adv.grad.data.clone()
        # TI-FGSM
        if opt.T:
            # Smooth the gradient with the depthwise Gaussian built at import.
            noise = F.conv2d(noise, gaussian_kernel, bias=None,
                             stride=1, padding=(2, 2), groups=3)

        # MI-FGSM
        if opt.M:
            # l1-normalize, then accumulate into the running momentum.
            noise = noise / torch.abs(noise).mean([1, 2, 3], keepdim=True)
            noise = opt.momentum * grad + noise
            grad = noise.clone()

        # Disabled PI-FGSM / staircase-sign code path, kept for reference.
        if False:
            pass
            # PI-FGSM + SSM
            # if opt.P and opt.use_ssm:
            #     amplification += alpha_beta * \
            #         torch_staircase_sign(noise, 1.5625)
            #     cut_noise = clip_by_tensor(
            #         abs(amplification) - eps, 0.0, 10000.0) * torch.sign(amplification)
            #     projection = alpha_beta * \
            #         torch_staircase_sign(project_noise(
            #             cut_noise, stack_kern, kern_size), 1.5625)
            #     noise = alpha_beta * torch_staircase_sign(noise,
            #                                                 1.5625) + projection
            #     # Occasionally, when the adversarial examples are crafted for an ensemble of networks with residual block by combined methods,
            #     # you may neet to comment the following line to get better result.
            #     amplification += projection
            # elif opt.P and not opt.use_ssm:
            #     amplification += alpha_beta * torch.sign(noise)
            #     cut_noise = clip_by_tensor(
            #         abs(amplification) - eps, 0.0, 10000.0) * torch.sign(amplification)
            #     projection = alpha_beta * \
            #         torch.sign(project_noise(
            #             cut_noise, stack_kern, kern_size))
            #     noise = alpha_beta * torch.sign(noise) + projection
            #     # Occasionally, when the adversarial examples are crafted for an ensemble of networks with residual block by combined methods,
            #     # you may neet to comment the following line to get better result.
            #     amplification += projection
            # elif opt.use_ssm:
            #     noise = alpha_beta * \
            #         torch_staircase_sign(noise, 1.5625)
            # else:
            #     noise = alpha * torch.sign(noise)

        if mulforward > 1:
            # Sum the copies' gradients, take one sign step per original
            # image, then re-broadcast the updated image to all copies.
            noise = noise.reshape(batch_size, mulforward,
                                  *image_shape).sum(dim=1)
            noise = alpha * torch.sign(noise)
            new_adv = clip_by_tensor(
                adv.data[::mulforward, ...] + noise, x_min, x_max)
            adv.data = new_adv.tile(
                1, mulforward, 1, 1).reshape(-1, *image_shape)
        else:
            noise = alpha * torch.sign(noise)
            adv.data += noise
            adv.data = clip_by_tensor(adv.data, x_min, x_max)

        if debug_dir and name:
            show_noise_in_iter(
                noise=noise, i=i, debug_dir=debug_dir, name=name)
        adv.grad.zero_()

    # Copies are identical after the shared update; keep one per input.
    return adv[::mulforward, ...].detach()


def main():
    """Run the attack: build the surrogate (guided-BP wrapped ResNet-50) and
    a set of held-out evaluation models, craft adversarial examples for every
    batch, optionally save images/noise, and log each model's fooled rate."""
    # Surrogate: normalization + pretrained ResNet-50, wrapped below so
    # gradients flow through the modified-ReLU backprop model.
    res = torch.nn.Sequential(Normalize(opt.mean, opt.std),
                              models.resnet50(pretrained=True).eval()).cuda()

    if opt.baseline:
        gb_model = BaselineModel(model=res)
    else:
        gb_model = GothroughReLUModel(model=res, threshold=opt.threshold)

    # Black-box models used only to evaluate transferability (no gradients).
    dense = DataParallel(torch.nn.Sequential(Normalize(opt.mean, opt.std),
                                             models.densenet121(pretrained=True).eval())).cuda()
    res101 = DataParallel(torch.nn.Sequential(Normalize(opt.mean, opt.std),
                                              models.resnet101(pretrained=True).eval())).cuda()
    vgg = DataParallel(torch.nn.Sequential(Normalize(opt.mean, opt.std),
                                           models.vgg19(pretrained=True).eval())).cuda()
    dense169 = DataParallel(torch.nn.Sequential(Normalize(opt.mean, opt.std),
                                                models.densenet169(pretrained=True).eval())).cuda()
    eff = DataParallel(nn.Sequential(Normalize_TF(), timm.create_model(
        'tf_efficientnet_b5', pretrained=True).eval())).cuda()

    # X = ImageNet(opt.input_dir, opt.input_csv, transforms)
    X = CVPR_GAME(opt.input_dir, opt.input_csv, transforms)
    data_loader = DataLoader(X, batch_size=opt.batch_size,
                             shuffle=False, pin_memory=True, num_workers=8)
    # Running counts of fooled (misclassified) examples per model.
    sum_dense, sum_res, sum_res101, sum_dense169, sum_vgg, sum_eff = 0, 0, 0, 0, 0, 0

    if opt.store_image and not os.path.exists(opt.output_dir):
        os.makedirs(opt.output_dir)

    iter_ = 0
    debug_dir = None
    if opt.debug_mode:
        debug_dir = os.path.join(opt.output_dir, "debug")
        if not os.path.exists(debug_dir):
            os.makedirs(debug_dir)

    for images, name, gt_cpu, target_cpu in tqdm(data_loader):
        iter_ += 1
        gt = gt_cpu.cuda()
        images = images.cuda()
        # Epsilon-ball bounds around the clean images, kept inside [0, 1].
        images_min = clip_by_tensor(images - opt.max_epsilon / 255.0, 0.0, 1.0)
        images_max = clip_by_tensor(images + opt.max_epsilon / 255.0, 0.0, 1.0)
        # adv_img = graph(images, gt, images_min, images_max, eff=eff, dense=dense, res50=res, res101=res101, dense169=dense169, vgg=vgg)

        # One replicated forward per guided-BP threshold (see graph()).
        adv_img = graph(images, gt, images_min, images_max, mulforward=len(
            opt.threshold), debug_dir=debug_dir, name=name, res50=gb_model)

        # Log the final perturbation norms and optionally save outputs.
        for i in range(len(adv_img)):
            noise = (adv_img[i] - images[i]
                     ).detach().permute(1, 2, 0).cpu().numpy()
            l2 = np.sqrt(np.sum(noise ** 2))
            l1 = np.sum(np.abs(noise))
            linf = np.max(np.abs(noise))
            logging.info("\nl2 = {}  inf = {}  l1 = {}  ".format(l2, linf, l1))
            if opt.store_image:
                save_img(
                    opt.output_dir + '/{}'.format(name[i]), adv_img[i].detach().permute(1, 2, 0).cpu())
                save_img(
                    opt.output_dir + '/{}_noise.jpg'.format(name[i].split('.')[0]), deprocess_image(noise))

        # Count how many adversarial images each evaluation model misclassifies.
        with torch.no_grad():
            sum_dense += (dense.forward(adv_img).argmax(1)
                          != gt).detach().sum().cpu()
            sum_res += (res.forward(adv_img).argmax(1)
                        != gt).detach().sum().cpu()
            sum_res101 += (res101.forward(adv_img).argmax(1)
                           != gt).detach().sum().cpu()
            sum_vgg += (vgg.forward(adv_img).argmax(1)
                        != gt).detach().sum().cpu()
            sum_dense169 += (dense169.forward(adv_img).argmax(1)
                             != gt).detach().sum().cpu()
            sum_eff += (eff.forward(adv_img).argmax(1)
                        != gt).detach().sum().cpu()

            # Periodic progress report of the running fooled rates.
            if iter_ % 20 == 0:
                log_info = '\nres = {:.2%} \ndense = {:.2%} \nres101 = {:.2%} \nvgg = {:.2%} \ndense169 = {:.2%} \nsum_eff = {:.2%} '.format(
                    *(np.array([sum_res, sum_dense, sum_res101, sum_vgg, sum_dense169, sum_eff]) / (opt.batch_size * iter_))
                )
                logging.info(log_info)

    # Final fooled-rate summary over the whole dataset.
    # NOTE(review): assumes every batch is full (batch_size * iter_ images) —
    # a short last batch slightly deflates the rates; confirm acceptable.
    log_info = '\nres = {:.2%} \ndense = {:.2%} \nres101 = {:.2%} \nvgg = {:.2%} \ndense169 = {:.2%} \nsum_eff = {:.2%} '.format(
        *(np.array([sum_res, sum_dense, sum_res101, sum_vgg, sum_dense169, sum_eff]) / (opt.batch_size * iter_))
    )
    logging.info(log_info)


if __name__ == '__main__':
    # Fix: normalize the path first so a trailing '/' (as in the default
    # --output_dir) does not yield an empty basename and a './logs/.log'
    # file, and make sure the log directory exists before FileHandler
    # tries to open the file.
    os.makedirs("./logs", exist_ok=True)
    log_file = "./logs/{}.log".format(
        os.path.basename(os.path.normpath(opt.output_dir)))
    # Mirror all log output to both the file and stderr.
    handlers = [logging.FileHandler(log_file), logging.StreamHandler()]
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                        datefmt='%m-%d %H:%M',
                        handlers=handlers)
    logging.info('Started')
    logging.info(str(opt))  # record the full configuration for this run
    main()
    logging.info('Finished\n\n')
