import torch


def improved_PGD(
        model_fn,
        x_imgs,
        x_targets,
        y,
        loss_fn,
        eps,
        nb_iter,
        clip_min,
        clip_max,
        targeted=False
):
    """Iterative PGD-style L-inf attack with a gradient-magnitude-scaled step.

    Starts from a random point in the eps-ball around ``x_imgs`` and, for
    ``nb_iter`` iterations, re-perturbs the original images along the sign of
    the loss gradient at the *current* adversarial point.  The per-pixel step
    magnitude is ``base**|g| * eps / base`` with ``base = 2 + i``, so pixels
    with larger normalized gradient magnitude ``|g|`` in (0, 1] move further
    (up to ``eps``), and the step floor ``eps / base`` shrinks over iterations.

    Args:
        model_fn: callable mapping ``(x, x_targets)`` to model outputs.
            (``x_targets``'s semantics depend on the model — it is forwarded
            unchanged; TODO confirm against callers.)
        x_imgs: batch of input images to perturb (float-convertible tensor).
        x_targets: auxiliary model input forwarded to ``model_fn``.
        y: labels for ``loss_fn`` (true labels, or target labels when
            ``targeted=True``).
        loss_fn: callable mapping ``(model_output, y)`` to a scalar loss.
        eps: L-inf radius of the allowed perturbation.
        nb_iter: number of attack iterations.
        clip_min: minimum valid pixel value.
        clip_max: maximum valid pixel value.
        targeted: if True, minimize the loss toward target labels ``y``
            instead of maximizing the loss of the correct labels.

    Returns:
        Adversarial examples inside the eps-ball around ``x_imgs``, clipped
        to ``[clip_min, clip_max]``.
    """
    # Work on a detached float copy so the caller's tensor is never mutated
    # and autograd never tracks the clean images.
    x_imgs = x_imgs.clone().detach().to(torch.float)

    # Random start inside the L-inf eps-ball.
    adv_x = x_imgs + torch.zeros_like(x_imgs).uniform_(-eps, eps)

    for i in range(nb_iter):
        adv_x = torch.clamp(adv_x, clip_min, clip_max)

        # adv_x must be a floating-point leaf with requires_grad=True for its
        # .grad to be populated by backward().
        # BUG FIX: the original evaluated the loss at the clean input x_imgs,
        # so every iteration recomputed (essentially) the same gradient; an
        # iterative attack must use the gradient at the current adv point.
        adv_x = adv_x.clone().detach().requires_grad_(True)

        loss = loss_fn(model_fn(adv_x, x_targets), y)
        # For a targeted attack, descend on the target-label loss rather than
        # ascend on the true-label loss.
        if targeted:
            loss = -loss
        loss.backward()

        grad = adv_x.grad
        adv_x = adv_x.detach()

        # Freeze pixels whose perturbation already saturates the eps-ball.
        grad = grad.masked_fill((adv_x - x_imgs).abs() >= eps, 0.0)
        # Freeze pixels stuck at a clip boundary.
        # BUG FIX: the original used bitwise_and(adv_x == clip_min,
        # adv_x == clip_max), which can never hold unless
        # clip_min == clip_max; the intent is "at the min OR the max bound".
        grad = grad.masked_fill((adv_x == clip_min) | (adv_x == clip_max), 0.0)

        # Normalize the gradient into [-1, 1]; guard against an all-zero
        # (fully masked / flat-loss) gradient, which previously produced
        # NaNs through a 0 / 0 division.
        scale = grad.abs().max()
        normalized_grad = grad / scale.clamp_min(torch.finfo(grad.dtype).tiny)

        # Magnitude-dependent step: |step| = base**|g| * eps / base, i.e. in
        # (eps / base, eps] for non-zero g and exactly 0 for masked pixels.
        base = 2 + i
        perturbation = (
            torch.sign(normalized_grad)
            * torch.pow(base, torch.abs(normalized_grad))
            * eps / base
        )

        # Re-perturb the clean images and project back onto the eps-ball.
        adv_x = x_imgs + perturbation
        adv_x = torch.clamp(adv_x, x_imgs - eps, x_imgs + eps)

    # BUG FIX: the original returned without re-applying the valid-pixel
    # clamp, so the last iteration could leave values outside
    # [clip_min, clip_max].
    return torch.clamp(adv_x, clip_min, clip_max)
