"""The Fast Gradient Method attack."""
import numpy as np
import torch
from torch.autograd import grad


def fast_gradient_method(
    model_fn,
    batch_data,
    eps,
    energy_and_force,
    p,
    clip_min=None,
    clip_max=None,
):
    """Perturb ``batch_data.pos`` with the Fast Gradient (Sign) Method.

    Runs ``model_fn`` on the batch, computes an L1 loss against
    ``batch_data.y`` (and, optionally, against ``batch_data.force``),
    then takes one L-infinity FGM step of size ``eps`` on the atomic
    positions: ``pos <- pos + eps * sign(d loss / d pos)``.

    Args:
        model_fn: Callable mapping ``batch_data`` to a prediction tensor
            (energy per graph; shape must be compatible with
            ``batch_data.y.unsqueeze(1)``).
        batch_data: Batch object with ``.pos`` (positions, float tensor),
            ``.y`` (targets) and — when ``energy_and_force`` is true —
            ``.force`` (reference forces). Modified in place.
        eps: Step size of the sign-gradient perturbation.
        energy_and_force: If true, the loss is ``e_loss + p * f_loss``
            where the force is ``-d(out)/d(pos)``.
        p: Weight of the force loss term (ignored otherwise).
        clip_min: Optional lower bound for the perturbed positions.
        clip_max: Optional upper bound for the perturbed positions.

    Returns:
        The same ``batch_data`` object with ``.pos`` replaced by the
        (detached) adversarial positions.

    Raises:
        ValueError: If exactly one of ``clip_min`` / ``clip_max`` is given
            (one-sided clipping is not supported).

    Note:
        ``loss.backward()`` also populates gradients of the model's
        parameters as a side effect, as in the original implementation.
    """
    # pos must be a leaf of floating-point type with requires_grad=True for
    # its .grad to be computed and stored by the backward call below.
    batch_data.pos.requires_grad_(True)
    # Clear stale gradients from a previous call on the same tensor;
    # backward() accumulates, which would corrupt the perturbation direction.
    if batch_data.pos.grad is not None:
        batch_data.pos.grad.zero_()

    # Forward pass and loss.
    out = model_fn(batch_data)
    loss_fn = torch.nn.L1Loss()
    if energy_and_force:
        # Force is the negative gradient of the energy w.r.t. positions.
        # create_graph keeps this differentiable so f_loss can contribute
        # to d(loss)/d(pos) in the backward call.
        force = -grad(
            outputs=out,
            inputs=batch_data.pos,
            grad_outputs=torch.ones_like(out),
            create_graph=True,
            retain_graph=True,
        )[0]
        e_loss = loss_fn(out, batch_data.y.unsqueeze(1))
        f_loss = loss_fn(force, batch_data.force)
        loss = e_loss + p * f_loss
    else:
        loss = loss_fn(out, batch_data.y.unsqueeze(1))

    # Gradient of the loss w.r.t. the input positions.
    loss.backward()

    # L-infinity FGM step: move each coordinate by eps in the sign of its
    # gradient. Detach so the returned positions do not drag the whole
    # backward graph along with them (memory leak in the original).
    optimal_perturbation = eps * torch.sign(batch_data.pos.grad)
    batch_data.pos = (batch_data.pos + optimal_perturbation).detach()

    # If clipping is requested, both bounds must be given; reset values
    # outside [clip_min, clip_max].
    if (clip_min is not None) or (clip_max is not None):
        if clip_min is None or clip_max is None:
            raise ValueError(
                "One of clip_min and clip_max is None but we don't currently support one-sided clipping"
            )
        batch_data.pos = torch.clamp(batch_data.pos, clip_min, clip_max)

    return batch_data
