import torch
from torch.autograd import grad


def adversarial_example(batch_data,clean_loss,batch_size,epsilon,method="Gaussian"):
    """Build an adversarial copy of *batch_data* by perturbing atom positions.

    Positions are scaled by ``1 + epsilon * sign(dL/dpos)`` (a multiplicative
    FGSM-style step). For every molecule not flagged as fixed, a restoring
    "force" toward its equilibrium geometry is then written into the copy.

    Args:
        batch_data: batched molecular data exposing ``pos``, ``y``, ``fix``,
            ``balance_R``, ``force`` and ``sigma_2``/``sigma2`` attributes plus
            ``clone()``/``detach()`` — presumably a torch_geometric ``Batch``;
            confirm against callers.
        clean_loss: loss tensor differentiable w.r.t. ``batch_data.pos``.
        batch_size: number of molecules in the batch; every molecule is
            assumed to contain the same number of atoms.
        epsilon: perturbation magnitude for the sign step.
        method: ``"Gaussian"`` or ``"Riemann-Gauss"``.

    Returns:
        The perturbed (detached) copy of ``batch_data``.

    Raises:
        ValueError: if *method* is neither recognized value.
    """
    adv = batch_data.clone().detach()
    # d(clean_loss)/d(pos); create_graph keeps the graph for higher-order use.
    pos_gradient = grad(
        outputs=clean_loss,
        inputs=batch_data.pos,
        grad_outputs=torch.ones_like(clean_loss),
        create_graph=True,
    )[0]
    # Multiplicative sign perturbation applied to every atom position at once.
    adv.pos = adv.pos * (1 + epsilon * torch.sign(pos_gradient))

    atoms_per_mol = adv.pos.shape[0] // batch_size  # atoms per molecule
    for mol in range(adv.y.shape[0]):
        rows = slice(atoms_per_mol * mol, atoms_per_mol * (mol + 1))
        fixed_flag = adv.fix[mol]  # 0 marks a molecule whose geometry stays untouched
        if fixed_flag == 0:
            adv.pos[rows] = batch_data.pos[rows]
            continue
        perturbed = adv.pos[rows]
        equilibrium = adv.balance_R[rows]

        if method == "Gaussian":
            # Gaussian formulation: harmonic pull back toward equilibrium.
            # NOTE(review): this branch reads ``sigma_2`` while the branch
            # below reads ``sigma2`` — confirm both attributes really exist
            # on the batch object.
            variance = adv.sigma_2[mol]
            adv.force[rows] = (equilibrium - perturbed) / variance
        elif method == "Riemann-Gauss":
            # Riemannian formulation: center the molecule before comparing.
            centered = perturbed - torch.mean(perturbed, dim=0)
            adv.force[rows] = adv.sigma2[mol] * (centered - equilibrium)
        else:
            raise ValueError("method 输入值不合法！")

    return adv
