import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim


def trades_pgd_attack(model, x, attack_step, attack_lr=0.003, attack_eps=0.3, random_init=True, clamp=(0, 1)):
    """Craft TRADES adversarial examples with an L-inf PGD attack.

    Each step maximizes KL(softmax(model(x)) || softmax(model(x_adv)))
    with respect to ``x_adv`` via a signed-gradient ascent step, then
    projects back onto the eps-ball around ``x`` and the valid range.

    Args:
        model: classifier returning logits; switched to eval mode here.
        x: clean input batch.
        attack_step: number of PGD iterations.
        attack_lr: step size per iteration.
        attack_eps: L-inf radius of the perturbation ball.
        random_init: start from a small Gaussian perturbation of ``x``.
            Without it the KL gradient at ``x_adv == x`` is exactly zero
            and the attack never moves.
        clamp: (min, max) valid input range.

    Returns:
        Adversarial batch with the same shape as ``x``.
    """
    model.eval()
    x_adv = x.clone().detach()
    if random_init:
        # Small Gaussian start; the projection inside the loop keeps the
        # perturbation within the eps-ball and the valid range.
        x_adv = x_adv + 0.001 * torch.randn(x.shape, device=x.device)

    # Natural logits are loop-invariant: compute them once, without autograd
    # (the attack gradient wrt x_adv does not depend on them requiring grad).
    with torch.no_grad():
        nat_probs = F.softmax(model(x), dim=1)

    for _ in range(attack_step):
        # detach() first so requires_grad_ works even when x requires grad
        # (setting .requires_grad on a non-leaf tensor raises).
        x_adv = x_adv.detach().requires_grad_(True)
        model.zero_grad()

        adv_logits = model(x_adv)
        # reduction='sum' matches the deprecated KLDivLoss(size_average=False).
        loss_kl = F.kl_div(F.log_softmax(adv_logits, dim=1), nat_probs, reduction='sum')
        loss_kl.backward()

        # Signed-gradient ascent step.
        x_adv = x_adv.detach() + attack_lr * x_adv.grad.detach().sign()

        # Project onto the L-inf eps-ball around x, then the valid range.
        x_adv = x + torch.clamp(x_adv - x, min=-attack_eps, max=attack_eps)
        x_adv = torch.clamp(x_adv, *clamp)
    return x_adv


def kl_loss(nat_logits, adv_logits, reduction='mean'):
    """KL divergence between the natural and adversarial output distributions.

    Computes KL(softmax(nat_logits) || softmax(adv_logits)) with the given
    reduction, taking log-probabilities for the adversarial side as
    ``F.kl_div`` expects.
    """
    adv_log_probs = F.log_softmax(adv_logits, dim=1)
    nat_probs = F.softmax(nat_logits, dim=1)
    return F.kl_div(adv_log_probs, nat_probs, reduction=reduction)


# def trades_plus_loss(nat_logits, adv_logits, y_soft, beta=6.0, nat_ce_w=1.0, adv_ce_w=1.0, temp=1):
#     if nat_ce_w == 0:
#         ce_loss_nat = 0
#     else:
#         batch_size = nat_logits.size()[0]
#         ce_loss_nat = (1.0 / batch_size) * (-torch.sum(F.log_softmax(nat_logits, dim=1) * y_soft))
#
#     if adv_ce_w == 0:
#         ce_loss_adv = 0
#     else:
#         batch_size = adv_logits.size()[0]
#         ce_loss_adv = (1.0 / batch_size) * (-torch.sum(F.log_softmax(adv_logits, dim=1) * y_soft))
#
#     criterion_kl = torch.nn.KLDivLoss(size_average=False)
#     kl_loss = (1.0 / batch_size) * criterion_kl(F.log_softmax(adv_logits / temp, dim=1),
#                                                 F.softmax(nat_logits / temp, dim=1))
#
#     return nat_ce_w * ce_loss_nat + adv_ce_w * ce_loss_adv + beta * (kl_loss)


def trades_loss(nat_logits, adv_logits, y_soft, beta=6.0, type='trades'):
    """TRADES training loss: natural cross-entropy + beta * KL robustness term.

    Args:
        nat_logits: logits on clean inputs, shape (batch, classes).
        adv_logits: logits on adversarial inputs, same shape.
        y_soft: (soft) label distribution per sample, same shape.
        beta: weight of the KL robustness term.
        type: 'trades' (default) or 'trades-plus', which additionally adds
            a cross-entropy term on the adversarial logits.

    Returns:
        Tuple (nat_ce_loss, adv_ce_loss, kl_loss, total_loss); adv_ce_loss
        is 0 unless type == 'trades-plus'.
    """
    batch_size = nat_logits.size()[0]
    # Soft-label cross-entropy on the natural logits, averaged over the batch.
    nat_ce_loss = (1.0 / batch_size) * (-torch.sum(F.log_softmax(nat_logits, dim=1) * y_soft))
    adv_ce_loss = 0
    if type == 'trades-plus':
        # Bug fix: this term must use the ADVERSARIAL logits (previously it
        # duplicated the natural CE by reusing nat_logits).
        adv_ce_loss = (1.0 / batch_size) * (-torch.sum(F.log_softmax(adv_logits, dim=1) * y_soft))

    # reduction='sum' replaces the deprecated size_average=False; dividing by
    # batch_size yields the per-sample KL between output distributions.
    criterion_kl = torch.nn.KLDivLoss(reduction='sum')
    kl_loss = (1.0 / batch_size) * criterion_kl(F.log_softmax(adv_logits, dim=1), F.softmax(nat_logits, dim=1))

    loss = nat_ce_loss + adv_ce_loss + beta * kl_loss

    return nat_ce_loss, adv_ce_loss, kl_loss, loss


# def trades_attack_pro(model, x_natural, step_size=0.003, epsilon=0.031, perturb_steps=10, distance='Linf'):
#     model.eval()
#     batch_size = len(x_natural)
#     if distance == 'Linf':
#         x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).to(x_natural.device).detach()
#         for _ in range(perturb_steps):
#             x_adv.requires_grad_()
#             with torch.enable_grad():
#                 loss_kl = F.kl_div(F.log_softmax(model(x_adv), dim=1),
#                                    F.softmax(model(x_natural), dim=1),
#                                    reduction='sum')
#             grad = torch.autograd.grad(loss_kl, [x_adv])[0]
#             x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
#             x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
#             x_adv = torch.clamp(x_adv, 0.0, 1.0)
#     elif distance == 'L2':
#         delta = 0.001 * torch.randn(x_natural.shape).to(x_natural.device).detach()
#         delta = Variable(delta.data, requires_grad=True)
#
#         # Setup optimizers
#         optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
#
#         for _ in range(perturb_steps):
#             adv = x_natural + delta
#
#             # optimize
#             optimizer_delta.zero_grad()
#             with torch.enable_grad():
#                 loss = (-1) * F.kl_div(F.log_softmax(model(adv), dim=1),
#                                        F.softmax(model(x_natural), dim=1),
#                                        reduction='sum')
#             loss.backward()
#             # renorming gradient
#             grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
#             delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
#             optimizer_delta.step()
#
#             # projection
#             delta.data.add_(x_natural)
#             delta.data.clamp_(0, 1).sub_(x_natural)
#             delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
#         x_adv = Variable(x_natural + delta, requires_grad=False)
#     else:
#         x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).to(x_natural.device).detach()
#         x_adv = torch.clamp(x_adv, 0.0, 1.0)
#     return x_adv
