"""Functional interface to a PGD attack.
"""
__author__ = "Bryse Flowers <brysef@vt.edu>"

# External Includes
import numpy as np

import torch
from torch.autograd import Variable
from torch.nn.functional import cross_entropy
import torch.nn.functional as F
import torch.nn as nn

from typing import Union

# Internal Includes
from preprocess import cal_energy, cal_power
from .tools import _convert_or_throw, _infer_input_size, _normalize
from .tools import _random_uniform_start, _compute_multiplier

class Slicer(nn.Module):
    """Slice one long continuous signal into fixed-width discrete examples.

    Conceptually this batches up a continuous capture so a classifier that
    expects fixed-length inputs can consume it.  Starting at *offset*, as many
    full-width examples as possible are carved out of the time dimension
    (undersized leftovers at the end are discarded) and stacked along the
    batch dimension.  The channel and IQ dimensions pass through untouched and
    the output time dimension always equals *width*.

    The operation is differentiable, so it can sit inside a training chain.

    Args:
        width (int): Size of each example ("number of samples") in time.
        offset (int, optional): Samples to skip at both the beginning and the
                                end, e.g. to drop filter transients.
                                Defaults to 0.

    Raises:
        ValueError: If width is not a positive integer.
        ValueError: If offset is negative.

    Expects input formatted as BxCxIQxT; the output has an enlarged batch
    dimension and a time dimension equal to *width*.
    """

    def __init__(self, width: int, offset: int = 0):
        if width < 1:
            raise ValueError("Width must be a positive integer, not {}".format(width))
        if offset < 0:
            raise ValueError("Offset cannot be negative -- you gave {}".format(offset))
        super(Slicer, self).__init__()

        self.width = width
        self.offset = offset

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        batch_dim, time_dim = 0, 3

        total = x.shape[time_dim]

        # Already example-sized: hand the input straight back (offset ignored).
        if total == self.width:
            return x

        if total < (2 * self.offset + self.width):
            raise ValueError(
                "Not enough samples to perform operation, "
                "input shape={shape}, width={width}, "
                "offset={offset}.".format(
                    shape=x.shape, width=self.width, offset=self.offset
                )
            )

        # Usable span after trimming *offset* from both ends, truncated down to
        # a whole number of width-sized pieces (trailing remainder is dropped).
        usable = total - 2 * self.offset
        n_pieces = usable // self.width
        keep = n_pieces * self.width

        # Keep only the in-bounds, evenly-divisible region of the signal.
        trimmed = x.narrow(dim=time_dim, start=self.offset, length=keep)

        # Because *keep* is an exact multiple of width, every piece comes out
        # exactly width samples long; stack them along the batch dimension.
        pieces = trimmed.split(self.width, dim=time_dim)
        return torch.cat(pieces, dim=batch_dim)


def epsilon_multiplier(
    spr: float, power: Union[float, torch.Tensor]
) -> Union[float, torch.Tensor]:
    """Compute the perturbation amplitude (epsilon) for a desired SPR.

    The perturbation power is derived from the signal power and the desired
    Signal-to-Perturbation ratio, then converted to a per-rail amplitude:

    .. math::

        P_p = P_x \\times 10^{\\frac{-\\text{spr}}{10}}

        \\epsilon = \\sqrt{\\frac{P_p}{2}}

    The division by two splits the perturbation power evenly between the I and
    Q channels before taking the square root.

    Args:
        spr (float): Desired Signal-to-Perturbation ratio in dB.  ``np.inf``
                     means "no perturbation" and yields a multiplier of 0.
        power (Union[float, torch.Tensor]): Power of the signal being
                     perturbed; a Tensor is scaled element-wise (the result
                     then has the same shape as *power*).

    Returns:
        Union[float, torch.Tensor]: Amplitude scale epsilon with the same
        shape/type as *power*, or 0 when spr is infinite.
    """
    if spr == np.inf:
        # Infinite SPR -> zero-power perturbation; the scalar 0 broadcasts
        # cleanly against tensors in downstream arithmetic.
        return 0
    # Perturbation power at the requested ratio below the signal power.
    power_p = power * pow(10, -spr / 10.0)
    # Split across I/Q rails, then convert power to a linear amplitude.
    return pow(power_p / 2, 0.5)

def pgd(
    x: torch.Tensor,
    y: Union[torch.Tensor, int],
    net: nn.Module,
    spr: float,
    k: int,
    input_size: Union[int, None] = None,
    PL: bool = False,
    sps: int = 8,
) -> torch.Tensor:
    """Projected Gradient Descent attack.

    Args:
        x (torch.Tensor): Continuous input signal (BxCxIQxN)
        y (Union[torch.Tensor, int]): The categorical (integer) label for the
                                      input signals.  This can either be a
                                      single integer, assumed to be the label
                                      for all inputs, or a Tensor (B) with a
                                      label for each batch of x.
        net (nn.Module): Classification model used for computing the gradient
                         signal.  Must expose a ``device`` attribute and
                         return ``(logits, ...)`` from its forward pass.
        spr (float): Signal-to-Perturbation ratio (SPR) in dB used to scale
                     the power of the perturbation crafted and applied.
                     ``np.inf`` results in no perturbation.
        k (int): Number of iterations to use for the attack.
        input_size (int, optional): Number of time samples that net takes in
                                    at a time.  If not provided, inferred from
                                    the x shape.  Defaults to None.
        PL (bool, optional): Currently unused.  Defaults to False.
        sps (int, optional): Samples per symbol.  Currently unused (the
                             epsilon is derived from the measured signal power
                             instead).  Defaults to 8.

    Raises:
        ValueError: If k is not a positive integer.

    Returns:
        torch.Tensor: Perturbed signal (x + p) which is formatted as BxCxIQxN

    Reference:
        Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris
        Tsipras, Adrian Vladu,
        "Towards Deep Learning Models Resistant to Adversarial Attacks",
        https://arxiv.org/abs/1706.06083
    """
    x, y = _convert_or_throw(x=x, y=y)
    if k < 1:
        raise ValueError("K must be a positive integer -- you gave {}".format(k))
    input_size = _infer_input_size(x=x, input_size=input_size)

    # Batch the continuous signal into fixed-width examples the model accepts.
    x = Slicer(width=input_size)(x)
    adv_x = x

    # Per-example perturbation budget (epsilon) derived from the measured
    # signal power, spread evenly over the k iterations.
    power_x = cal_power(x)
    power_x = power_x.view([power_x.size()[0], power_x.size()[1], 1, 1])
    eps = epsilon_multiplier(spr=spr, power=power_x)
    step_size = eps / float(k)

    # Feasible set: an L-infinity ball of radius eps around the natural input.
    lower_bound = x - eps
    upper_bound = x + eps

    # Put the inputs/outputs onto the most probable device that the model is
    # currently on -- this could fail if the model gets split amongst multiple
    # devices, but, that doesn't happen in this code.
    adv_x = adv_x.to(net.device)
    y = y.to(net.device)
    upper_bound = upper_bound.to(net.device)
    lower_bound = lower_bound.to(net.device)
    # When spr == np.inf, epsilon_multiplier returns the plain scalar 0, so
    # step_size is not a Tensor and has no .to() -- only move real tensors.
    if torch.is_tensor(step_size):
        step_size = step_size.to(net.device)

    # Ensure the model is in eval mode so that batch norm/dropout etc. doesn't
    # take effect -- to be transparent to the caller, restore state at the end.
    set_training = net.training
    if set_training:
        net.eval()

    for _ in range(k):
        # Re-leaf the example each iteration so backward() deposits a fresh
        # gradient directly at the input (a non-leaf tensor would have
        # .grad == None).
        adv_x = adv_x.detach().requires_grad_(True)
        _y, _ = net(adv_x)
        loss = cross_entropy(_y, y)
        loss.backward()

        # Take the sign of the gradient that can be scaled later
        sg = torch.sign(adv_x.grad.data)

        # Take a step in the direction of the signed gradient then project
        # back onto the feasible set of solutions (the ball around the
        # original example) by clipping.  torch.clamp only supports a single
        # value, so elementwise max/min against the precomputed bounds is one
        # fewer function call than extract-perturbation/clip/re-add.
        adv_x = adv_x + step_size * sg
        adv_x = torch.max(adv_x, lower_bound)
        adv_x = torch.min(adv_x, upper_bound)

    # Restore the network state so the caller never notices the change
    if set_training:
        net.train()

    return adv_x
