import paddle as pp
from paddle import nn


class DropoutContext(object):
    """Mutable state shared across dropout calls so a mask can be reused.

    Attributes:
        dropout: base drop probability for this context.
        mask: cached boolean drop mask (``None`` until first computed).
        scale: multiplier applied to ``dropout`` when resolving the rate.
        reuse_mask: whether a cached mask may be reused on later calls.
    """

    def __init__(self):
        # Defaults describe a no-op context: no dropout, no cached mask.
        self.dropout = 0
        self.mask = None
        self.scale = 1
        self.reuse_mask = True


def get_mask(input, local_context):
    """Resolve the effective dropout rate and (possibly cached) drop mask.

    Args:
        input: tensor whose shape the mask must match.
        local_context: either a plain float drop probability, or a
            ``DropoutContext`` carrying a rate, a scale, and an optional
            cached mask.

    Returns:
        Tuple ``(mask, dropout)`` where ``mask`` is a boolean tensor with
        ``True`` at positions to drop (or ``None`` when no dropout applies)
        and ``dropout`` is the effective drop probability.
    """
    if isinstance(local_context, DropoutContext):
        # Context object: fold its scale into the rate and honor mask reuse.
        dropout = local_context.dropout * local_context.scale
        mask = local_context.mask if local_context.reuse_mask else None
    else:
        # A bare probability was passed; nothing to reuse.
        dropout = local_context
        mask = None

    if mask is None and dropout > 0:
        # torch equivalent: (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool()
        # Each entry of `keep` is 1 with prob (1 - dropout); flipping it
        # yields a mask that is True with prob `dropout` (True == drop).
        keep = pp.bernoulli(pp.ones_like(input) * (1 - dropout))
        mask = (1 - keep).astype(pp.bool)

    # Cache the freshly built mask on the context for later reuse.
    if isinstance(local_context, DropoutContext) and local_context.mask is None:
        local_context.mask = mask

    return mask, dropout


class XDropout(pp.autograd.PyLayer):
    """Dropout with reusable masks, ported from Hugging Face's XDropout.

    Implements inverted dropout: surviving activations are scaled by
    ``1 / (1 - p)`` in the forward pass so no rescaling is needed at
    inference time. The mask convention is ``True`` == drop (see get_mask).
    """

    @staticmethod
    def forward(ctx: pp.autograd.PyLayerContext,
                input: pp.Tensor,
                local_ctx: DropoutContext):
        mask, dropout = get_mask(input, local_ctx)
        ctx.scale = 1.0 / (1 - dropout)             # stashed for backward
        if dropout > 0:
            ctx.save_for_backward(mask)
            # BUG FIX: mask==True marks positions to DROP, so they must map
            # to zero and the unmasked positions keep `input`. The original
            # had the where() branches swapped, which kept elements with
            # probability `dropout` instead of `1 - dropout`.
            # (torch reference: input.masked_fill(mask, 0) * ctx.scale)
            return pp.where(mask, pp.zeros_like(input), input) * ctx.scale
        else:
            return input

    @staticmethod
    def backward(ctx: pp.autograd.PyLayerContext,
                 dy: pp.Tensor):
        # scale > 1 iff dropout was active in forward (and a mask was saved).
        if ctx.scale > 1:
            (mask,) = ctx.saved_tensor()
            # Same fix as forward: zero the gradient of dropped positions.
            return pp.where(mask, pp.zeros_like(dy), dy) * ctx.scale
        else:
            return dy


class StableDropout(nn.Layer):
    """
    Optimized dropout module for stabilizing the training

    Args:
        drop_prob (float): the dropout probabilities
    """

    def __init__(self, drop_prob):
        super().__init__()
        self.drop_prob = drop_prob
        self.count = 0              # index of the next context to hand out
        self.context_stack = None   # lazily created list of DropoutContext

    def forward(self, x):
        """
        Call the module

        Args:
            x (:obj:`paddle.Tensor`): The input tensor to apply dropout
        """
        # Dropout is a no-op at eval time or with a non-positive rate.
        if not (self.training and self.drop_prob > 0):
            return x
        return XDropout.apply(x, self.get_context())

    def clear_context(self):
        # Drop all cached contexts (and their masks) and restart counting.
        self.count = 0
        self.context_stack = None

    def init_context(self, reuse_mask=True, scale=1):
        # Reset the cursor and push the given settings onto every
        # already-allocated context.
        if self.context_stack is None:
            self.context_stack = []
        self.count = 0
        for context in self.context_stack:
            context.reuse_mask = reuse_mask
            context.scale = scale

    def get_context(self):
        # Without an initialized stack, fall back to a plain probability.
        if self.context_stack is None:
            return self.drop_prob
        # Grow the stack on demand so each call site gets its own context.
        while len(self.context_stack) <= self.count:
            self.context_stack.append(DropoutContext())
        context = self.context_stack[self.count]
        context.dropout = self.drop_prob
        self.count += 1
        return context

