'''
 * @Author: Benjay·Shaw
 * @Date: 2024-10-25 23:21:35
 * @LastEditors: Benjay·Shaw
 * @LastEditTime: 2024-10-29 19:39:52
 * @Description: 
'''
import paddle_aux
import paddle
"""
Activation functions
"""


class SiLU(paddle.nn.Layer):
    """SiLU / Swish activation: f(x) = x * sigmoid(x)."""

    @staticmethod
    def forward(x):
        # Export-friendly formulation: gate the input by its own sigmoid.
        gate = paddle.nn.functional.sigmoid(x=x)
        return x * gate


class Hardswish(paddle.nn.Layer):
    """Hard-Swish activation: f(x) = x * clip(x + 3, 0, 6) / 6."""

    @staticmethod
    def forward(x):
        # Piecewise-linear gate in [0, 1] that approximates sigmoid(x).
        gate = paddle.nn.functional.hardtanh(x=x + 3, min=0.0, max=6.0) / 6.0
        return x * gate


class Mish(paddle.nn.Layer):
    """Mish activation: f(x) = x * tanh(softplus(x))."""

    @staticmethod
    def forward(x):
        sp = paddle.nn.functional.softplus(x=x)
        return x * sp.tanh()


class MemoryEfficientMish(paddle.nn.Layer):
    """Mish activation implemented as a custom PyLayer.

    Saves only the raw input for the backward pass and recomputes the
    sigmoid/softplus terms there, trading a little extra compute for
    lower peak memory than the plain ``Mish`` layer.
    """

    class F(paddle.autograd.PyLayer):

        @staticmethod
        def forward(ctx, x):
            # Keep only the input; backward recomputes the gate terms.
            ctx.save_for_backward(x)
            return x.mul(paddle.nn.functional.tanh(x=paddle.nn.functional.
                softplus(x=x)))

        @staticmethod
        def backward(ctx, grad_output):
            # Paddle's PyLayerContext exposes saved tensors via the
            # saved_tensor() method, not the torch-style `saved_tensors`
            # attribute the converter emitted (which raises AttributeError).
            x, = ctx.saved_tensor()
            sx = paddle.nn.functional.sigmoid(x=x)
            fx = paddle.nn.functional.softplus(x=x).tanh()
            # d/dx [x * tanh(softplus(x))] = fx + x * sigmoid(x) * (1 - fx^2)
            return grad_output * (fx + x * sx * (1 - fx * fx))

    def forward(self, x):
        return self.F.apply(x)


class FReLU(paddle.nn.Layer):
    """FReLU activation: max(x, T(x)), where T is a depthwise conv + BN
    spatial condition ("Funnel Activation for Visual Recognition",
    <https://arxiv.org/abs/2007.11824>).
    """

    def __init__(self, c1, k=3):
        super().__init__()
        # padding=k // 2 preserves the spatial size for any odd k; the
        # previous hard-coded padding=1 was only correct for k=3 and made
        # the elementwise max below fail on shape for other kernel sizes.
        self.conv = paddle.nn.Conv2D(in_channels=c1, out_channels=c1,
            kernel_size=k, stride=1, padding=k // 2, groups=c1,
            bias_attr=False)
        self.bn = paddle.nn.BatchNorm2D(num_features=c1)

    def forward(self, x):
        # Elementwise max of the identity and the spatial condition T(x).
        return paddle_aux.max(x, self.bn(self.conv(x)))


class AconC(paddle.nn.Layer):
    """ ACON activation (activate or not).
    AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
    """

    def __init__(self, c1):
        super().__init__()
        # One learnable value per channel for each of p1, p2 and beta.
        param_shape = [1, c1, 1, 1]
        self.p1 = paddle.base.framework.EagerParamBase.from_tensor(
            tensor=paddle.randn(shape=param_shape))
        self.p2 = paddle.base.framework.EagerParamBase.from_tensor(
            tensor=paddle.randn(shape=param_shape))
        self.beta = paddle.base.framework.EagerParamBase.from_tensor(
            tensor=paddle.ones(shape=param_shape))

    def forward(self, x):
        dpx = (self.p1 - self.p2) * x
        gate = paddle.nn.functional.sigmoid(x=self.beta * dpx)
        return dpx * gate + self.p2 * x


class MetaAconC(paddle.nn.Layer):
    """ ACON activation (activate or not).
    MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
    """

    def __init__(self, c1, k=1, s=1, r=16):
        super().__init__()
        # Bottleneck width of the small beta-generating network.
        c2 = max(r, c1 // r)
        self.p1 = paddle.base.framework.EagerParamBase.from_tensor(tensor=
            paddle.randn(shape=[1, c1, 1, 1]))
        self.p2 = paddle.base.framework.EagerParamBase.from_tensor(tensor=
            paddle.randn(shape=[1, c1, 1, 1]))
        self.fc1 = paddle.nn.Conv2D(in_channels=c1, out_channels=c2,
            kernel_size=k, stride=s, bias_attr=True)
        self.fc2 = paddle.nn.Conv2D(in_channels=c2, out_channels=c1,
            kernel_size=k, stride=s, bias_attr=True)

    def forward(self, x):
        # Global average pool over H and W. Paddle's Tensor.mean takes
        # axis=/keepdim=; the torch-style dim=/keepdims= kwargs left by the
        # converter raise a TypeError at runtime.
        y = x.mean(axis=2, keepdim=True).mean(axis=3, keepdim=True)
        # Per-channel, input-dependent beta from the bottleneck network.
        beta = paddle.nn.functional.sigmoid(x=self.fc2(self.fc1(y)))
        dpx = (self.p1 - self.p2) * x
        return dpx * paddle.nn.functional.sigmoid(x=beta * dpx) + self.p2 * x


class h_sigmoid(paddle.nn.Layer):
    """Hard sigmoid: relu6(x + 3) / 6, a piecewise-linear sigmoid surrogate."""

    def __init__(self, inplace=True):
        # `inplace` is kept for API compatibility only; Paddle's ReLU6
        # has no in-place mode, so it is unused.
        super(h_sigmoid, self).__init__()
        self.relu = paddle.nn.ReLU6()

    def forward(self, x):
        shifted = self.relu(x + 3)
        return shifted / 6


class h_swish(paddle.nn.Layer):
    """Hard swish: x * h_sigmoid(x)."""

    def __init__(self, inplace=True):
        # `inplace` is forwarded to h_sigmoid for API compatibility.
        super(h_swish, self).__init__()
        self.sigmoid = h_sigmoid(inplace=inplace)

    def forward(self, x):
        return x * self.sigmoid(x)
