'''
 * @Author: Benjay·Shaw
 * @Date: 2024-10-31 17:07:50
 * @LastEditors: Benjay·Shaw
 * @LastEditTime: 2024-10-31 22:40:05
 * @Description: DynamicReLU
'''
import paddle_aux
import paddle


class DyReLU(paddle.nn.Layer):
    """Base class for Dynamic ReLU (DY-ReLU) activations.

    A squeeze step (global average over the spatial/temporal axes) followed
    by a two-layer bottleneck MLP produces 2*k coefficients per sample; they
    are mapped into (-1, 1) via ``2 * sigmoid - 1`` and later scaled/shifted
    by the ``lambdas`` / ``init_v`` buffers in the subclasses to form k
    (slope, intercept) pairs of a piecewise-linear activation.

    Args:
        channels (int): number of input channels C.
        reduction (int): channel reduction ratio of the bottleneck. Default 4.
        k (int): number of linear pieces per activation. Default 2.
        conv_type (str): '1d' for (N, C, L) inputs, '2d' for (N, C, H, W).
    """

    def __init__(self, channels, reduction=4, k=2, conv_type='2d'):
        super(DyReLU, self).__init__()
        self.channels = channels
        self.k = k
        self.conv_type = conv_type
        assert self.conv_type in ['1d', '2d']
        # Parameter-free layer kept only for backward compatibility: the
        # squeeze step is performed with explicit means in get_relu_coefs.
        self.avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=1)
        self.fc1 = paddle.nn.Linear(in_features=channels, out_features=
            channels // reduction)
        self.relu = paddle.nn.ReLU()
        self.fc2 = paddle.nn.Linear(in_features=channels // reduction,
            out_features=2 * k)
        self.sigmoid = paddle.nn.Sigmoid()
        # Per-coefficient scales: 1.0 for the k slopes, 0.5 for the k intercepts.
        self.register_buffer(name='lambdas', tensor=paddle.to_tensor(data=[
            1.0] * k + [0.5] * k, dtype='float32').astype(dtype='float32'))
        # Initial coefficients: identity slope for piece 0, zeros elsewhere.
        self.register_buffer(name='init_v', tensor=paddle.to_tensor(data=[
            1.0] + [0.0] * (2 * k - 1), dtype='float32').astype(dtype=
            'float32'))

    def get_relu_coefs(self, x):
        """Return per-sample coefficients theta in (-1, 1), shape (N, out_fc2)."""
        # Squeeze: global average over the trailing axis(es) -> (N, C).
        theta = paddle.mean(x, axis=-1)
        if self.conv_type == '2d':
            theta = paddle.mean(theta, axis=-1)
        # BUGFIX: the converted code additionally ran ``self.avg_pool(theta)``
        # here, but AdaptiveAvgPool2D requires a 4-D (N, C, H, W) input while
        # theta is already reduced to 2-D (N, C) — the call could never
        # succeed and is not part of the reference algorithm.
        theta = self.fc1(theta)
        theta = self.relu(theta)
        theta = self.fc2(theta)
        # Map the sigmoid output from (0, 1) to (-1, 1).
        theta = 2 * self.sigmoid(theta) - 1
        return theta

    def forward(self, x):
        # Abstract: subclasses (DyReLUA / DyReLUB) implement the activation.
        raise NotImplementedError


class DyReLUA(DyReLU):
    """DY-ReLU-A: activation coefficients shared across all channels.

    fc2 emits 2*k scalars per sample, so every channel and position uses the
    same k (slope, intercept) pairs: y = max_j(a_j * x + b_j).
    """

    def __init__(self, channels, reduction=4, k=2, conv_type='2d'):
        super(DyReLUA, self).__init__(channels, reduction, k, conv_type)
        # Cleanup: the converted code rebuilt an fc2 identical to the one the
        # base class already creates (out_features=2*k); the rebuild only
        # wasted a layer construction and is removed.

    def forward(self, x):
        assert tuple(x.shape)[1] == self.channels
        theta = self.get_relu_coefs(x)
        # (N, 2k): first k entries are slopes, last k are intercepts.
        relu_coefs = theta.reshape([-1, 2 * self.k]
            ) * self.lambdas + self.init_v
        # Move the batch axis last so (N, k) coefficients broadcast, and add
        # a trailing axis for the k candidate linear pieces.
        x_perm = x.transpose(perm=paddle_aux.transpose_aux_func(x.ndim, 0, -1)
            ).unsqueeze(axis=-1)
        output = x_perm * relu_coefs[:, :self.k] + relu_coefs[:, self.k:]
        # BUGFIX/cleanup: the converted code evaluated paddle.max AND
        # paddle.argmax twice each only to take element [0] of a tuple;
        # a single max over the pieces axis is sufficient.
        result = paddle.max(x=output, axis=-1)
        # Undo the batch-last permutation to restore the input layout.
        return result.transpose(perm=paddle_aux.transpose_aux_func(
            result.ndim, 0, -1))


class DyReLUB(DyReLU):
    """DY-ReLU-B: per-channel activation coefficients.

    fc2 emits 2*k*channels values per sample, so each channel gets its own
    k (slope, intercept) pairs: y_c = max_j(a_cj * x_c + b_cj).
    """

    def __init__(self, channels, reduction=4, k=2, conv_type='2d'):
        super(DyReLUB, self).__init__(channels, reduction, k, conv_type)
        # Widen the head: 2*k coefficients for every channel.
        self.fc2 = paddle.nn.Linear(in_features=channels // reduction,
            out_features=2 * k * channels)

    def forward(self, x):
        assert tuple(x.shape)[1] == self.channels
        theta = self.get_relu_coefs(x)
        # (N, C, 2k): per-channel slopes (first k) and intercepts (last k).
        relu_coefs = theta.reshape([-1, self.channels, 2 * self.k]
            ) * self.lambdas + self.init_v
        # Move spatial axes to the front so the (N, C, k) coefficients
        # broadcast; the trailing unsqueezed axis holds the k pieces.
        if self.conv_type == '1d':
            # (N, C, L) -> (L, N, C, 1); inverse permutation restores NCL.
            x_perm = x.transpose(perm=[2, 0, 1]).unsqueeze(axis=-1)
            perm_back = [1, 2, 0]
        else:
            # '2d' (the constructor asserts conv_type is '1d' or '2d'):
            # (N, C, H, W) -> (H, W, N, C, 1).
            x_perm = x.transpose(perm=[2, 3, 0, 1]).unsqueeze(axis=-1)
            perm_back = [2, 3, 0, 1]
        output = x_perm * relu_coefs[:, :, :self.k] + relu_coefs[:, :, self.k:]
        # BUGFIX/cleanup: the converted code evaluated paddle.max AND
        # paddle.argmax twice each only to take element [0]; one max over the
        # pieces axis suffices. Branch bodies are also unified — only the
        # permutations differ between '1d' and '2d'.
        result = paddle.max(x=output, axis=-1)
        return result.transpose(perm=perm_back)


class DyReLUC(paddle.nn.Layer):
    """DY-ReLU-C: per-channel coefficients gated by a spatial attention map.

    A squeeze-and-excite style head (``coef``) predicts 2*k coefficients per
    channel, while a 1x1 conv (``sptial``) predicts a single-channel spatial
    attention map that is softmax-normalized over positions, scaled by
    gamma*H*W and clipped to [0, 1] before gating the activation output.

    Args:
        channels (int): number of input channels C.
        reduction (int): channel reduction ratio of the bottleneck. Default 4.
        k (int): number of linear pieces per activation. Default 2.
        tau (float): softmax temperature for the spatial gate. Default 10.
        gamma (float): spatial-gate scale factor (multiplied by H*W). Default 1/3.
    """

    def __init__(self, channels, reduction=4, k=2, tau=10, gamma=1 / 3):
        super().__init__()
        self.channels = channels
        self.reduction = reduction
        self.k = k
        self.tau = tau
        self.gamma = gamma
        # Coefficient head: GAP -> 1x1 conv bottleneck -> 2*k*C sigmoids.
        self.coef = paddle.nn.Sequential(paddle.nn.AdaptiveAvgPool2D(
            output_size=1), paddle.nn.Conv2D(in_channels=channels,
            out_channels=channels // reduction, kernel_size=1), paddle.nn.
            ReLU(), paddle.nn.Conv2D(in_channels=channels // reduction,
            out_channels=2 * k * channels, kernel_size=1), paddle.nn.Sigmoid())
        # NOTE: attribute name 'sptial' (sic) is kept so existing checkpoints
        # and callers continue to work; it predicts one attention logit per
        # spatial position (out_channels=1).
        self.sptial = paddle.nn.Conv2D(in_channels=channels, out_channels=1,
            kernel_size=1)
        # Per-coefficient scales: 1.0 for the k slopes, 0.5 for the k intercepts.
        self.register_buffer(name='lambdas', tensor=paddle.to_tensor(data=[
            1.0] * k + [0.5] * k, dtype='float32').astype(dtype='float32'))
        # Initial coefficients: identity slope for piece 0, zeros elsewhere.
        self.register_buffer(name='bias', tensor=paddle.to_tensor(data=[1.0
            ] + [0.0] * (2 * k - 1), dtype='float32').astype(dtype='float32'))

    def forward(self, x):
        N, C, H, W = tuple(x.shape)
        # Per-channel piecewise-linear coefficients, mapped to (-1, 1) and
        # then scaled/shifted: (N, C, 2k).
        coef = self.coef(x)
        coef = 2 * coef - 1
        coef = coef.reshape([-1, self.channels, 2 * self.k]
            ) * self.lambdas + self.bias
        gamma = self.gamma * H * W
        spatial = self.sptial(x)  # (N, 1, H, W): one logit per position
        # BUGFIX: the conv outputs ONE channel, so the logits must be
        # flattened as (N, 1, H*W). The converted code reshaped to
        # (N, self.channels, -1), which normalizes the softmax over bogus
        # chunks of H*W/C positions (and only even succeeds when C divides
        # H*W). The clip below already restored (N, 1, H, W), confirming
        # the intended shape.
        spatial = spatial.reshape([N, 1, -1]) / self.tau
        # Softmax over all positions, scaled so the mean gate is ~gamma,
        # then saturated at 1.
        spatial = paddle.nn.functional.softmax(x=spatial, axis=-1) * gamma
        spatial = paddle.clip(x=spatial, min=0, max=1).reshape([N, 1, H, W])
        # (N, C, H, W) -> (H, W, N, C, 1) so the (N, C, k) coefs broadcast.
        x_perm = x.transpose(perm=[2, 3, 0, 1]).unsqueeze(axis=-1)
        output = x_perm * coef[:, :, :self.k] + coef[:, :, self.k:]
        # Apply the spatial gate (broadcast over channels and pieces).
        spatial = spatial.transpose(perm=[2, 3, 0, 1]).unsqueeze(axis=-1)
        output = spatial * output
        # Cleanup: the converted code evaluated paddle.max AND paddle.argmax
        # twice each only to take element [0]; one max suffices.
        result = paddle.max(x=output, axis=-1)
        return result.transpose(perm=[2, 3, 0, 1])
