import paddle.fluid as fluid
from paddle.fluid import layers as L
from helm.static.models.layers import GlobalAvgPool, DEFAULTS, Sequential, Layer, Conv2d, BN, Act


class SELayer(Layer):
    """Squeeze-and-Excitation channel attention.

    Globally pools the input, squeezes it through a bottleneck of
    ``in_channels // reduction`` channels, and produces a per-channel
    sigmoid gate that rescales the input.
    """

    def __init__(self, in_channels, reduction):
        super().__init__()
        squeezed = in_channels // reduction
        self.attn = Sequential(
            ("pool", GlobalAvgPool(keep_dim=True)),
            ("fc1", Conv2d(in_channels, squeezed, 1, bn=True, act='relu')),
            ("fc2", Conv2d(squeezed, in_channels, 1, act='sigmoid')),
        )

    def forward(self, x):
        # Multiply each channel of x by its learned gate in [0, 1].
        gate = self.attn(x)
        return x * gate


class DropPath(Layer):
    """Stochastic depth (drop-path) regularizer.

    During training, zeroes entire samples with probability ``drop_prob``
    (delegating to :func:`drop_path`); at inference the input passes
    through unchanged. In dygraph mode the standard ``self.training``
    flag decides; in static-graph mode the explicit ``is_test`` flag does.
    """

    def __init__(self, drop_prob, is_test=False):
        super().__init__()
        self.drop_prob = drop_prob
        # Static-graph evaluation switch; dygraph relies on self.training.
        self.is_test = is_test

    def forward(self, x):
        if fluid.in_dygraph_mode():
            return drop_path(x, self.drop_prob) if self.training else x
        return x if self.is_test else drop_path(x, self.drop_prob)


def drop_path(x, prob):
    """Randomly zero whole samples of ``x`` with probability ``prob``.

    Draws one uniform random number per batch element, keeps the sample
    when it falls below ``1 - prob``, and rescales survivors by
    ``1 / (1 - prob)`` so the expected value is unchanged.
    """
    keep_prob = 1 - prob
    batch = L.shape(x)[0]
    noise = L.uniform_random([batch], seed=DEFAULTS['seed'])
    keep = L.cast(noise < keep_prob, x.dtype)
    # Broadcast the per-sample mask over all non-batch dimensions.
    keep = L.unsqueeze(keep, list(range(1, len(x.shape))))
    return x * keep / keep_prob



def round_channels(channels, divisor=8, min_depth=None):
    """Round ``channels`` to the nearest multiple of ``divisor``.

    The result is clamped below by ``min_depth`` (defaults to ``divisor``)
    and is bumped up by one ``divisor`` if rounding would shrink the
    channel count by more than 10%.
    """
    floor = min_depth or divisor
    rounded = max(floor, int(channels + divisor / 2) // divisor * divisor)
    # Never round down by more than 10% of the requested width.
    if rounded < 0.9 * channels:
        rounded += divisor
    return int(rounded)



class SplAtConv2d(Layer):
    """Split-Attention Conv2d (ResNeSt).

    A single grouped convolution produces ``radix`` feature splits, which
    are summed, pooled, and fed through a small bottleneck to produce
    radix-wise softmax attention weights; the splits are then recombined
    as an attention-weighted sum.

    Fix: ``self.bn`` and ``self.act`` were constructed but never applied
    in ``forward``; the reference ResNeSt implementation runs
    conv -> BN -> activation before splitting, so they are now applied.
    """

    def __init__(self, in_channels, channels, kernel_size, stride=1, padding='same',
                 dilation=1, groups=1, bias=None, radix=2, reduction=4):
        super().__init__()
        # Bottleneck width of the attention MLP, rounded to a multiple of
        # `groups` and floored at 32 channels.
        inter_channels = round_channels(max(in_channels * radix // reduction, 32), groups)
        self.radix = radix
        self.cardinality = groups
        self.channels = channels

        # One grouped conv emits all radix splits at once (channels * radix).
        self.conv = Conv2d(in_channels, channels * radix, kernel_size, stride, padding,
                           groups=groups * radix, dilation=dilation, bias=bias)
        self.bn = BN(channels * radix)
        self.act = Act()
        self.attn = Sequential(
            ("pool", GlobalAvgPool(keep_dim=True)),
            ("fc1", Conv2d(channels, inter_channels, 1, groups=1, bn=True, act='default')),
            ("fc2", Conv2d(inter_channels, channels * radix, 1, groups=1)),
            ("rsoftmax", rSoftMax(radix, groups)),
        )

    def forward(self, x):
        x = self.conv(x)
        # Apply the previously-unused BN and activation (reference ResNeSt
        # order: conv -> bn -> act -> split).
        x = self.bn(x)
        x = self.act(x)

        if self.radix > 1:
            # Sum the radix splits before computing shared attention.
            splited = L.split(x, self.radix, dim=1)
            gap = sum(splited)
        else:
            gap = x

        gap = self.attn(gap)

        if self.radix > 1:
            # Recombine splits weighted by their radix-softmax attention.
            attns = L.split(gap, self.radix, dim=1)
            out = sum([att * split for (att, split) in zip(attns, splited)])
        else:
            out = gap * x
        return out


class rSoftMax(Layer):
    """Radix-wise softmax for split-attention.

    For ``radix > 1`` the flat channel gate is reshaped to
    (batch, cardinality, radix, channels-per-group), softmax is taken
    across the radix axis, and the result is flattened back to
    (batch, channels, 1, 1). For ``radix == 1`` a plain sigmoid is used.
    """

    def __init__(self, radix, cardinality):
        super().__init__()
        self.radix = radix
        self.cardinality = cardinality

    def forward(self, x):
        if self.radix <= 1:
            # Degenerate case: a single split just gets a sigmoid gate.
            return L.sigmoid(x)
        batch, chan = x.shape[:2]
        per_group = chan // self.cardinality // self.radix
        out = L.reshape(x, [batch, self.cardinality, self.radix, per_group])
        # Move radix to axis 1 so the softmax normalizes across splits.
        out = L.transpose(out, [0, 2, 1, 3])
        out = L.softmax(out, axis=1)
        return L.reshape(out, [batch, chan, 1, 1])