import paddle.fluid.layers as L
from paddle.fluid.dygraph import Linear
from helm.static.models.layers import Conv2d, Identity, GlobalAvgPool, Layer, Sequential


def _make_divisible(v, divisor, min_value=None):
    """
    This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible by 8
    It can be seen here:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    """
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


class SqueezeExcite(Layer):
    """Squeeze-and-Excitation block: globally pool, bottleneck through two
    fully-connected layers, and rescale the input channel-wise.

    The reduced width is ``se_ratio`` times ``reduced_base_chs`` (falling back
    to ``in_chs``), rounded with ``_make_divisible``.
    """

    def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None, divisor=4):
        super(SqueezeExcite, self).__init__()
        squeezed = _make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
        self.avg_pool = GlobalAvgPool()
        self.conv_reduce = Linear(in_chs, squeezed, act='relu')
        self.conv_expand = Linear(squeezed, in_chs, act='hard_sigmoid')

    def forward(self, x):
        # Pool to a per-channel descriptor, then compute the channel gate.
        gate = self.conv_expand(self.conv_reduce(self.avg_pool(x)))
        # axis=0 broadcasts the (N, C) gate over the spatial dims of x.
        return L.elementwise_mul(x=x, y=gate, axis=0)


class GhostModule(Layer):
    """Ghost module: produce half the output maps with a regular conv and the
    other half with a cheap depthwise conv, then concatenate.

    Bug fix: the original used ``channels = oup // 2`` and returned the raw
    concat, so an odd ``oup`` silently produced ``oup - 1`` output channels
    (``self.oup`` was stored but never used). We now ceil-halve the primary
    width and trim the concat back to exactly ``oup`` channels, matching the
    reference GhostNet implementation. Behavior is unchanged for even ``oup``.
    """

    def __init__(self, inp, oup, kernel_size=1, dw_size=3, relu=True):
        super().__init__()
        self.oup = oup
        # Ceil-half so the two concatenated halves always cover oup channels.
        channels = (oup + 1) // 2
        self.primary_conv = Conv2d(inp, channels, kernel_size,
                                   bn=True, act='default' if relu else None)
        # Depthwise "cheap operation" generating the ghost feature maps.
        self.cheap_operation = Conv2d(channels, channels, dw_size, groups=channels,
                                      bn=True, act='default' if relu else None)

    def forward(self, x):
        x1 = self.primary_conv(x)
        x2 = self.cheap_operation(x1)
        out = L.concat([x1, x2], axis=1)
        # Trim the surplus channel introduced by ceil-halving (no-op for even oup).
        return L.slice(out, axes=[1], starts=[0], ends=[self.oup])


class GhostBottleneck(Layer):
    """Ghost bottleneck: expand with a ghost module, optionally downsample
    with a depthwise conv (stride > 1), optionally apply squeeze-excitation,
    project with a second ghost module, and add a shortcut.

    The shortcut is an identity when shape is preserved; otherwise a
    depthwise + pointwise conv pair matches stride and channel count.
    """

    def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3, stride=1, se_ratio=0.):
        super().__init__()
        self.has_se = se_ratio is not None and se_ratio > 0.
        self.stride = stride

        # Point-wise expansion to mid_chs.
        self.ghost1 = GhostModule(in_chs, mid_chs, relu=True)

        # Depthwise conv only needed when spatially downsampling.
        if self.stride > 1:
            self.dwconv = Conv2d(mid_chs, mid_chs, dw_kernel_size,
                                 stride=stride, groups=mid_chs, bn=True)

        # Optional squeeze-and-excitation on the expanded features.
        if self.has_se:
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio)

        # Linear projection (no activation) back down to out_chs.
        self.ghost2 = GhostModule(mid_chs, out_chs, relu=False)

        # Identity shortcut when both shape and stride are preserved,
        # otherwise a depthwise + pointwise downsampling path.
        if self.stride == 1 and in_chs == out_chs:
            self.shortcut = Identity()
        else:
            self.shortcut = Sequential(
                Conv2d(in_chs, in_chs, dw_kernel_size, stride=stride, groups=in_chs, bn=True),
                Conv2d(in_chs, out_chs, 1, bn=True),
            )

    def forward(self, x):
        identity = x
        out = self.ghost1(x)
        if self.stride > 1:
            out = self.dwconv(out)
        if self.has_se:
            out = self.se(out)
        out = self.ghost2(out)
        return out + self.shortcut(identity)
