from typing import Union, Tuple, Optional

import paddle.fluid.layers as L
from paddle.fluid.dygraph import Conv2D, BatchNorm, Pool2D, Sequential, Layer

__all__ = ['BNAct', 'Pool2d', 'GlobalAvgPool', "Conv2d", "flatten", 'BN']


def Act(name='relu'):
    if name == 'relu':
        return ReLU()
    elif name == 'swish':
        return Swish()
    else:
        raise ValueError("No activation named: %s" % name)


def flatten(x):
    """Flatten all non-batch dims: (N, C, H, W) -> (N, C*H*W).

    NOTE(review): assumes a 4-D NCHW tensor — confirm at call sites.
    """
    n_features = x.shape[1] * x.shape[2] * x.shape[3]
    return L.reshape(x, [-1, n_features])


def BN(channels, momentum=0.9, epsilon=1e-5):
    """Plain BatchNorm (no fused activation) over `channels` feature maps."""
    return BatchNorm(
        num_channels=channels,
        momentum=momentum,
        epsilon=epsilon,
    )


def BNAct(channels, act='relu', momentum=0.9, epsilon=1e-5):
    """BatchNorm with a fused activation.

    Args:
        channels: number of feature maps to normalize.
        act: activation name fused into the BatchNorm (must not be None;
            use BN() for a plain batch norm).
        momentum: running-stats momentum.
        epsilon: numerical-stability constant.

    Raises:
        ValueError: if `act` is None.
    """
    # Use an explicit raise instead of `assert`: asserts are stripped
    # under `python -O`, silently disabling this validation.
    if act is None:
        raise ValueError("BNAct requires an activation; use BN() instead")
    return BatchNorm(
        num_channels=channels,
        act=act,
        momentum=momentum,
        epsilon=epsilon,
    )


def Conv2d(in_channels: int,
           out_channels: int,
           kernel_size: Union[int, Tuple[int, int]],
           stride: Union[int, Tuple[int, int]] = 1,
           padding: Union[str, int, Tuple[int, int]] = 'same',
           groups: int = 1,
           dilation: int = 1,
           bias: Optional[bool] = None,
           bn: bool = False,
           act: Optional[str] = None,
           use_cudnn: bool = True):
    """Build a Conv2D layer, optionally followed by BatchNorm and activation.

    Args:
        in_channels: input feature maps.
        out_channels: output feature maps.
        kernel_size: int or (kh, kw).
        stride: int or (sh, sw).
        padding: explicit padding, or 'same' to keep spatial size at stride 1.
        groups: grouped-convolution group count.
        dilation: kernel dilation rate.
        bias: True/None -> framework-default bias, False -> no bias.
            Ignored (forced off) when `bn` is True.
        bn: append a BatchNorm after the conv (conv bias disabled, since
            BatchNorm supplies its own shift term).
        act: activation name; fused into the BatchNorm when `bn` is True,
            otherwise fused into the conv.
        use_cudnn: let paddle use the cuDNN kernel.

    Returns:
        A Conv2D layer, or a Sequential("conv", "bn") when `bn` is True.
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)

    # 'same' padding, accounting for the effective (dilated) kernel extent.
    if padding == 'same':
        kh, kw = kernel_size
        ph = (kh + (kh - 1) * (dilation - 1) - 1) // 2
        pw = (kw + (kw - 1) * (dilation - 1) - 1) // 2
        padding = (ph, pw)

    # bias=True means "use the framework default", i.e. bias_attr=None.
    if bias is True:
        bias = None

    if bn:
        conv = Conv2D(
            num_channels=in_channels,
            num_filters=out_channels,
            filter_size=kernel_size,
            stride=stride,
            padding=padding,
            # BUG FIX: dilation was used for the padding computation above
            # but never forwarded to Conv2D, silently producing an
            # undilated conv with oversized padding.
            dilation=dilation,
            groups=groups,
            use_cudnn=use_cudnn,
            bias_attr=False,
        )
        norm = BNAct(out_channels, act) if act else BN(out_channels)
        return Sequential(
            ("conv", conv),
            ("bn", norm),
        )

    # No batch norm: a single conv, with the activation (possibly None,
    # which is Conv2D's default) fused directly into it.
    return Conv2D(
        num_channels=in_channels,
        num_filters=out_channels,
        filter_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,  # BUG FIX: see note above.
        groups=groups,
        bias_attr=bias,
        use_cudnn=use_cudnn,
        act=act,
    )


def Pool2d(kernel_size, stride, padding='same', type='avg', ceil_mode=False):
    """Build a Pool2D layer.

    Args:
        kernel_size: int or (kh, kw) pooling window.
        stride: pooling stride.
        padding: explicit padding, or 'same' for (k - 1) // 2 per side.
        type: pool type string passed to Pool2D (e.g. 'avg', 'max').
        ceil_mode: round up when computing the output size.
    """
    window = (kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size

    if padding == 'same':
        kh, kw = window
        padding = ((kh - 1) // 2, (kw - 1) // 2)

    return Pool2D(
        pool_size=window,
        pool_stride=stride,
        pool_padding=padding,
        pool_type=type,
        ceil_mode=ceil_mode,
    )


class GlobalAvgPool(Layer):
    """Global average pooling: mean over the spatial dims (2, 3)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Reduces (N, C, H, W) to (N, C) by averaging over H and W.
        pooled = L.reduce_mean(x, dim=[2, 3])
        return pooled


class ReLU(Layer):
    """Layer wrapper around the functional ReLU activation."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        out = L.relu(x)
        return out


class Swish(Layer):
    """Layer wrapper around the functional Swish activation."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        out = L.swish(x)
        return out


class Identity(Layer):
    """No-op layer: forward returns its input unchanged.

    Useful as a structural placeholder (e.g. in place of a skipped
    branch inside a Sequential).
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x

