import math
from typing import Union, Tuple, Optional

import paddle.fluid.layers as L
from paddle.fluid import ParamAttr
from paddle.fluid.dygraph import Conv2D, BatchNorm, Pool2D, \
    Layer as FluidLayer, Linear as LinearLayer, Sequential as FluidSequential, LayerList as FluidLayerList
from paddle.fluid.initializer import Constant, Uniform, Normal

from helm.static.models.pprint import layer_repr

__all__ = ['Pool2d', 'GlobalAvgPool', "Conv2d", "Identity", "Act", "Sequential",
           "flatten", 'BN', "DEFAULTS", "update_defaults", "Linear", "Layer", "LayerList"]

from paddle.fluid.regularizer import L2Decay

# Global configuration shared by the factory functions below (BN, Conv2d,
# Linear, Act, ...).  Mutate through `update_defaults`, which validates keys,
# rather than editing this dict directly.
DEFAULTS = {
    'bn': {
        'momentum': 0.9,   # BatchNorm running-statistics momentum
        'eps': 1e-5,       # BatchNorm epsilon
        'affine': True,    # learn BN scale/shift unless overridden per-layer
    },
    'activation': 'relu',  # activation used by Act('default')
    'fp16': False,         # when True, Conv2d always enables cudnn
    'init': {
        'type': 'msra',    # 'msra' (He) or 'normal' weight init
        'mode': 'fan_in',  # fan mode for msra init
        'uniform': False,  # msra: uniform vs. normal distribution
        'std': 0.01,       # std for 'normal' init
        'scale': 1.0,      # gain for msra init
    },
    'seed': 0,             # seed passed to every paddle initializer
    'no_bias_decay': False,  # when True, exclude BN params / conv bias from L2 decay
}


class Layer(FluidLayer):
    """Base layer: fluid Layer plus a readable repr and tab-completion support.

    `__repr__` delegates to the project's pretty-printer; `__dir__` exposes
    parameters and sub-layers so interactive exploration works.
    """

    def __repr__(self):
        return layer_repr(self)

    def __dir__(self):
        names = []
        names.extend(dir(self.__class__))
        names.extend(self.__dict__)
        names.extend(self._parameters)
        names.extend(self._sub_layers)

        # Drop entries that are not legal Python identifiers
        # (e.g. numeric sub-layer names).
        names = [n for n in names if not n[0].isdigit()]

        return sorted(names)


class Sequential(FluidSequential, Layer):
    """Fluid Sequential with this module's `Layer` repr/__dir__ mixed in."""
    pass


class LayerList(FluidLayerList, Layer):
    """Fluid LayerList with this module's `Layer` repr/__dir__ mixed in."""
    pass


def update_defaults(new_d, d):
    """Recursively merge `new_d` into `d` in place.

    Nested dicts are merged key by key; a key absent from `d` raises
    KeyError, so typos in override configs fail loudly.
    """
    for key, value in new_d.items():
        if key not in d:
            raise KeyError(str(key))
        if isinstance(value, dict):
            update_defaults(value, d[key])
        else:
            d[key] = value


def calc_fan(in_channels, out_channels, kernel_size: Optional[Tuple[int, int]]):
    """Return (fan_in, fan_out) for a conv/linear weight.

    `kernel_size` is (kh, kw) for convolutions, or None for a linear
    layer (treated as a 1x1 kernel).
    """
    receptive = 1 if kernel_size is None else kernel_size[0] * kernel_size[1]
    return in_channels * receptive, out_channels * receptive


def msra_init(fan_in, fan_out, mode, uniform, scale=1.0):
    """Build an MSRA (He) initializer over the chosen fan.

    Returns a paddle `Uniform` with bound sqrt(6*scale/fan) when `uniform`
    is true, otherwise a `Normal` with std sqrt(2*scale/fan).  Raises
    ValueError for a `mode` other than 'fan_in'/'fan_out'.
    """
    if mode == 'fan_in':
        fan = fan_in
    elif mode == 'fan_out':
        fan = fan_out
    else:
        raise ValueError("Unsupported mode: %s" % mode)
    seed = DEFAULTS['seed']
    if uniform:
        limit = math.sqrt(6 * scale / fan)
        return Uniform(-limit, limit, seed=seed)
    std = math.sqrt(2 * scale / fan)
    return Normal(0, std, seed=seed)


def Act(name='default'):
    """Create an activation layer by name.

    'default' resolves through DEFAULTS['activation'].  Supported names:
    relu, relu6, swish, sigmoid, hsigmoid.  Unknown names raise ValueError.
    """
    if name == 'default':
        return Act(DEFAULTS['activation'])
    factories = {
        'relu': ReLU,
        'relu6': ReLU6,
        'swish': Swish,
        'sigmoid': Sigmoid,
        'hsigmoid': HardSigmoid,
    }
    factory = factories.get(name)
    if factory is None:
        raise ValueError("No activation named: %s" % name)
    return factory()


class Identity(Layer):
    """No-op layer: forward returns its input unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Pass-through; useful as a placeholder in conditional architectures.
        return x


def flatten(x):
    """Collapse a 4-D tensor to 2-D: (N, C, H, W) -> (N, C*H*W)."""
    n_features = x.shape[1] * x.shape[2] * x.shape[3]
    return L.reshape(x, [-1, n_features])


def BN(channels, affine=None, zero_init=False):
    """BatchNorm factory honoring DEFAULTS['bn'].

    Args:
        channels: number of input channels.
        affine: learn scale/shift; None falls back to DEFAULTS['bn']['affine'].
        zero_init: initialize the scale to 0 (e.g. last BN of a residual
            branch) instead of 1.
    """
    cfg = DEFAULTS['bn']
    reg = L2Decay(0.) if DEFAULTS['no_bias_decay'] else None
    scale_init = Constant(0.) if zero_init else Constant(1.)
    param_attr = ParamAttr(initializer=scale_init, regularizer=reg)
    bias_attr = ParamAttr(initializer=Constant(0.), regularizer=reg)
    bn = BatchNorm(num_channels=channels,
                   momentum=cfg['momentum'], epsilon=cfg['eps'],
                   param_attr=param_attr, bias_attr=bias_attr)
    if affine is None:
        affine = cfg['affine']
    if not affine:
        # Freeze scale/shift so only running statistics are used.
        for p in (bn.weight, bn.bias):
            p.trainable = False
            p.stop_gradient = True
    return bn


def Linear(in_channels, out_channels, act=None):
    """Fully-connected layer with fan-in MSRA (normal) weight init.

    When `act` is given, returns a Sequential of ("fc", "act");
    otherwise returns the bare linear layer.
    """
    weight_init = msra_init(in_channels, out_channels, 'fan_in', uniform=False)
    fc = LinearLayer(in_channels, out_channels,
                     param_attr=ParamAttr(initializer=weight_init))
    if not act:
        return fc
    return Sequential(
        ("fc", fc),
        ("act", Act(act)),
    )


def Conv2d(in_channels: int,
           out_channels: int,
           kernel_size: Union[int, Tuple[int, int]],
           stride: Union[int, Tuple[int, int]] = 1,
           padding: Union[str, int, Tuple[int, int]] = 'same',
           groups: int = 1,
           dilation: int = 1,
           bias: Optional[bool] = None,
           bn: bool = False,
           act: Optional[str] = None,
           zero_init=False):
    """Build a Conv2D, optionally followed by BatchNorm and an activation.

    Bug fix: the original `Conv2D(...)` call passed ``dilation=`` twice,
    which is a SyntaxError ("keyword argument repeated") and made this
    module unimportable.

    Args:
        in_channels: number of input channels.
        out_channels: number of output filters.
        kernel_size: int or (kh, kw).
        stride: int or (sh, sw).
        padding: 'same' for size-preserving padding at stride 1 (accounts
            for dilation), or an explicit int / (ph, pw).
        groups: convolution groups.
        dilation: dilation rate (int).
        bias: False disables the conv bias; None means "no bias when
            followed by BN, zero-initialized bias otherwise"; True forces
            a bias.
        bn: append a BatchNorm layer (see `BN`).
        act: optional activation name (see `Act`).
        zero_init: zero-initialize the BN scale (residual branches).

    Returns:
        The bare Conv2D when neither `bn` nor `act` is requested, otherwise
        a `Sequential` of ("conv"[, "bn"][, "act"]).
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)

    if isinstance(stride, int):
        stride = (stride, stride)

    if padding == 'same':
        # Half of the dilated effective kernel extent; preserves spatial
        # size for stride 1.
        kh, kw = kernel_size
        ph = (kh + (kh - 1) * (dilation - 1) - 1) // 2
        pw = (kw + (kw - 1) * (dilation - 1) - 1) // 2
        padding = (ph, pw)

    # NOTE(review): cudnn is disabled for depthwise conv (out_channels ==
    # groups) unless fp16 is on — presumably a depthwise-performance
    # workaround; confirm against the training configs.
    use_cudnn = DEFAULTS['fp16'] or (out_channels != groups)

    init_cfg = DEFAULTS['init']
    if init_cfg['type'] == 'msra':
        fan_in, fan_out = calc_fan(in_channels, out_channels, kernel_size)
        init = msra_init(fan_in, fan_out, init_cfg['mode'], init_cfg['uniform'], init_cfg['scale'])
    elif init_cfg['type'] == 'normal':
        init = Normal(0, init_cfg['std'], seed=DEFAULTS['seed'])
    else:
        raise ValueError("Unsupported init type: %s" % init_cfg['type'])
    param_attr = ParamAttr(initializer=init)

    if bias is False:
        bias_attr = False
    elif bias is None and bn:
        # BN supplies its own shift, so the conv bias is redundant.
        bias_attr = False
    else:
        if DEFAULTS['no_bias_decay']:
            bias_attr = ParamAttr(initializer=Constant(0.), regularizer=L2Decay(0.))
        else:
            bias_attr = ParamAttr(initializer=Constant(0.))

    layers = [
        ("conv", Conv2D(
            num_channels=in_channels,
            num_filters=out_channels,
            filter_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            dilation=dilation,  # fix: keyword was previously passed twice
            use_cudnn=use_cudnn,
            param_attr=param_attr,
            bias_attr=bias_attr,
        ))
    ]
    if bn:
        layers.append(("bn", BN(out_channels, zero_init=zero_init)))
    if act:
        layers.append(("act", Act(act)))
    if len(layers) == 1:
        return layers[0][1]
    else:
        return Sequential(*layers)


def Pool2d(kernel_size, stride, padding='same', type='avg', ceil_mode=False):
    """Pooling layer factory ('avg' or 'max').

    `padding='same'` computes half padding, which preserves spatial size
    for stride 1; otherwise the given int / (ph, pw) is used directly.
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)

    if padding == 'same':
        padding = tuple((k - 1) // 2 for k in kernel_size)

    return Pool2D(
        pool_size=kernel_size,
        pool_stride=stride,
        pool_padding=padding,
        pool_type=type,
        ceil_mode=ceil_mode,
    )


class GlobalAvgPool(Layer):
    """Global average pooling over the spatial dims (2, 3) of an NCHW tensor.

    With `keep_dim=False` the output is (N, C); with `keep_dim=True`
    the spatial dims are kept as size 1.
    """

    def __init__(self, keep_dim=False):
        super().__init__()
        self.keep_dim = keep_dim

    def forward(self, x):
        pooled = L.reduce_mean(x, dim=[2, 3], keep_dim=self.keep_dim)
        return pooled


class ReLU(Layer):
    """Rectified linear unit activation, delegating to `L.relu`."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        out = L.relu(x)
        return out


class ReLU6(Layer):
    """ReLU clipped at 6, delegating to `L.relu6`."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        out = L.relu6(x)
        return out


class Swish(Layer):
    """Swish activation, delegating to `L.swish`."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        out = L.swish(x)
        return out


class Sigmoid(Layer):
    """Sigmoid activation, delegating to `L.sigmoid`."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        out = L.sigmoid(x)
        return out


class HardSigmoid(Layer):
    """Piecewise-linear sigmoid approximation, delegating to `L.hard_sigmoid`."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        out = L.hard_sigmoid(x)
        return out