import math
import mindspore.nn as nn
from collections import defaultdict
from mindspore.ops import operations as P

import numpy as np
from mindspore import Tensor

from src.common import Activation

# BatchNorm hyper-parameters shared by every normalization layer in this backbone.
# NOTE(review): MindSpore's BatchNorm `momentum` convention differs from PyTorch's
# (weight on the running stat vs. the new batch stat) — confirm 0.01 is intended here.
BATCH_NORM_MOMENTUM = 0.01
BATCH_NORM_EPSILON = 1e-3


def make_divisible(v, divisor=8, min_value=None):
    """Round ``v`` to the nearest multiple of ``divisor``, never below ``min_value``.

    Standard channel-rounding helper: the result is guaranteed not to fall
    more than 10% below the requested value ``v``.
    """
    floor = min_value or divisor
    candidate = max(floor, int(v + divisor / 2) // divisor * divisor)
    # If nearest-multiple rounding lost more than 10% of v, step up one multiple.
    return candidate + divisor if candidate < 0.9 * v else candidate


def _round_filters(filters, multiplier=None, divisor=8, min_depth=None):
    """ Calculate and round number of filters based on depth multiplier. """
    if not multiplier:
        return filters
    filters *= multiplier
    min_depth = min_depth or divisor
    new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
    if new_filters < 0.9 * filters:  # prevent rounding by more than 10%
        new_filters += divisor
    return int(new_filters)


def _round_repeats(repeats, multiplier=None):
    """ Round number of filters based on depth multiplier. """
    return repeats if not multiplier else int(math.ceil(multiplier * repeats))


class SqueezeExcite(nn.Cell):
    """Squeeze-and-Excitation: global average pool -> 1x1 reduce -> act -> 1x1 expand -> sigmoid gate.

    Args:
        in_chs: number of input (and output) channels.
        se_ratio: reduction ratio for the bottleneck channel count.
        reduced_base_chs: base channel count for the reduction (defaults to in_chs).
        act_layer: activation name used between the two 1x1 convs.
        divisor: rounding divisor for the reduced channel count.
        **kwargs: ignored; kept for call-site compatibility.
    """

    def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None, act_layer='swish', divisor=1, **kwargs):
        super(SqueezeExcite, self).__init__()
        reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
        self.pool = P.ReduceMean(keep_dims=True)
        self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, has_bias=True)
        # Fix: `act_layer` was accepted but ignored (activation hard-coded to 'swish').
        # It is now honored; the default changed 'relu' -> 'swish' so existing callers
        # that relied on the hard-coded value keep identical behavior.
        self.act1 = Activation(act_layer)
        self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, has_bias=True)
        self.gate_fn = nn.Sigmoid()
        self.mul = P.Mul()

    def construct(self, x):
        # Pool over the spatial axes (H, W), keeping dims so the gate broadcasts back.
        out = self.pool(x, (2, 3))
        out = self.conv_reduce(out)
        out = self.act1(out)
        out = self.conv_expand(out)
        out = self.gate_fn(out)
        # Re-weight the input channels by the learned gate.
        return self.mul(x, out)


class DepthwiseSeparableConv(nn.Cell):
    """Depth-wise separable convolution block (used when expand_ratio == 1).

    Pipeline: depth-wise conv -> norm -> act -> optional SE -> point-wise conv
    -> norm -> optional act, with an identity shortcut when the shape is preserved.

    Args:
        in_chs: input channel count.
        out_chs: output channel count.
        dw_kernel_size: kernel size of the depth-wise conv.
        stride: stride of the depth-wise conv.
        dilation: dilation of the depth-wise conv.
        pad_type: padding mode name (currently unused by the layers below).
        act_layer: activation name passed to Activation.
        noskip: disable the identity shortcut even when shapes match.
        pw_kernel_size: kernel size of the point-wise conv.
        pw_act: apply the activation after the point-wise conv as well.
        se_ratio: squeeze-excite reduction ratio; None or 0 disables SE.
        norm_layer: normalization layer factory (used for both norm layers).
        norm_kwargs: keyword arguments forwarded to norm_layer.
        drop_path_rate: stored but not applied in construct.
    """

    def __init__(self, in_chs: int, out_chs: int, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer='relu', noskip=False,
                 pw_kernel_size=1, pw_act=False, se_ratio=0.25,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.):
        super().__init__()
        norm_kwargs = norm_kwargs or {}
        has_se = se_ratio is not None and se_ratio > 0.
        # Identity shortcut only when spatial size and channel count are preserved.
        self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
        self.has_pw_act = pw_act  # activation after point-wise conv
        self.drop_path_rate = drop_path_rate  # NOTE(review): never read in construct — stochastic depth not applied

        # group == in_chs makes this a depth-wise convolution.
        self.conv_dw = nn.Conv2d(
            in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, group=in_chs, has_bias=False)
        # Fix: bn1 previously hard-coded nn.BatchNorm2d, silently ignoring the
        # `norm_layer` argument that bn2 honors; use the configurable factory for both.
        self.bn1 = norm_layer(in_chs, **norm_kwargs)
        self.act1 = Activation(act_layer)

        # Squeeze-and-excitation
        if has_se:
            self.se = SqueezeExcite(in_chs, se_ratio=se_ratio)
        else:
            self.se = None

        self.conv_pw = nn.Conv2d(in_chs, out_chs, pw_kernel_size, has_bias=False)
        self.bn2 = norm_layer(out_chs, **norm_kwargs)
        self.act2 = Activation(act_layer) if self.has_pw_act else None
        self.add = P.TensorAdd()

    def construct(self, x):
        residual = x

        x = self.conv_dw(x)
        x = self.bn1(x)
        x = self.act1(x)

        if self.se is not None:
            x = self.se(x)

        x = self.conv_pw(x)
        x = self.bn2(x)
        if self.act2 is not None:
            x = self.act2(x)

        if self.has_residual:
            x = self.add(x, residual)
        return x


class InvertedResidual(nn.Cell):
    """MBConv block: 1x1 point-wise expansion -> depth-wise conv -> optional SE
    -> 1x1 linear projection, with an identity shortcut when the shape is preserved.
    """

    def __init__(self, in_chs: int, out_chs: int, dw_kernel_size=3, stride=1, dilation=1, pad_type='', act_layer='relu',
                 noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, se_ratio=0., se_kwargs=None,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, conv_kwargs=None, drop_path_rate=0.):
        super(InvertedResidual, self).__init__()
        norm_kwargs = norm_kwargs or {}
        conv_kwargs = conv_kwargs or {}
        mid_chs = make_divisible(in_chs * exp_ratio)
        # Identity shortcut only when channel count and spatial size are preserved.
        self.has_residual = not noskip and in_chs == out_chs and stride == 1
        self.drop_path_rate = drop_path_rate

        # 1x1 point-wise expansion to mid_chs channels.
        self.conv_pw = nn.Conv2d(in_chs, mid_chs, exp_kernel_size, **conv_kwargs)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = Activation(act_layer)

        # Depth-wise conv (group == channels) carries the block's stride/dilation.
        self.conv_dw = nn.Conv2d(
            mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, group=mid_chs, **conv_kwargs)
        self.bn2 = norm_layer(mid_chs, **norm_kwargs)
        self.act2 = Activation(act_layer)

        # Optional squeeze-and-excitation on the expanded features.
        use_se = se_ratio is not None and se_ratio > 0.
        self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio) if use_se else None

        # 1x1 linear projection (no activation) back down to out_chs channels.
        self.conv_pwl = nn.Conv2d(mid_chs, out_chs, pw_kernel_size, **conv_kwargs)
        self.bn3 = norm_layer(out_chs, **norm_kwargs)
        self.add = P.TensorAdd()

    def construct(self, x):
        shortcut = x

        # Expansion
        out = self.act1(self.bn1(self.conv_pw(x)))
        # Depth-wise
        out = self.act2(self.bn2(self.conv_dw(out)))
        # Squeeze-and-excitation
        if self.se is not None:
            out = self.se(out)
        # Linear projection
        out = self.bn3(self.conv_pwl(out))

        if self.has_residual:
            out = self.add(out, shortcut)

        return out


class EfficientNet(nn.Cell):
    """EfficientNet backbone (B0-B7) returning multi-scale feature maps.

    construct() returns a dict mapping the cumulative down-sampling stride
    (as a string, "2".."32") to the feature map at that stride, suitable for
    feeding an FPN-style head.

    Args:
        level: variant index 0-7; selects the width/depth coefficients.
        drop_connect_rate: base stochastic-depth rate, scaled linearly per block
            (NOTE(review): the per-block rate is passed to the blocks but they do
            not apply it in construct — confirm stochastic depth is intentional-off).
        stem_size: base channel count of the stem conv before width scaling.
        pad_type: padding mode name (not forwarded to the conv layers below).
        use_head_feature: if True, append a 1x1 head conv (width-scaled 1280
            channels) to the deepest stage.
        replace_stride_with_dilation: optional list of bools applied to the last
            len(list) stride-2 block groups; True trades stride for dilation.
        **kwargs: 'last_stride' overrides the stride of the second-to-last block
            group (default 2).
    """

    def __init__(self, level=0, drop_connect_rate=0., stem_size=32, pad_type='same', use_head_feature=False,
                 replace_stride_with_dilation=None, **kwargs):
        super().__init__()
        params_dict = {
            # Coefficients:   width,depth,res,dropout
            'efficientnet-b0': (1.0, 1.0, 224, 0.2),
            'efficientnet-b1': (1.0, 1.1, 240, 0.2),
            'efficientnet-b2': (1.1, 1.2, 260, 0.3),
            'efficientnet-b3': (1.2, 1.4, 300, 0.3),
            'efficientnet-b4': (1.4, 1.8, 380, 0.4),
            'efficientnet-b5': (1.6, 2.2, 456, 0.4),
            'efficientnet-b6': (1.8, 2.6, 528, 0.5),
            'efficientnet-b7': (2.0, 3.1, 600, 0.5),
        }
        # Resolution `s` and dropout `p` are looked up but unused in this backbone.
        width_coefficient, depth_coefficient, s, p = params_dict['efficientnet-b{}'.format(level)]
        self.drop_connect_rate = drop_connect_rate
        # self.dropout_rate = dropout_rate
        # Stem: 3x3 stride-2 conv from RGB into the width-scaled stem channels.
        in_channels = 3  # rgb
        out_channels = _round_filters(stem_size, width_coefficient)  # number of output channels
        self.conv_stem = nn.Conv2d(in_channels, out_channels, 3, stride=2)
        self.bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=BATCH_NORM_MOMENTUM, eps=BATCH_NORM_EPSILON)
        self.act1 = Activation('swish')

        # Bookkeeping keyed by cumulative stride: channel count and layer list per stage.
        down_stride = 2
        self._channels = {down_stride: out_channels}
        self._stages = defaultdict(list)
        self._stages[down_stride] = [self.conv_stem, self.bn1, self.act1]
        # Build blocks
        self.blocks_cfg = [
            # repeat, kernel_size, stride, expand_ratio, output_channels, se_ratio, dilate
            [1, 3, 1, 1, 16, 0.25, False],
            [2, 3, 2, 6, 24, 0.25, False],
            [2, 5, 2, 6, 40, 0.25, False],
            [3, 3, 2, 6, 80, 0.25, False],
            [3, 5, 1, 6, 112, 0.25, False],
            [4, 5, 2, 6, 192, 0.25, False],
            [1, 3, 1, 6, 320, 0.25, False]
        ]
        if replace_stride_with_dilation is not None:
            # Indices 1, 2, 3, 5 are the stride-2 groups; mark the last n of them
            # to trade stride for dilation (keeps resolution, grows receptive field).
            n = len(replace_stride_with_dilation)
            for idx, replace in zip([1, 2, 3, 5][-n:], replace_stride_with_dilation):
                if replace:
                    self.blocks_cfg[idx][-1] = True
        dilation = 1
        # 'last_stride' overrides the second-to-last group's stride (e.g. 1 for re-ID).
        self.blocks_cfg[-2][2] = kwargs.setdefault('last_stride', 2)
        self.blocks = []
        in_channels = out_channels
        block_args = {'pad_type': pad_type, 'norm_kwargs': {'eps': BATCH_NORM_EPSILON, 'momentum': BATCH_NORM_MOMENTUM}}
        block_idx = 0
        # Total block count, used to scale drop_connect linearly over depth.
        block_count = sum(_round_repeats(info[0], depth_coefficient) for info in self.blocks_cfg)
        for repeat, kernel_size, stride, expand_ratio, output_channels, se_ratio, dilate in self.blocks_cfg:
            out_channels = _round_filters(output_channels, width_coefficient)
            layers = []
            if dilate:
                # Replace this group's stride with an equivalent dilation increase.
                dilation *= stride
                stride = 1
            down_stride *= stride
            for i in range(_round_repeats(repeat, depth_coefficient)):
                block_args['in_chs'] = in_channels
                block_args['out_chs'] = out_channels
                block_args['dw_kernel_size'] = kernel_size
                block_args['stride'] = stride
                block_args['dilation'] = dilation
                block_args['se_ratio'] = se_ratio
                block_args['act_layer'] = 'swish'
                block_args['drop_path_rate'] = drop_connect_rate * block_idx / block_count
                # expand_ratio == 1 needs no point-wise expansion.
                if expand_ratio == 1:
                    layers.append(DepthwiseSeparableConv(**block_args))
                else:
                    layers.append(InvertedResidual(**block_args, exp_ratio=expand_ratio))
                in_channels = out_channels
                # Only the first block of a group down-samples.
                stride = 1
                block_idx += 1
            layers = nn.SequentialCell(layers)
            self.blocks.append(layers)
            # Groups sharing a cumulative stride are merged into the same stage.
            self._channels[down_stride] = out_channels
            self._stages[down_stride].append(layers)
        # NOTE(review): the same cells are registered both here and in self._stages
        # below — presumably intentional for checkpoint naming; verify no parameter
        # duplication issues in the target MindSpore version.
        self.blocks = nn.SequentialCell(self.blocks)
        # Head
        self.use_head_feature = use_head_feature
        if self.use_head_feature:
            out_channels = _round_filters(1280, width_coefficient)
            self.conv_head = nn.Conv2d(in_channels, out_channels, 1)
            self.bn2 = nn.BatchNorm2d(num_features=out_channels, momentum=BATCH_NORM_MOMENTUM, eps=BATCH_NORM_EPSILON)
            self.act2 = Activation('swish')
            self._channels[down_stride] = out_channels
            self._stages[down_stride].extend([self.conv_head, self.bn2, self.act2])
        self._strides = list(self._stages.keys())
        self._stages = nn.CellList([nn.SequentialCell(layers) for layers in self._stages.values()])

    def construct(self, inputs):
        """Run the backbone and return {stride_str: feature_map} for strides 2..32.

        NOTE(review): assumes self._stages has exactly five entries, i.e. the
        default config; with replace_stride_with_dilation or last_stride=1 some
        stages merge and fewer entries exist — TODO confirm those configs.
        """
        features = {}
        x = inputs
        x = self._stages[0](x)
        features["2"] = x
        x = self._stages[1](x)
        features["4"] = x
        x = self._stages[2](x)
        features["8"] = x
        x = self._stages[3](x)
        features["16"] = x
        x = self._stages[4](x)
        features["32"] = x
        return features

    @property
    def channels(self):
        # Mapping of cumulative stride -> channel count of that stage's output.
        return self._channels


def _test():
    """Smoke test: push a random batch through EfficientNet-B0 and print feature shapes."""
    import mindspore.context as context
    context.set_context(device_target="GPU")

    # Fix: removed the unused `import torch` (it made this smoke test crash on
    # machines without PyTorch) and the unused `mindspore.ops` import. The
    # explicit compile_and_run() call was also dropped: the graph is compiled
    # on the first net(x) call, so it only ran the network twice.
    image_size = 256
    x = Tensor(np.random.rand(2, 3, image_size, image_size).astype(np.float32))
    net = EfficientNet(level=0)
    y = net(x)
    print({k: v.shape for k, v in y.items()})


# Run the smoke test only when this module is executed as a script.
if __name__ == '__main__':
    _test()
