'''
@author: zhangkai
@license: (C) Copyright 2017-2023
@contact: jeffcobile@gmail.com
@Software : PyCharm
@file: mobilenet.py
@time: 2020-06-17 10:57:14
@desc: 
'''
import torch
from jjzhk.config import ZKCFG
from ELib.backbone.backbone_zoo import BACKBONE_ZOO, BackboneSeg
from collections import OrderedDict
from ELib.utils.block import conv_bn


@BACKBONE_ZOO.register()
def mobilenetv1(cfg: ZKCFG):
    """Registry factory: build a MobileNet backbone for the name in cfg.MODEL.BACKBONE."""
    backbone_name = cfg.MODEL.BACKBONE
    return MobileNet(cfg, backbone_name)


@BACKBONE_ZOO.register()
def mobilenetv2(cfg: ZKCFG):
    """Registry factory: build a MobileNet backbone for the name in cfg.MODEL.BACKBONE."""
    backbone_name = cfg.MODEL.BACKBONE
    return MobileNet(cfg, backbone_name)


@BACKBONE_ZOO.register()
def mb2_v3_asff(cfg: ZKCFG):
    """Registry factory: build a MobileNetV2 backbone from the given config."""
    backbone = MobileNetV2(cfg)
    return backbone


class MobileNet(BackboneSeg):
    """MobileNet v1/v2 backbone builder.

    Selects the layer list to build based on the suffix of the configured
    backbone name (``...v1`` or ``...v2``) and returns it from
    ``_create_network_`` for the ``BackboneSeg`` base class to consume.
    """

    def __init__(self, cfg, type):
        super(MobileNet, self).__init__(cfg)
        self.cfg = cfg
        # Backbone name, e.g. 'mobilenetv1' / 'mobilenetv2'; dispatched on suffix.
        self.type = type

    def _create_network_(self):
        """Return the list of layer modules for the configured variant.

        Raises:
            ValueError: if the backbone name matches no known variant.
        """
        if self.type.endswith('v1'):
            return self._create_v1_()
        if self.type.endswith('v2'):
            return self._create_v2_()
        # BUG FIX: the original routed names ending in 'asff' to a
        # non-existent self._create_v3_() (AttributeError at runtime) and
        # silently returned None for any other name; fail loudly instead.
        # (The 'mb2_v3_asff' registry entry uses MobileNetV2, not this class.)
        raise ValueError("unsupported MobileNet backbone type: %r" % self.type)

    def _create_v1_(self):
        """MobileNetV1: conv-bn stem + 13 depthwise-separable blocks.

        Each spec tuple is (in_channels, out_channels, stride).
        """
        v1_spec = [
            (32, 64, 1), (64, 128, 2), (128, 128, 1), (128, 256, 2),
            (256, 256, 1), (256, 512, 2),
            (512, 512, 1), (512, 512, 1), (512, 512, 1), (512, 512, 1), (512, 512, 1),
            (512, 1024, 2), (1024, 1024, 1),
        ]
        layers = [Mobilenet_conv_bn(3, 32, 2)]
        layers += [Mobilenet_conv_dw(inp, oup, s) for inp, oup, s in v1_spec]
        return layers

    def _create_v2_(self):
        """MobileNetV2-style: conv-bn stem + 17 inverted-residual bottlenecks.

        Each spec tuple is (in_channels, out_channels, stride, expand_ratio).
        NOTE(review): every bottleneck here uses stride 1, unlike the
        reference MobileNetV2 which downsamples at several stages — this
        reproduces the original code exactly; confirm it is intentional.
        """
        v2_spec = [
            (32, 16, 1, 1),
            (16, 24, 1, 6), (24, 24, 1, 6),
            (24, 32, 1, 6), (32, 32, 1, 6), (32, 32, 1, 6),
            (32, 64, 1, 6), (64, 64, 1, 6), (64, 64, 1, 6), (64, 64, 1, 6),
            (64, 96, 1, 6), (96, 96, 1, 6), (96, 96, 1, 6),
            (96, 160, 1, 6), (160, 160, 1, 6), (160, 160, 1, 6),
            (160, 320, 1, 6),
        ]
        layers = [Mobilenet_conv_bn(3, 32, 2)]
        layers += [Mobilenet_inverted_residual_bottleneck(inp, oup, s, t)
                   for inp, oup, s, t in v2_spec]
        return layers


class Mobilenet_conv_bn(torch.nn.Module):
    """Standard 3x3 convolution followed by BatchNorm and ReLU."""

    def __init__(self, inp, oup, stride):
        super(Mobilenet_conv_bn, self).__init__()
        stages = [
            torch.nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
            torch.nn.BatchNorm2d(oup),
            torch.nn.ReLU(inplace=True),
        ]
        self.conv = torch.nn.Sequential(*stages)
        # Output channel count, exposed for downstream consumers.
        self.depth = oup

    def forward(self, x):
        """Apply conv -> BN -> ReLU to x."""
        out = self.conv(x)
        return out


class Mobilenet_conv_dw(torch.nn.Module):
    """Depthwise-separable block: 3x3 depthwise conv then 1x1 pointwise conv,
    each followed by BatchNorm + ReLU."""

    def __init__(self, inp, oup, stride):
        super(Mobilenet_conv_dw, self).__init__()
        depthwise = [
            torch.nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
            torch.nn.BatchNorm2d(inp),
            torch.nn.ReLU(inplace=True),
        ]
        pointwise = [
            torch.nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
            torch.nn.BatchNorm2d(oup),
            torch.nn.ReLU(inplace=True),
        ]
        self.conv = torch.nn.Sequential(*(depthwise + pointwise))
        # Output channel count, exposed for downstream consumers.
        self.depth = oup

    def forward(self, x):
        """Apply depthwise then pointwise stages to x."""
        return self.conv(x)


class Mobilenet_inverted_residual_bottleneck(torch.nn.Module):
    """MobileNetV2-style inverted residual: 1x1 expand -> 3x3 depthwise ->
    1x1 linear projection, with a skip connection when stride == 1 and the
    channel count is unchanged."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(Mobilenet_inverted_residual_bottleneck, self).__init__()
        hidden = inp * expand_ratio
        # Residual add is only valid when the output tensor matches the input.
        self.use_res_connect = stride == 1 and inp == oup
        self.conv = torch.nn.Sequential(
            # pointwise expansion
            torch.nn.Conv2d(inp, hidden, 1, 1, 0, bias=False),
            torch.nn.BatchNorm2d(hidden),
            torch.nn.ReLU6(inplace=True),
            # depthwise
            torch.nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),
            torch.nn.BatchNorm2d(hidden),
            torch.nn.ReLU6(inplace=True),
            # pointwise linear projection (no activation)
            torch.nn.Conv2d(hidden, oup, 1, 1, 0, bias=False),
            torch.nn.BatchNorm2d(oup),
        )
        # Output channel count, exposed for downstream consumers.
        self.depth = oup

    def forward(self, x):
        """Run the bottleneck, adding the input back when shapes allow."""
        out = self.conv(x)
        return x + out if self.use_res_connect else out


class MobileNetV2(BackboneSeg):
    """MobileNetV2 backbone that returns multi-scale feature maps.

    ``forward`` collects the outputs of the feature modules whose indices
    are listed in ``self.out_indices`` (6, 13, 18) and returns them as a
    list, for consumption by a detection/segmentation head.
    """

    def __init__(self, cfg):
        super(MobileNetV2, self).__init__(cfg)
        self.cfg = cfg
        # Indices of feature modules whose outputs forward() returns.
        # (Original code re-assigned this to itself a second time; removed.)
        self.out_indices = (6, 13, 18)
        self.width_mult = 1.
        self.backbone_outchannels = self.cfg.MODEL.BACKBONE_OUTCHANNELS

        block = InvertedResidual
        # Each row: expansion t, output channels c, repeat count n,
        # stride s of the first block in the group.
        inverted_residual_setting = [
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        # Channel counts are rounded to a hardware-friendly multiple.
        divisor = 4 if self.width_mult == 0.1 else 8
        input_channel = _make_divisible(32 * self.width_mult, divisor)
        # Head width is only scaled up for multipliers > 1.0.
        self.last_channel = (_make_divisible(1280 * self.width_mult, divisor)
                             if self.width_mult > 1.0 else 1280)

        # Stem: 3x3 stride-2 conv.
        self.features = [conv_bn(3, input_channel, 3, 2, 1)]
        # Inverted residual groups: first block carries the group stride.
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * self.width_mult, divisor)
            for i in range(n):
                stride = s if i == 0 else 1
                self.features.append(
                    block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        # Final 1x1 conv up to last_channel.
        self.features.append(conv_bn(input_channel, self.last_channel, 1, 1, 0))
        self.features = torch.nn.Sequential(*self.features)
        self.init_weights()

    def init_weights(self):
        """Kaiming-init all convs; constant-init all BN/GN layers to 1."""
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                kaiming_init(m)
            elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.GroupNorm)):
                constant_init(m, 1)

    def forward(self, x):
        """Run the feature stack, collecting outputs at self.out_indices."""
        outs = []
        for i, module in enumerate(self.features):
            x = module(x)
            if i in self.out_indices:
                outs.append(x)
        return outs

    def _create_network_(self):
        # The network is built eagerly in __init__; the module itself is
        # the "network" the BackboneSeg base class asks for.
        return self

    def load_init_weights(self, weights):
        """Load pretrained weights by *position*, not by key name.

        Skips ``num_batches_tracked`` / ``bn_notsave`` entries on our side,
        zips the remaining keys against ``weights`` in order, then restores
        the skipped buffers from the current model before loading.
        NOTE(review): positional matching assumes ``weights`` lists tensors
        in exactly our parameter order — verify against the checkpoint.
        """
        statedict = self.state_dict()
        for k in list(statedict):
            if 'num_batches_tracked' in k or 'bn_notsave' in k:
                statedict.pop(k)
        newstatedict = OrderedDict()
        for (k, _), (_, v2) in zip(statedict.items(), weights.items()):
            newstatedict[k] = v2
        current = self.state_dict()
        for k, v in current.items():
            if 'num_batches_tracked' in k or 'bn_notsave' in k:
                newstatedict[k] = v
        self.load_state_dict(newstatedict)

def _make_divisible(v, divisor, min_value=None):
    """
    This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible by 8
    It can be seen here:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    :param v:
    :param divisor:
    :param min_value:
    :return:
    """
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


def kaiming_init(module,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 bias=0,
                 distribution='normal'):
    """Kaiming-initialize ``module.weight`` and constant-fill ``module.bias``.

    :param module: layer with a ``weight`` tensor (and optionally ``bias``)
    :param a: negative slope passed to the Kaiming initializer
    :param mode: 'fan_in' or 'fan_out'
    :param nonlinearity: activation name used to compute the gain
    :param bias: constant value written into module.bias, if present
    :param distribution: 'uniform' or 'normal' Kaiming variant
    """
    assert distribution in ['uniform', 'normal']
    init_fn = (torch.nn.init.kaiming_uniform_ if distribution == 'uniform'
               else torch.nn.init.kaiming_normal_)
    init_fn(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
    # Only touch the bias when the layer actually has one.
    if getattr(module, 'bias', None) is not None:
        torch.nn.init.constant_(module.bias, bias)


def constant_init(module, val, bias=0):
    """Fill ``module.weight`` with *val* and ``module.bias`` with *bias*,
    skipping whichever attribute is absent or None."""
    weight = getattr(module, 'weight', None)
    if weight is not None:
        torch.nn.init.constant_(weight, val)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        torch.nn.init.constant_(bias_param, bias)


class InvertedResidual(torch.nn.Module):
    """MobileNetV2 inverted residual block with *named* submodules.

    Submodule names ('expand_*', 'dw_*', 'project_*') are part of the
    state_dict key layout and must stay stable. When ``expand_ratio == 1``
    the 1x1 expansion stage is omitted entirely.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        hidden_dim = round(inp * expand_ratio)
        # Residual add only when spatial size and channels are preserved.
        self.use_res_connect = self.stride == 1 and inp == oup

        named_layers = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion, only when the block widens channels.
            named_layers += [
                ('expand_conv', torch.nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)),
                ('expand_bn', torch.nn.BatchNorm2d(hidden_dim)),
                ('expand_relu', torch.nn.ReLU6(inplace=True)),
            ]
        named_layers += [
            # 3x3 depthwise conv carries the block stride.
            ('dw_conv', torch.nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1,
                                        groups=hidden_dim, bias=False)),
            ('dw_bn', torch.nn.BatchNorm2d(hidden_dim)),
            ('dw_relu', torch.nn.ReLU6(inplace=True)),
            # 1x1 linear projection (no activation).
            ('project_conv', torch.nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)),
            ('project_bn', torch.nn.BatchNorm2d(oup)),
        ]
        self.conv = torch.nn.Sequential(OrderedDict(named_layers))

    def forward(self, x):
        """Run the bottleneck, adding the input back when shapes allow."""
        out = self.conv(x)
        return x + out if self.use_res_connect else out





