import math

import torch
import torch.nn.functional as F
from torch import nn


class ConvBNReLU(nn.Sequential):
    """Conv2d (stride 1, no bias) -> BatchNorm2d -> in-place ReLU."""

    def __init__(self, inplanes, planes, kernel_size, padding, dilation):
        conv = nn.Conv2d(inplanes, planes, kernel_size, stride=1,
                         padding=padding, dilation=dilation, bias=False)
        super(ConvBNReLU, self).__init__(
            conv,
            nn.BatchNorm2d(planes),
            nn.ReLU(inplace=True),
        )


class ASPP(nn.Module):
    """
    Atrous Spatial Pyramid Pooling.
    Ref:
        Rethinking Atrous Convolution for Semantic Image Segmentation
    """

    def __init__(self, inplanes=2048, planes=256, stride=16):
        super(ASPP, self).__init__()
        # The main difference between output stride 8 and 16 is the amount of
        # downstream computation (stride 8 keeps larger feature maps).
        rates = {8: [12, 24, 36], 16: [6, 12, 18]}
        if stride not in rates:
            raise NotImplementedError
        d1, d2, d3 = rates[stride]

        # ConvBNReLU args: in_channels, out_channels, kernel, padding, dilation
        self.block1 = ConvBNReLU(inplanes, planes, 1, 0, 1)
        self.block2 = ConvBNReLU(inplanes, planes, 3, d1, d1)
        self.block3 = ConvBNReLU(inplanes, planes, 3, d2, d2)
        self.block4 = ConvBNReLU(inplanes, planes, 3, d3, d3)

        # Image-pooling branch.
        # NOTE(review): pools to 4x4, not the 1x1 global pooling described in
        # the DeepLab v3 paper -- confirm this is intentional.
        self.block5 = nn.Sequential(
            nn.AdaptiveAvgPool2d(4),
            ConvBNReLU(inplanes, planes, 1, 0, 1),
        )

        self.conv = ConvBNReLU(planes * 5, planes, 1, 0, 1)
        self.dropout = nn.Dropout(0.5)

        self._init_weight()

    def _init_weight(self):
        """Kaiming-normal init for convs; unit-weight, zero-bias BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        branches = [self.block1(x), self.block2(x), self.block3(x), self.block4(x)]
        # Upsample the pooled branch back to the input resolution before fusing.
        pooled = self.block5(x)
        branches.append(F.interpolate(pooled, size=x.size()[2:],
                                      mode='bilinear', align_corners=True))

        fused = self.conv(torch.cat(branches, dim=1))
        # Dropout after the fuse conv; no BN directly follows it, so the two
        # regularizers do not interact.
        return self.dropout(fused)


class Decoder(nn.Module):
    """
    DeepLab v3+ decoder: fuses high-level features with low-level backbone
    features and produces per-pixel class logits.
    Ref:
        Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation.
    """

    def __init__(self, planes=128, num_classes=3):
        super(Decoder, self).__init__()

        # 1x1 projection of the low-level features.
        self.conv1 = nn.Sequential(
            nn.Conv2d(planes, planes, 1, bias=False),
            nn.BatchNorm2d(planes),
            nn.ReLU(inplace=True),
        )

        # Fuse path: two 3x3 convs, then a 1x1 classifier head.
        self.conv2 = nn.Sequential(
            nn.Conv2d(planes + 256, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, num_classes, 1),  # 1x1 kernel classifier
        )

        self._init_weights()

    def _init_weights(self):
        """He (fan-out) normal init for convs; unit-weight, zero-bias BN."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x1, x2, output_size):
        """
        :param x1: high-level feature map with 256 channels (e.g. ASPP
            output -- verify against the caller).
        :param x2: low-level feature map with `planes` channels.
        :param output_size: (H, W) of the final upsampled prediction.
        :return: class logits upsampled to ``output_size``.
        """
        low = self.conv1(x2)
        high = F.interpolate(x1, size=x2.size()[2:], mode='bilinear', align_corners=True)
        fused = self.conv2(torch.cat((high, low), dim=1))
        return F.interpolate(fused, size=output_size, mode='bilinear', align_corners=True)


class SeparableConv2d(nn.Module):
    """
    Depthwise-separable convolution: a per-channel (depthwise) conv followed
    by a 1x1 (pointwise) channel-mixing conv.
    """

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False):
        super(SeparableConv2d, self).__init__()
        # "same"-style padding for odd kernels at stride 1.
        pad = (kernel_size - 1) * dilation // 2
        self.depth_wise = nn.Conv2d(inplanes, inplanes, kernel_size, stride, pad,
                                    dilation, groups=inplanes, bias=bias)
        self.point_wise = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1,
                                    padding=0, dilation=1, groups=1, bias=bias)

    def forward(self, x):
        return self.point_wise(self.depth_wise(x))


class BasicConv2d(nn.Module):
    """
    Residual block of three separable 3x3 convs; the shortcut becomes a
    1x1 conv + BN projection whenever channels or stride change.
    """

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BasicConv2d, self).__init__()
        self.features = nn.Sequential(
            SeparableConv2d(inplanes, planes, 3, stride=stride, dilation=dilation),
            nn.ReLU(inplace=True),
            SeparableConv2d(planes, planes, 3, stride=1, dilation=dilation),
            nn.ReLU(inplace=True),
            SeparableConv2d(planes, planes, 3, stride=1, dilation=dilation),
        )

        # Projection shortcut only when the identity shape would not match.
        if inplanes == planes and stride == 1:
            self.downsample = None
        else:
            self.downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False),
                nn.BatchNorm2d(planes),
            )

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.features(x) + shortcut


class AlignedXception(nn.Module):
    """
    Modified (aligned) Xception backbone.
    Ref:
        Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation.
    """

    def __init__(self, stride=16):
        """
        :param stride: multiple of image down-sampling. The default value is
            16 (DeepLab v3+); it can also be set to 8 (DeepLab v3) or 32.
        """
        super(AlignedXception, self).__init__()
        # (stage strides, dilations) keyed by the requested output stride.
        configs = {
            8: ([1, 1], [4, 4]),
            16: ([2, 1], [2, 2]),
            32: ([2, 2], [1, 1]),
        }
        if stride not in configs:
            raise NotImplementedError
        self.stride, self.dilation = configs[stride]

        # Entry flow
        self.stem = nn.Sequential(
            nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )
        self.stage1 = nn.Sequential(
            BasicConv2d(64, 128, 2),
            nn.ReLU(inplace=True),
        )
        self.stage2 = BasicConv2d(128, 256, 2)
        self.stage3 = BasicConv2d(256, 728, self.stride[0])

        # Middle flow: 16 identical residual blocks at 728 channels.
        self.stage4 = nn.Sequential(
            *[BasicConv2d(728, 728, stride=1, dilation=self.dilation[0])
              for _ in range(16)]
        )

        # Exit flow
        self.stage5 = nn.Sequential(
            BasicConv2d(728, 1024, stride=self.stride[1], dilation=self.dilation[1]),
            nn.ReLU(inplace=True),
            SeparableConv2d(1024, 1536, dilation=self.dilation[1]),
            nn.BatchNorm2d(1536),
            nn.ReLU(inplace=True),
            SeparableConv2d(1536, 1536, dilation=self.dilation[1]),
            nn.BatchNorm2d(1536),
            nn.ReLU(inplace=True),
            SeparableConv2d(1536, 2048, dilation=self.dilation[1]),
            nn.BatchNorm2d(2048),
            nn.ReLU(inplace=True),
        )

        self._init_weight()

    def _init_weight(self):
        """He (fan-out) normal init for convs; unit-weight, zero-bias BN."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        """
        :param x: input image batch.
        :return: (deep_features, low_level_features) -- the second feature
            map is taken after stage1 for the decoder's skip connection.
        """
        x = self.stem(x)
        low_level_features = self.stage1(x)
        x = self.stage2(low_level_features)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.stage5(x)
        return x, low_level_features


class ASPPPlus(nn.Module):
    """
    ASPP variant: a 1x1 branch, three dilated 3x3 branches, and a global
    image-pooling branch, concatenated and fused by a 1x1 conv.

    :param input_channel: channels of the incoming feature map.
    :param aspp_list: three dilation rates for the 3x3 branches
        (padding == dilation keeps spatial size).
    """

    def __init__(self, input_channel, aspp_list):
        super(ASPPPlus, self).__init__()
        self.conv11 = nn.Sequential(nn.Conv2d(input_channel, 256, 1, bias=False),
                                    nn.BatchNorm2d(256))
        self.conv33_1 = nn.Sequential(nn.Conv2d(input_channel, 256, 3,
                                                padding=aspp_list[0], dilation=aspp_list[0], bias=False),
                                      nn.BatchNorm2d(256))
        self.conv33_2 = nn.Sequential(nn.Conv2d(input_channel, 256, 3,
                                                padding=aspp_list[1], dilation=aspp_list[1], bias=False),
                                      nn.BatchNorm2d(256))
        self.conv33_3 = nn.Sequential(nn.Conv2d(input_channel, 256, 3,
                                                padding=aspp_list[2], dilation=aspp_list[2], bias=False),
                                      nn.BatchNorm2d(256))
        self.concate_conv = nn.Sequential(nn.Conv2d(256 * 5, 256, 1, bias=False),
                                          nn.BatchNorm2d(256))

    def forward(self, x):
        conv11 = self.conv11(x)
        conv33_1 = self.conv33_1(x)
        conv33_2 = self.conv33_2(x)
        conv33_3 = self.conv33_3(x)

        # Image-pooling branch: global average pool, project with the shared
        # 1x1 conv, then upsample back to the input resolution. Functional
        # ops replace the original per-call nn.AvgPool2d / nn.Upsample module
        # construction, which allocated new modules on every forward pass.
        # NOTE(review): self.conv11's BatchNorm sees both the full-resolution
        # and the pooled branch -- confirm sharing it is intentional.
        image_pool = F.adaptive_avg_pool2d(x, 1)
        image_pool = self.conv11(image_pool)
        upsample = F.interpolate(image_pool, size=x.size()[2:],
                                 mode='bilinear', align_corners=True)

        concate = torch.cat([conv11, conv33_1, conv33_2, conv33_3, upsample], dim=1)
        return self.concate_conv(concate)


# define some util blocks
def conv_bn(x, output, stride):
    """Return a 3x3 conv (pad 1, no bias) -> BatchNorm -> ReLU6 block."""
    layers = [
        nn.Conv2d(x, output, 3, stride, 1, bias=False),
        nn.BatchNorm2d(output),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)


def conv_1x1_bn(x, output):
    """Return a 1x1 conv (stride 1, no bias) -> BatchNorm -> ReLU6 block."""
    layers = [
        nn.Conv2d(x, output, 1, 1, 0, bias=False),
        nn.BatchNorm2d(output),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)


class InvertedResidual(nn.Module):
    """
    MobileNetV2 inverted residual block: 1x1 expansion -> depthwise 3x3 ->
    linear 1x1 projection, with a skip connection when shape is preserved.
    """

    def __init__(self, in_channels, out_channels, stride, expand_ratio, dilation=1):
        """
        :param in_channels: input channel count.
        :param out_channels: output channel count.
        :param stride: depthwise conv stride; must be 1 or 2.
        :param expand_ratio: channel expansion factor (6 in the paper).
        :param dilation: depthwise conv dilation; defaults to 1.
        """
        super(InvertedResidual, self).__init__()
        self.stride = stride
        # Bug fix: message previously named a nonexistent "InsertedResidual".
        assert stride in [1, 2], 'InvertedResidual stride must be 1 or 2, can not be changed'
        # Skip connection only when spatial size and channels are unchanged.
        self.use_res_connect = self.stride == 1 and in_channels == out_channels

        hidden_dim = in_channels * expand_ratio  # expanded width, computed once

        # Depthwise-separable convolution: expand (pw), filter (dw), project
        # (pw-linear, intentionally without a nonlinearity).
        self.conv = nn.Sequential(
            # pw
            nn.Conv2d(in_channels, hidden_dim, 1, 1, 0, bias=False),
            nn.BatchNorm2d(hidden_dim),
            nn.ReLU6(inplace=True),
            # dw: padding == dilation keeps spatial size at stride 1
            nn.Conv2d(hidden_dim, hidden_dim, 3, stride, padding=dilation,
                      groups=hidden_dim,
                      dilation=dilation, bias=False),
            nn.BatchNorm2d(hidden_dim),
            nn.ReLU6(inplace=True),
            # pw linear
            nn.Conv2d(hidden_dim, out_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)


class MobileNetV2(nn.Module):
    """
    Implementation of MobileNetV2 with a classification head.
    """

    def __init__(self, num_classes=20, input_size=224, width_mult=1.):
        """
        :param num_classes: number of output classes.
        :param input_size: input resolution; must be divisible by 32, such as
            224, 480, 512, 640, 960 etc., because the network downsamples by
            a factor of 32 before the final average pooling.
        :param width_mult: width multiplier (1.0 or less, or above 1.0);
            scales every internal channel count. The 1280-channel output of
            the feature extractor is scaled up accordingly when the
            multiplier is above 1.0, but never reduced below 1280.
        """
        super(MobileNetV2, self).__init__()

        assert input_size % 32 == 0, 'input_size must be divided by 32, such as 224, 480, 512 etc.'
        input_channel = int(32 * width_mult)
        self.last_channel = int(1280 * width_mult) if width_mult > 1.0 else 1280
        self.features = [conv_bn(3, input_channel, 2)]

        # t: expansion ratio, c: output channels, n: repeats, s: stride of
        # the first block in each group.
        self.inverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
        for t, c, n, s in self.inverted_residual_setting:
            output_channel = int(c * width_mult)
            for i in range(n):
                # Only the first block of a group applies the group's stride.
                stride = s if i == 0 else 1
                self.features.append(InvertedResidual(input_channel, output_channel, stride, t))
                input_channel = output_channel

        # Final 1x1 projection up to self.last_channel.
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))

        # Global pooling; this is why input_size must be divisible by 32.
        self.features.append(nn.AvgPool2d(int(input_size / 32)))
        self.features = nn.Sequential(*self.features)

        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(self.last_channel, num_classes)
        )
        self._init_weights()

    def forward(self, x):
        """Return (N, num_classes) logits for an (N, 3, H, W) input batch."""
        x = self.features(x)
        x = x.view(-1, self.last_channel)
        x = self.classifier(x)
        return x

    def _init_weights(self):
        """He (fan-out) init for convs, constant init for BN, small normal for Linear."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    # Bug fix: was `n.bias.data.zero_()` -- `n` is an int, so
                    # this crashed for any conv created with a bias.
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()


# for build MobileNetV2 dynamically
def get_inverted_residual_blocks(in_, out_, t=6, s=1, n=1):
    """Build a list of n InvertedResidual blocks for dynamic MobileNetV2
    construction; only the first block may change stride and channel count."""
    head = InvertedResidual(in_, out_, s, t)
    tail = [InvertedResidual(out_, out_, 1, t) for _ in range(n - 1)]
    return [head] + tail


