import numpy as np
import torch.nn as nn

from common import Decoder, ASPP, AlignedXception,get_inverted_residual_blocks,InvertedResidual,ASPPPlus


def conv3x3(in_channels, out_channels, stride=1, dilation=1):
    """Return a 3x3 Conv2d with 'same' padding for any dilation rate.

    The effective (dilated) kernel extent is (3 - 1) * dilation + 1, so the
    padding that preserves spatial size at stride 1 is exactly ``dilation``.
    The original numpy-array computation ``((k-1)*(d-1) + k - 1) // 2``
    reduces to this for k == 3; plain int arithmetic is used instead.

    Args:
        in_channels: number of input feature channels.
        out_channels: number of output feature channels.
        stride: convolution stride.
        dilation: atrous/dilation rate.

    Returns:
        nn.Conv2d: bias-free 3x3 convolution layer.
    """
    return nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride,
                     padding=dilation, dilation=dilation, bias=False)


# This configuration implements DeepLabV3+ (encoder-decoder variant).
class DeepLab(nn.Module):
    """DeepLabV3+ segmentation network: Aligned Xception backbone + ASPP + decoder."""

    def __init__(self, backbone="aligned_inception", stride=16, num_classes=8, pretrained=False):
        """
        Args:
            backbone: kept for interface compatibility; the backbone is
                currently always AlignedXception — TODO(review): honour this argument.
            stride: output stride, forwarded to both the backbone and ASPP.
            num_classes: number of segmentation classes.
            pretrained: kept for interface compatibility; currently unused.
        """
        super(DeepLab, self).__init__()
        self.backbone = AlignedXception(stride)
        planes = 128  # low-level feature channel count expected by the decoder
        # Fix: the ASPP output stride was hard-coded to 16, silently ignoring
        # the `stride` argument that is already passed to the backbone above.
        self.aspp = ASPP(2048, 256, stride)
        self.decoder = Decoder(planes=planes, num_classes=num_classes)

    def forward(self, x):
        """Run backbone -> ASPP -> decoder; the decoder upsamples to the input's spatial size."""
        high_level, low_level = self.backbone(x)
        high_level = self.aspp(high_level)
        return self.decoder(high_level, low_level, x.size()[2:])


# This configuration implements DeepLabV3 with MobileNetV2 as the backbone.

class DeepLabV3MobileNetV2(nn.Module):
    """DeepLabV3 with a MobileNetV2 backbone: inverted-residual stages, a
    dilated tail, an ASPP head, a 1x1 classifier, and a bilinear upsample of
    the logits to a fixed target resolution."""

    def __init__(self, num_classes, output_size=(255, 846)):
        """
        Args:
            num_classes: number of segmentation classes.
            output_size: (H, W) the output logits are bilinearly resized to.
                Defaults to the previously hard-coded (255, 846), so existing
                callers are unaffected.
        """
        super(DeepLabV3MobileNetV2, self).__init__()

        # MobileNetV2 configuration, one entry per stage:
        self.c = [32, 16, 24, 32, 64, 96, 160]  # output channels
        self.t = [1, 1, 6, 6, 6, 6, 6]          # expansion factors
        self.s = [2, 1, 2, 2, 2, 1, 1]          # strides
        self.n = [1, 1, 2, 3, 4, 3, 3]          # repeats per stage
        # NOTE(review): the strides above multiply to 16, not 32 — confirm the
        # intended down-sample rate; rate below evaluates to 32 // 16 == 2.
        self.down_sample_rate = 32
        self.output_stride = 16
        self.multi_grid = (1, 2, 4)             # dilation multipliers for the tail blocks
        self.aspp = (6, 12, 18)                 # ASPP atrous rates

        # Collect every layer here, then wrap in a single nn.Sequential so all
        # parameters are registered with the module.
        self.blocks = []
        # Stem: plain 3x3 conv + BN + ReLU6.
        self.blocks.append(
            nn.Sequential(
                nn.Conv2d(3, self.c[0], 3, stride=self.s[0], padding=1, bias=False),
                nn.BatchNorm2d(self.c[0]),
                nn.ReLU6()
            )
        )

        # MobileNetV2 inverted-residual stages.
        for i in range(6):
            self.blocks.extend(
                get_inverted_residual_blocks(self.c[i], self.c[i + 1], t=self.t[i + 1], s=self.s[i + 1],
                                             n=self.n[i + 1])
            )

        # Dilated tail: stride-1 blocks with increasing dilation keep the
        # feature map at the desired output stride while growing the receptive field.
        rate = self.down_sample_rate // self.output_stride
        self.blocks.append(InvertedResidual(self.c[6], self.c[6], expand_ratio=self.t[6], stride=1, dilation=rate))
        for i in range(3):
            self.blocks.append(InvertedResidual(self.c[6], self.c[6], expand_ratio=self.t[6], stride=1,
                                                dilation=rate * self.multi_grid[i]))
        # ASPP head.
        self.blocks.append(ASPPPlus(self.c[-1], self.aspp))

        # 1x1 classifier producing per-class logits.
        self.blocks.append(nn.Conv2d(256, num_classes, 1))
        # Resize logits to the target label size. Generalized from a
        # hard-coded (255, 846) to the `output_size` constructor argument.
        self.blocks.append(nn.Upsample(size=tuple(output_size), mode='bilinear', align_corners=False))
        self.model = nn.Sequential(*self.blocks)

    def forward(self, x):
        """Return per-pixel class logits of shape (N, num_classes, *output_size)."""
        return self.model(x)

