import torch
import torchvision
import torch.nn.functional as F


class MobileNetV2(torch.nn.Module):
    """ImageNet-pretrained MobileNetV2 feature extractor for DeepLabV3+.

    Drops the final 1280-channel conv of ``torchvision``'s feature stack and,
    depending on ``output_stride``, converts the strided depthwise convs of
    selected inverted-residual blocks into dilated stride-1 convs so the
    spatial resolution of the deep features is preserved.
    """

    def __init__(self, output_stride):
        super().__init__()
        # NOTE(review): relies on torchvision's mobilenet_v2 internals —
        # features[idx].conv[1][0] being the depthwise 3x3 conv; confirm
        # against the installed torchvision version.
        self.features = torchvision.models.mobilenet_v2(pretrained=True).features[:-1]
        # Map output stride to {block index: dilation rate}. For stride 8 the
        # second rewired block gets rate 4 to compensate the doubled density.
        if output_stride == 16:
            plan = {14: 2}
        elif output_stride == 8:
            plan = {7: 2, 14: 4}
        else:
            plan = {}
        for index, rate in plan.items():
            depthwise = self.features[index].conv[1][0]
            depthwise.stride = (1, 1)
            depthwise.dilation = (rate, rate)
            # padding == dilation keeps the 3x3 conv size-preserving
            depthwise.padding = (rate, rate)

    def forward(self, x):
        # Low-level features (presumably 24 channels at stride 4 — matches
        # the decoder's conv0 input) and the deep 320-channel features.
        low_level = self.features[0:4](x)
        high_level = self.features[4:](low_level)
        return low_level, high_level


class ConvBatchReLU(torch.nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU block with a fixed stride of 1.

    ``padding`` and ``dilation_rate`` are forwarded to the convolution, so
    size-preserving dilated convs are obtained with padding == dilation.
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding, dilation_rate):
        super().__init__()
        conv = torch.nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=(1, 1),
            padding=padding,
            dilation=dilation_rate,
        )
        norm = torch.nn.BatchNorm2d(out_channels)
        self.sequential = torch.nn.Sequential(conv, norm, torch.nn.ReLU())

    def forward(self, x):
        return self.sequential(x)


class ASPP(torch.nn.Module):
    """Atrous Spatial Pyramid Pooling head.

    Runs five parallel branches over the input — a 1x1 conv, three dilated
    3x3 convs (rates 6/12/18), and an image-level branch — concatenates them
    and projects back to ``out_channels`` with a final 1x1 conv.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # conv0..conv3: the 1x1 branch (rate marker 0) plus three dilated
        # 3x3 branches; padding == rate keeps every branch size-preserving.
        for index, rate in enumerate((0, 6, 12, 18)):
            if rate == 0:
                branch = ConvBatchReLU(in_channels, out_channels, (1, 1), (0, 0), 1)
            else:
                branch = ConvBatchReLU(in_channels, out_channels, (3, 3), (rate, rate), rate)
            setattr(self, f'conv{index}', branch)
        # Image-level branch conv and the post-concat projection.
        self.conv4 = ConvBatchReLU(in_channels, out_channels, (1, 1), (0, 0), 1)
        self.conv5 = ConvBatchReLU(out_channels * 5, out_channels, (1, 1), (0, 0), 1)
        self.pooling = torch.nn.AdaptiveAvgPool2d(output_size=(1, 1))

    def forward(self, x):
        height, width = x.shape[2:]
        # NOTE(review): the image-level branch applies conv4 BEFORE global
        # pooling; the canonical ASPP pools first — confirm this order is
        # intentional.
        pooled = F.interpolate(self.pooling(self.conv4(x)), (height, width), mode='bilinear')
        branches = [self.conv0(x), self.conv1(x), self.conv2(x), self.conv3(x), pooled]
        return self.conv5(torch.cat(branches, dim=1))


class DeepLabV3PLUS(torch.nn.Module):
    """DeepLabV3+ segmentation network: MobileNetV2 encoder plus decoder.

    ``output_stride`` configures the encoder's dilation rewiring;
    ``num_classes`` sets the number of output logit channels.
    """

    def __init__(self, output_stride, num_classes):
        super().__init__()
        self.mobile_net_v2 = MobileNetV2(output_stride=output_stride)
        self.decoder = DeepLabV3Decoder(num_classes=num_classes)

    def forward(self, x):
        low_level, high_level = self.mobile_net_v2(x)
        return self.decoder(low_level, high_level)


class DeepLabV3Decoder(torch.nn.Module):
    """DeepLabV3+ decoder head.

    Fuses low-level backbone features (expected 24 channels, stride 4) with
    ASPP-refined high-level features (expected 320 channels) and predicts
    per-pixel class logits at 4x the low-level feature resolution.
    """

    def __init__(self, num_classes):
        super().__init__()
        # Project low-level features to 48 channels before fusion.
        self.conv0 = ConvBatchReLU(24, 48, (1, 1), (0, 0), 1)
        self.aspp = ASPP(320, 256)
        # Fusion head: 48 (low-level) + 256 (ASPP) = 304 input channels.
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(304, 256, kernel_size=(3, 3), padding=(1, 1)),
            torch.nn.BatchNorm2d(256),
            torch.nn.ReLU(),
            torch.nn.Dropout(),
            torch.nn.Conv2d(256, 256, kernel_size=(3, 3), padding=(1, 1)),
            torch.nn.BatchNorm2d(256),
            torch.nn.ReLU(),
            torch.nn.Dropout(0.1)
        )
        # 1x1 classifier producing the per-class logits.
        self.conv2 = torch.nn.Conv2d(256, num_classes, kernel_size=(1, 1))

    def forward(self, x1, x2):
        """x1: low-level features; x2: high-level features for the ASPP.

        Returns logits upsampled 4x relative to x1 (the input resolution
        when the input height/width are divisible by 4).
        """
        x3 = self.conv0(x1)
        # Upsample the ASPP output to x3's exact spatial size rather than a
        # fixed scale_factor=4: a fixed factor only matches x3 when the
        # encoder's output stride is 16 AND the input size is divisible by
        # 16; with output_stride=8 (or odd input sizes) the concatenation
        # below would fail on mismatched spatial dims. When sizes already
        # align this is identical to the scale_factor=4 behavior.
        x4 = F.interpolate(self.aspp(x2), size=x3.shape[2:], mode='bilinear')
        fused = self.conv1(torch.cat([x3, x4], dim=1))
        return F.interpolate(self.conv2(fused), scale_factor=4, mode='bilinear')
