# -*- coding: utf-8 -*-
"""
@project: CloudDetection
@author: Wu Yue
@file: deeplab
@ide: PyCharm
@create time: 2020-10-12 07:28
@change time:
@function: deeplab v1 v2 v3 v3+
"""

import torch
import torch.nn.modules as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import _LRScheduler


"""
DeepLab v1
"""
class DeepLabV1(nn.Module):
    """DeepLab v1: dilated ResNet-style backbone followed by a 1x1 classifier head."""

    def __init__(self, nChannels, nClasses, nBlocks, originReso=False):
        """
        :param nChannels: number of input channels
        :param nClasses: number of output classes
        :param nBlocks: list of 4 ints, bottleneck count of each ResLayer
        :param originReso: if True, upsample the output back to the input resolution
        """
        super(DeepLabV1, self).__init__()

        self.originReso = originReso
        ch = [64 << p for p in range(6)]  # [64, 128, 256, 512, 1024, 2048]
        self.modelV1 = nn.Sequential(
            Stem(inCh=nChannels, outCh=ch[0]),
            ResLayer(nLayers=nBlocks[0], inCh=ch[0], outCh=ch[2], stride=1, dilation=1),
            ResLayer(nLayers=nBlocks[1], inCh=ch[2], outCh=ch[3], stride=2, dilation=1),
            ResLayer(nLayers=nBlocks[2], inCh=ch[3], outCh=ch[4], stride=1, dilation=2),
            ResLayer(nLayers=nBlocks[3], inCh=ch[4], outCh=ch[5], stride=1, dilation=4),
            nn.Conv2d(in_channels=ch[5], out_channels=nClasses, kernel_size=1),
        )

    def forward(self, x):
        inputSize = x.shape[-2:]
        logits = self.modelV1(x)
        if self.originReso:
            # bilinear upsampling back to the input resolution
            logits = F.interpolate(input=logits, size=inputSize, mode="bilinear", align_corners=False)
        return torch.sigmoid(logits)  # squash scores into (0, 1)


class Stem(nn.Module):
    """Entry block: 7x7 stride-2 conv (+BN+ReLU) then a 3x3 stride-2 max pool.

    Together the two stride-2 stages reduce the spatial resolution by a factor
    of 4 before the feature maps enter the ResLayers.
    """

    def __init__(self, inCh, outCh):
        super(Stem, self).__init__()

        self.convBnRelu = ConvBnRelu(inCh=inCh, outCh=outCh, kernelSize=7, stride=2, padding=3, dilation=1)
        self.mp = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)

    def forward(self, x):
        return self.mp(self.convBnRelu(x))


class ConvBnRelu(nn.Module):
    """(Dilated) convolution followed by batch norm and, optionally, a ReLU."""

    def __init__(self, inCh, outCh, kernelSize, stride, padding, dilation, relu=True):
        super(ConvBnRelu, self).__init__()

        # bias=False: the following BatchNorm makes a conv bias redundant
        self.conv = nn.Conv2d(in_channels=inCh, out_channels=outCh, kernel_size=kernelSize, stride=stride,
                              padding=padding, dilation=dilation, bias=False)
        # NOTE(review): momentum=0.999 means the running stats track almost only the
        # latest batch; reference implementations usually use 1 - 0.999 — confirm intent.
        self.bn = nn.BatchNorm2d(outCh, eps=1e-5, momentum=0.999)

        stages = [self.conv, self.bn]
        if relu:
            stages.append(nn.ReLU())
        self.convBnRelu = nn.Sequential(*stages)

    def forward(self, x):
        return self.convBnRelu(x)


class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 (dilated) -> 1x1 expand, plus a shortcut.

    The middle width is ``outCh / bottleneckExpansion``. When ``downSample`` is True
    the shortcut is a strided 1x1 conv+BN projection, otherwise the identity.
    """

    def __init__(self, inCh, outCh, stride, dilation, downSample=False, bottleneckExpansion=4):
        super(Bottleneck, self).__init__()

        midCh = outCh // bottleneckExpansion
        self.reduce = ConvBnRelu(inCh=inCh, outCh=midCh, kernelSize=1, stride=stride, padding=0, dilation=1, relu=True)
        # padding == dilation keeps the 3x3 dilated conv's spatial size unchanged
        self.conv3x3 = ConvBnRelu(inCh=midCh, outCh=midCh, kernelSize=3, stride=1, padding=dilation,
                                  dilation=dilation, relu=True)
        self.increase = ConvBnRelu(inCh=midCh, outCh=outCh, kernelSize=1, stride=1, padding=0, dilation=1, relu=False)
        if downSample:
            self.shortCut = ConvBnRelu(inCh=inCh, outCh=outCh, kernelSize=1, stride=stride, padding=0, dilation=1,
                                       relu=False)
        else:
            self.shortCut = nn.Identity()

    def forward(self, x):
        out = self.increase(self.conv3x3(self.reduce(x)))
        out = out + self.shortCut(x)
        return F.relu(out)


class ResLayer(nn.Sequential):
    """A stack of Bottleneck blocks; downsampling happens only in the first block.

    ``multiGrids`` optionally multiplies the dilation of each block (DeepLab v3
    "multi-grid"); when omitted every block uses the plain ``dilation``.
    """

    def __init__(self, nLayers, inCh, outCh, stride, dilation, multiGrids=None):
        super(ResLayer, self).__init__()

        if multiGrids is None:
            multiGrids = [1] * nLayers
        else:
            assert nLayers == len(multiGrids)

        for i, grid in enumerate(multiGrids):
            first = (i == 0)
            self.add_module(
                "block{}".format(i + 1),
                Bottleneck(
                    inCh=inCh if first else outCh,
                    outCh=outCh,
                    stride=stride if first else 1,
                    dilation=dilation * grid,
                    downSample=first,
                    bottleneckExpansion=4,
                ),
            )


"""
DeepLab v2
"""
class DeepLabV2(nn.Module):
    """DeepLab v2: dilated ResNet-style backbone followed by a sum-fused ASPP head."""

    def __init__(self, nChannels, nClasses, nBlocks, atrousRates, originReso=False):
        """
        :param nChannels: number of input channels
        :param nClasses: number of output classes
        :param nBlocks: list of 4 ints, bottleneck count of each ResLayer
        :param atrousRates: dilation rates of the parallel ASPP branches
        :param originReso: if True, upsample the output back to the input resolution
        """
        super(DeepLabV2, self).__init__()

        self.originReso = originReso
        ch = [64 << p for p in range(6)]  # [64, 128, 256, 512, 1024, 2048]
        self.modelV2 = nn.Sequential(
            Stem(inCh=nChannels, outCh=ch[0]),
            ResLayer(nLayers=nBlocks[0], inCh=ch[0], outCh=ch[2], stride=1, dilation=1),
            ResLayer(nLayers=nBlocks[1], inCh=ch[2], outCh=ch[3], stride=2, dilation=1),
            ResLayer(nLayers=nBlocks[2], inCh=ch[3], outCh=ch[4], stride=1, dilation=2),
            ResLayer(nLayers=nBlocks[3], inCh=ch[4], outCh=ch[5], stride=1, dilation=4),
            ASPPV2(inCh=ch[5], outCh=nClasses, rates=atrousRates),
        )

    def forward(self, x):
        inputSize = x.shape[-2:]
        logits = self.modelV2(x)
        if self.originReso:
            # bilinear upsampling back to the input resolution
            logits = F.interpolate(input=logits, size=inputSize, mode="bilinear", align_corners=False)
        return torch.sigmoid(logits)  # squash scores into (0, 1)


class ASPPV2(nn.Module):
    """Atrous Spatial Pyramid Pooling (ASPP) head for DeepLab v2.

    Runs one 3x3 dilated convolution per rate in parallel and fuses the branch
    outputs by summation.
    """

    def __init__(self, inCh, outCh, rates):
        super(ASPPV2, self).__init__()

        for i, rate in enumerate(rates):
            branch = nn.Conv2d(in_channels=inCh, out_channels=outCh, kernel_size=3, stride=1,
                               padding=rate, dilation=rate, bias=True)
            # paper-style init: N(0, 0.01) weights, zero bias
            torch.nn.init.normal_(branch.weight, mean=0, std=0.01)
            torch.nn.init.constant_(branch.bias, 0)
            self.add_module("c{}".format(i), branch)

    def forward(self, x):
        return sum(branch(x) for branch in self.children())


class PolynomialLR(_LRScheduler):
    """"Poly" learning-rate policy used to train DeepLab v2.

    Every ``step_size`` epochs the lr is recomputed from the base lr as
    ``base_lr * (1 - epoch / iter_max) ** power``; on all other epochs (and past
    ``iter_max``) the current lr is kept as-is.
    See https://zhuanlan.zhihu.com/p/91047091?from_voters_page=true
    """

    def __init__(self, optimizer, step_size, iter_max, power, last_epoch=-1):
        self.step_size = step_size
        self.iter_max = iter_max
        self.power = power
        super(PolynomialLR, self).__init__(optimizer, last_epoch)

    def polynomial_decay(self, lr):
        # fraction of training completed drives the decay factor
        progress = float(self.last_epoch) / self.iter_max
        return lr * (1 - progress) ** self.power

    def get_lr(self):
        epoch = self.last_epoch
        keepCurrent = (epoch == 0) or (epoch % self.step_size != 0) or (epoch > self.iter_max)
        if keepCurrent:
            return [group["lr"] for group in self.optimizer.param_groups]
        return [self.polynomial_decay(lr) for lr in self.base_lrs]


"""
DeepLab v3
"""
class DeepLabV3(nn.Module):
    """DeepLab v3: dilated backbone + ASPP (with image pooling) + 1x1 classifier."""

    def __init__(self, nChannels, nClasses, nBlocks, atrousRates, multiGrids, outputStride, originReso=False):
        """
        :param nChannels: number of input channels
        :param nClasses: number of output classes
        :param nBlocks: list of 4 ints, bottleneck count of each ResLayer
        :param atrousRates: dilation rates of the parallel ASPP branches
        :param multiGrids: per-block dilation multipliers for the last ResLayer
        :param outputStride: backbone output stride, 8 or 16
        :param originReso: if True, upsample the output back to the input resolution
        :raises ValueError: if outputStride is neither 8 nor 16
        """
        super(DeepLabV3, self).__init__()

        self.originReso = originReso
        ch = [64 * 2 ** p for p in range(6)]

        # Stride and dilation per ResLayer, chosen so the overall stride matches
        # outputStride (the Stem already contributes a stride of 4).
        if outputStride == 8:
            s = [1, 2, 1, 1]
            d = [1, 1, 2, 4]
        elif outputStride == 16:
            s = [1, 2, 2, 1]
            d = [1, 1, 1, 2]
        else:
            # Previously s/d were left unbound here, producing a confusing NameError.
            raise ValueError("outputStride must be 8 or 16, got {}".format(outputStride))

        # channels after concatenating ASPP branches: 1x1 conv + one per rate + image pool
        concatCh = 256 * (len(atrousRates) + 2)

        self.modelV3 = nn.Sequential(
            Stem(inCh=nChannels, outCh=ch[0]),
            ResLayer(nLayers=nBlocks[0], inCh=ch[0], outCh=ch[2], stride=s[0], dilation=d[0]),
            ResLayer(nLayers=nBlocks[1], inCh=ch[2], outCh=ch[3], stride=s[1], dilation=d[1]),
            ResLayer(nLayers=nBlocks[2], inCh=ch[3], outCh=ch[4], stride=s[2], dilation=d[2]),
            ResLayer(nLayers=nBlocks[3], inCh=ch[4], outCh=ch[5], stride=s[3], dilation=d[3], multiGrids=multiGrids),
            ASPPV3(inCh=ch[5], outCh=256, rates=atrousRates),
            ConvBnRelu(inCh=concatCh, outCh=256, kernelSize=1, stride=1, padding=0, dilation=1),
            nn.Conv2d(in_channels=256, out_channels=nClasses, kernel_size=1)
        )

    def forward(self, x):
        size = x.shape[-2:]
        x = self.modelV3(x)
        if self.originReso:
            # bilinear upsampling back to the input resolution
            x = F.interpolate(input=x, size=size, mode="bilinear", align_corners=False)
        return torch.sigmoid(x)  # squash scores into (0, 1)


class ImagePool(nn.Module):
    """Image-level pooling branch of ASPP: pool, 1x1 conv (256 ch), then bilinear
    upsample back to the spatial size of the incoming feature map.

    NOTE(review): the referenced code uses global average pooling (output_size=1);
    this implementation pools to 4x4 instead, following
    https://github.com/Joyako/DeepLab-v3_plus_PyTorch/blob/master/module/general_network/ASPP.py
    because output_size=1 reportedly failed to run here — confirm intent.
    """

    def __init__(self, inCh, outCh):
        super(ImagePool, self).__init__()

        self.pool = nn.AdaptiveAvgPool2d(output_size=4)
        self.conv = ConvBnRelu(inCh=inCh, outCh=outCh, kernelSize=1, stride=1, padding=0, dilation=1)

    def forward(self, x):
        height, width = x.shape[-2:]
        pooled = self.conv(self.pool(x))
        return F.interpolate(pooled, size=(height, width), mode="bilinear", align_corners=False)


class ASPPV3(nn.Module):
    """Atrous Spatial Pyramid Pooling (ASPP) head for DeepLab v3.

    Parallel branches — a 1x1 conv, one 3x3 dilated conv per rate, and an
    image-pool branch — are concatenated along the channel axis.
    """

    def __init__(self, inCh, outCh, rates):
        super(ASPPV3, self).__init__()

        branches = [("c0", ConvBnRelu(inCh=inCh, outCh=outCh, kernelSize=1, stride=1, padding=0, dilation=1))]
        branches += [
            ("c{}".format(i + 1),
             ConvBnRelu(inCh=inCh, outCh=outCh, kernelSize=3, stride=1, padding=rate, dilation=rate))
            for i, rate in enumerate(rates)
        ]
        branches.append(("imagepool", ImagePool(inCh, outCh)))

        # plain Module used as a named container; registration order fixes concat order
        self.stages = nn.Module()
        for name, branch in branches:
            self.stages.add_module(name, branch)

    def forward(self, x):
        return torch.cat([stage(x) for stage in self.stages.children()], dim=1)


"""
DeepLab v3+
"""
class DeepLabV3Plus(nn.Module):
    """DeepLab v3+: DeepLab v3 encoder plus a decoder that fuses a low-level
    feature map (after layer2) with the upsampled ASPP output."""

    def __init__(self, nChannels, nClasses, nBlocks, atrousRates, multiGrids, outputStride):
        """
        :param nChannels: number of input channels
        :param nClasses: number of output classes
        :param nBlocks: list of 4 ints, bottleneck count of each ResLayer
        :param atrousRates: dilation rates of the parallel ASPP branches
        :param multiGrids: per-block dilation multipliers for the last ResLayer
        :param outputStride: backbone output stride, 8 or 16
        :raises ValueError: if outputStride is neither 8 nor 16
        """
        super(DeepLabV3Plus, self).__init__()

        # Stride and dilation per ResLayer (the Stem already contributes stride 4)
        if outputStride == 8:
            s = [1, 2, 1, 1]
            d = [1, 1, 2, 4]
        elif outputStride == 16:
            s = [1, 2, 2, 1]
            d = [1, 1, 1, 2]
        else:
            # Previously s/d were left unbound here, producing a confusing NameError.
            raise ValueError("outputStride must be 8 or 16, got {}".format(outputStride))

        # Encoder
        ch = [64 * 2 ** p for p in range(6)]
        # channels after concatenating ASPP branches: 1x1 conv + one per rate + image pool
        concatCh = 256 * (len(atrousRates) + 2)
        self.layer1 = Stem(inCh=nChannels, outCh=ch[0])
        self.layer2 = ResLayer(nLayers=nBlocks[0], inCh=ch[0], outCh=ch[2], stride=s[0], dilation=d[0])
        self.layer3 = ResLayer(nLayers=nBlocks[1], inCh=ch[2], outCh=ch[3], stride=s[1], dilation=d[1])
        self.layer4 = ResLayer(nLayers=nBlocks[2], inCh=ch[3], outCh=ch[4], stride=s[2], dilation=d[2])
        self.layer5 = ResLayer(nLayers=nBlocks[3], inCh=ch[4], outCh=ch[5], stride=s[3], dilation=d[3], multiGrids=multiGrids)
        self.aspp = ASPPV3(inCh=ch[5], outCh=256, rates=atrousRates)
        self.fc1 = ConvBnRelu(inCh=concatCh, outCh=256, kernelSize=1, stride=1, padding=0, dilation=1)

        # Decoder: project low-level features to 48 channels, concat with the
        # 256-channel encoder output (48 + 256 = 304), then refine and classify.
        self.reduce = ConvBnRelu(inCh=256, outCh=48, kernelSize=1, stride=1, padding=0, dilation=1)
        self.fc2 = nn.Sequential(
            ConvBnRelu(inCh=256 + 48, outCh=256, kernelSize=3, stride=1, padding=1, dilation=1),
            ConvBnRelu(inCh=256, outCh=256, kernelSize=3, stride=1, padding=1, dilation=1),
            nn.Conv2d(in_channels=256, out_channels=nClasses, kernel_size=1)
        )

    def forward(self, x):
        h = self.layer1(x)
        h = self.layer2(h)
        h_ = self.reduce(h)  # low-level features for the decoder skip connection
        h = self.layer3(h)
        h = self.layer4(h)
        h = self.layer5(h)
        h = self.aspp(h)
        h = self.fc1(h)
        # upsample encoder output to the low-level feature resolution and fuse
        h = F.interpolate(h, size=h_.shape[2:], mode="bilinear", align_corners=False)
        h = torch.cat((h, h_), dim=1)
        h = self.fc2(h)
        # final upsampling back to the input resolution
        h = F.interpolate(h, size=x.shape[2:], mode="bilinear", align_corners=False)
        return torch.sigmoid(h)  # squash scores into (0, 1)


if __name__ == '__main__':
    # Smoke test: build a model and push a dummy batch through it.
    # Other variants can be tried the same way:
    # model = DeepLabV1(nChannels=3, nClasses=21, nBlocks=[3, 4, 23, 3])
    # model = DeepLabV2(nChannels=3, nClasses=21, nBlocks=[3, 4, 23, 3], atrousRates=[6, 12, 18, 24])
    # model = DeepLabV3(nChannels=3, nClasses=21, nBlocks=[3, 4, 23, 3], atrousRates=[6, 12, 18, 24],
    #                   multiGrids=[1, 2, 4], outputStride=8)
    model = DeepLabV3Plus(nChannels=3, nClasses=21, nBlocks=[3, 4, 23, 3], atrousRates=[6, 12, 18, 24],
                          multiGrids=[1, 2, 4], outputStride=16)

    # all-ones dummy input (same result as randn followed by full_like(..., 1.0))
    image = torch.ones(1, 3, 512, 512)

    print("input:", image.shape)
    print("output:", model(image).shape)
