# -*- coding: utf-8 -*-
"""
@project: CloudDetection
@author: Wu Yue
@file: deeplab
@ide: PyCharm
@create time: 2020-10-12 07:28
@change time:
@function: deeplab v1 v2 v3 v3+
"""

import torch
import torch.nn.modules as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import _LRScheduler
from network.involution import *


class Stem(nn.Module):
    """
    Entry stem applied before the first ResLayer: a 7x7 strided convolution
    (Conv+BN+ReLU) to enlarge the receptive field, followed by a 3x3
    max-pool with stride 2. Each stage halves the spatial resolution.
    """

    def __init__(self, inCh, outCh):
        super(Stem, self).__init__()
        # 7x7 conv, stride 2, padding 3 gives "same"-style alignment.
        self.convBnRelu = ConvBnRelu(inCh=inCh, outCh=outCh, kernelSize=7, stride=2, padding=3, dilation=1)
        # ceil_mode=True so odd-sized inputs round up instead of dropping a row/col.
        self.mp = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)

    def forward(self, x):
        return self.mp(self.convBnRelu(x))


class ConvBnRelu(nn.Module):
    """
    Atrous-convolution building block: Conv2d (no bias) -> BatchNorm2d,
    optionally followed by ReLU.
    """

    def __init__(self, inCh, outCh, kernelSize, stride, padding, dilation, relu=True):
        super(ConvBnRelu, self).__init__()

        # Bias is redundant here because BatchNorm re-centers the activations.
        self.conv = nn.Conv2d(in_channels=inCh, out_channels=outCh, kernel_size=kernelSize, stride=stride,
                              padding=padding, dilation=dilation, bias=False)
        self.bn = nn.BatchNorm2d(outCh, eps=1e-5, momentum=0.999)

        # Build the pipeline as a list so both variants share one code path.
        stages = [self.conv, self.bn]
        if relu:
            stages.append(nn.ReLU())
        self.convBnRelu = nn.Sequential(*stages)

    def forward(self, x):
        return self.convBnRelu(x)


class Bottleneck(nn.Module):
    """
    ResNet-style bottleneck: a 1x1 conv shrinks the channels, a 3x3-sized
    spatial operator (here an involution kernel, replacing the original 3x3
    atrous conv) processes them, and a final 1x1 conv restores the width; a
    residual shortcut wraps the whole stack. The reduction factor is set by
    ``bottleneckExpansion``.

    NOTE(review): since the 3x3 atrous conv was swapped for involution, the
    ``dilation`` argument is accepted but no longer used inside this block —
    confirm this is intentional.
    """

    def __init__(self, inCh, outCh, stride, dilation, downSample=False, bottleneckExpansion=4):
        super(Bottleneck, self).__init__()

        reducedCh = int(outCh / bottleneckExpansion)
        self.reduce = ConvBnRelu(inCh=inCh, outCh=reducedCh, kernelSize=1, stride=stride, padding=0, dilation=1, relu=True)

        # Involution operator (RedNet) in place of the original 3x3 atrous conv.
        self.invol = involution(reducedCh, kernel_size=3, stride=1)
        self.bn = nn.BatchNorm2d(reducedCh, eps=1e-5, momentum=0.999)
        self.conv3x3 = nn.Sequential(self.invol, self.bn)

        self.increase = ConvBnRelu(inCh=reducedCh, outCh=outCh, kernelSize=1, stride=1, padding=0, dilation=1, relu=False)
        if downSample:
            # Projection shortcut to match channel count / stride.
            self.shortCut = ConvBnRelu(inCh=inCh, outCh=outCh, kernelSize=1, stride=stride, padding=0, dilation=1, relu=False)
        else:
            # Identity shortcut (plain callable, deliberately not a registered module).
            self.shortCut = lambda x: x

    def forward(self, x):
        out = self.increase(self.conv3x3(self.reduce(x)))
        out += self.shortCut(x)
        return F.relu(out)


class ResLayer(nn.Sequential):
    """
    A stack of Bottleneck blocks. The backbone is assembled from ResLayers;
    downsampling (if any) happens only in the first Bottleneck of each layer.
    """

    def __init__(self, nLayers, inCh, outCh, stride, dilation, multiGrids=None):
        super(ResLayer, self).__init__()

        # Per-block dilation multipliers; default is a uniform grid of 1s.
        grids = [1] * nLayers if multiGrids is None else multiGrids
        assert len(grids) == nLayers

        for idx, grid in enumerate(grids):
            first = idx == 0
            self.add_module(
                "block{}".format(idx + 1),
                Bottleneck(
                    inCh=inCh if first else outCh,
                    outCh=outCh,
                    stride=stride if first else 1,
                    dilation=dilation * grid,
                    downSample=first,
                    bottleneckExpansion=4,
                ),
            )


class ImagePool(nn.Module):
    """
    Image-level pooling branch of ASPP: adaptively average-pool the feature
    map, project it with a 1x1 conv (BN+ReLU), then bilinearly upsample the
    result back to the input's spatial size.
    """

    def __init__(self, inCh, outCh):
        super(ImagePool, self).__init__()

        # Reference implementations use output_size=1, but that did not run here;
        # following https://github.com/Joyako/DeepLab-v3_plus_PyTorch/blob/master/module/general_network/ASPP.py
        # output_size was changed to 4.
        self.pool = nn.AdaptiveAvgPool2d(output_size=4)
        self.conv = ConvBnRelu(inCh=inCh, outCh=outCh, kernelSize=1, stride=1, padding=0, dilation=1)

    def forward(self, x):
        height, width = x.shape[2], x.shape[3]
        pooled = self.conv(self.pool(x))
        # Upsample back so the branch can be concatenated with the others.
        return F.interpolate(pooled, size=(height, width), mode="bilinear", align_corners=False)


class ASPPV3(nn.Module):
    """
    Atrous Spatial Pyramid Pooling (ASPP) from DeepLab v3: a parallel 1x1
    conv branch, one 3x3 atrous conv branch per rate, and an image-pooling
    branch, all concatenated along the channel axis.
    """

    def __init__(self, inCh, outCh, rates):
        super(ASPPV3, self).__init__()

        self.stages = nn.Module()
        # 1x1 projection branch.
        self.stages.add_module("c0", ConvBnRelu(inCh=inCh, outCh=outCh, kernelSize=1, stride=1, padding=0, dilation=1))
        # One atrous branch per rate; padding == dilation keeps spatial size.
        for idx, rate in enumerate(rates, start=1):
            self.stages.add_module(
                "c{}".format(idx),
                ConvBnRelu(inCh=inCh, outCh=outCh, kernelSize=3, stride=1, padding=rate, dilation=rate),
            )
        # Global-context branch.
        self.stages.add_module("imagepool", ImagePool(inCh, outCh))

    def forward(self, x):
        outputs = [branch(x) for branch in self.stages.children()]
        return torch.cat(outputs, dim=1)


"""
DeepLab v3+  + RedNet
"""
class DeepLabV3PlusRedNet(nn.Module):
    """
    DeepLab v3+ segmentation network with a RedNet (involution) backbone.

    Encoder: Stem -> four ResLayers -> ASPP -> 1x1 fuse conv.
    Decoder: low-level features (taken after layer2) are reduced to 48
    channels and concatenated with the upsampled ASPP output, refined by two
    3x3 convs, then upsampled to the input resolution. The output passes
    through a sigmoid, so each class channel is an independent probability map.
    """

    def __init__(self, nChannels, nClasses, nBlocks, atrousRates, multiGrids, outputStride):
        """
        :param nChannels: number of input channels
        :param nClasses: number of output classes
        :param nBlocks: list of 4 ints, the bottleneck count of each ResLayer
        :param atrousRates: dilation rates of the ASPP branches
        :param multiGrids: per-block dilation multipliers for the last ResLayer
        :param outputStride: encoder output stride, 8 or 16
        :raises ValueError: if outputStride is neither 8 nor 16
        """
        super(DeepLabV3PlusRedNet, self).__init__()

        # Per-ResLayer stride and dilation schedules.
        if outputStride == 8:
            s = [1, 2, 1, 1]
            d = [1, 1, 2, 4]
        elif outputStride == 16:
            s = [1, 2, 2, 1]
            d = [1, 1, 1, 2]
        else:
            # Previously an unsupported value left s/d unbound and caused a
            # confusing NameError below; fail fast with a clear message.
            raise ValueError("outputStride must be 8 or 16, got {}".format(outputStride))

        # Encoder
        ch = [64 * 2 ** p for p in range(6)]  # [64, 128, 256, 512, 1024, 2048]
        # ASPP concatenates: 1x1 branch + len(rates) atrous branches + image
        # pool, each 256 channels -> (len(rates) + 2) * 256.
        concatCh = 256 * (len(atrousRates) + 2)
        self.layer1 = Stem(inCh=nChannels, outCh=ch[0])
        self.layer2 = ResLayer(nLayers=nBlocks[0], inCh=ch[0], outCh=ch[2], stride=s[0], dilation=d[0])
        self.layer3 = ResLayer(nLayers=nBlocks[1], inCh=ch[2], outCh=ch[3], stride=s[1], dilation=d[1])
        self.layer4 = ResLayer(nLayers=nBlocks[2], inCh=ch[3], outCh=ch[4], stride=s[2], dilation=d[2])
        self.layer5 = ResLayer(nLayers=nBlocks[3], inCh=ch[4], outCh=ch[5], stride=s[3], dilation=d[3], multiGrids=multiGrids)
        self.aspp = ASPPV3(inCh=ch[5], outCh=256, rates=atrousRates)
        self.fc1 = ConvBnRelu(inCh=concatCh, outCh=256, kernelSize=1, stride=1, padding=0, dilation=1)

        # Decoder
        # 48-channel projection of the low-level (layer2) features.
        self.reduce = ConvBnRelu(inCh=256, outCh=48, kernelSize=1, stride=1, padding=0, dilation=1)
        # 256 (ASPP output) + 48 (low-level) = 304 input channels.
        self.fc2 = nn.Sequential(
            ConvBnRelu(inCh=304, outCh=256, kernelSize=3, stride=1, padding=1, dilation=1),
            ConvBnRelu(inCh=256, outCh=256, kernelSize=3, stride=1, padding=1, dilation=1),
            nn.Conv2d(in_channels=256, out_channels=nClasses, kernel_size=1)
        )

    def forward(self, x):
        h = self.layer1(x)
        h = self.layer2(h)
        h_ = self.reduce(h)  # low-level features kept for the decoder
        h = self.layer3(h)
        h = self.layer4(h)
        h = self.layer5(h)
        h = self.aspp(h)
        h = self.fc1(h)
        # Upsample ASPP output to the low-level feature resolution and fuse.
        h = F.interpolate(h, size=h_.shape[2:], mode="bilinear", align_corners=False)
        h = torch.cat((h, h_), dim=1)
        h = self.fc2(h)
        # Restore the original input resolution.
        h = F.interpolate(h, size=x.shape[2:], mode="bilinear", align_corners=False)
        return torch.sigmoid(h)
