'''
Author: caishuyang
Date: 2023-03-12 15:13:36
LastEditors: caishuyang
LastEditTime: 2023-03-13 21:33:11
Description: deeplabv3+网络
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from PIL import Image
from torchvision import transforms
import numpy as np


class ASPP(nn.Module):
    '''
    Atrous Spatial Pyramid Pooling (ASPP) module.

    Runs four parallel atrous convolutions (one 1x1 plus three dilated 3x3)
    and a global-average-pooling branch over the input, then concatenates
    the five branch outputs and fuses them with a 1x1 convolution.

    Args:
        inputnum: number of input channels.
        outputnum: number of channels produced by each parallel branch.
        reduce_ratio: backbone output stride (16 or 8); selects the set of
            dilation rates.

    Raises:
        ValueError: if reduce_ratio is neither 16 nor 8.
    '''
    def __init__(self, inputnum, outputnum, reduce_ratio):
        super(ASPP, self).__init__()

        if reduce_ratio == 16:
            void_combine = [1, 6, 12, 18]

        elif reduce_ratio == 8:
            void_combine = [1, 12, 24, 36]

        else:
            # BUGFIX: the original fell through and later crashed with a
            # confusing NameError on void_combine; fail fast instead.
            raise ValueError(
                "reduce_ratio must be 16 or 8, got %r" % (reduce_ratio,))

        self.ASPP_conv1_1 = nn.Sequential(
            nn.Conv2d(inputnum, outputnum, kernel_size=1, stride=1,
                      padding=0, dilation=void_combine[0], bias=False),
            nn.BatchNorm2d(outputnum),
            nn.ReLU()
        )

        # For the 3x3 branches, padding == dilation keeps the spatial size.
        self.ASPP_conv1_3 = nn.Sequential(
            nn.Conv2d(inputnum, outputnum, kernel_size=3, stride=1,
                      padding=void_combine[1], dilation=void_combine[1], bias=False),
            nn.BatchNorm2d(outputnum),
            nn.ReLU()
        )

        self.ASPP_conv2_3 = nn.Sequential(
            nn.Conv2d(inputnum, outputnum, kernel_size=3, stride=1,
                      padding=void_combine[2], dilation=void_combine[2], bias=False),
            nn.BatchNorm2d(outputnum),
            nn.ReLU()
        )

        self.ASPP_conv3_3 = nn.Sequential(
            nn.Conv2d(inputnum, outputnum, kernel_size=3, stride=1,
                      padding=void_combine[3], dilation=void_combine[3], bias=False),
            nn.BatchNorm2d(outputnum),
            nn.ReLU()
        )

        # Image-level branch: global average pool to 1x1, then 1x1 conv.
        self.globalpool = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(inputnum, outputnum, kernel_size=1,
                      stride=1, padding=0, bias=False),
            nn.BatchNorm2d(outputnum),
            nn.ReLU()
        )

        # Fuse the 5 concatenated branches (outputnum*5 channels) to 256.
        self.afterconv = nn.Sequential(
            nn.Conv2d(outputnum*5, 256, 1, bias=False),
            nn.BatchNorm2d(256)
        )

        self._initweight()

    def forward(self, x):
        '''Apply all five branches to x and return the fused 256-channel map.'''
        aftcov1_1 = self.ASPP_conv1_1(x)
        aftcov1_3 = self.ASPP_conv1_3(x)
        aftcov2_3 = self.ASPP_conv2_3(x)
        aftcov3_3 = self.ASPP_conv3_3(x)
        aftglobalpool = self.globalpool(x)
        # Upsample the 1x1 pooled branch back to the conv branches' size.
        aftglobalpool = F.interpolate(aftglobalpool, size=aftcov3_3.size()[
                                      2:], mode='bilinear', align_corners=True)

        montage = torch.cat((aftcov1_1, aftcov1_3, aftcov2_3,
                            aftcov3_3, aftglobalpool), dim=1)
        output = self.afterconv(montage)
        return output

    def _initweight(self):
        '''
        Initialize weights: He-style normal init for convolutions,
        weight=1 / bias=0 for BatchNorm layers.
        '''
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))

            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


def padding_set(inputimg, convsize, voidratio):
    '''
    Zero-pad inputimg so that a depthwise conv with kernel size `convsize`
    and dilation `voidratio` produces "same"-sized output (stride 1).
    '''
    # Effective (dilated) kernel size: 1 + (k - 1) * dilation.
    effective = (convsize - 1) * voidratio + 1
    total = effective - 1
    # Split the padding; the extra pixel (odd total) goes to the far side.
    near = total // 2
    far = total - near
    return F.pad(inputimg, (near, far, near, far))


class Dsc(nn.Module):
    '''
    Depthwise separable convolution: a depthwise conv (groups == channels)
    followed by a 1x1 pointwise conv that mixes channels.
    '''
    def __init__(self, inputnum, outputnum, convshape=3, step=1, voidratio=1, bias=False):
        super(Dsc, self).__init__()

        # Depthwise stage: each input channel is filtered independently
        # (groups=inputnum); padding is applied manually in forward().
        self.deepconv = nn.Conv2d(inputnum, inputnum, kernel_size=convshape,
                                  stride=step, padding=0, dilation=voidratio, groups=inputnum, bias=bias)
        # Pointwise stage: 1x1 conv to the requested output channel count.
        self.PbPconv = nn.Conv2d(inputnum, outputnum, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        # "Same"-style padding derived from the depthwise kernel/dilation.
        padded = padding_set(
            x, self.deepconv.kernel_size[0], self.deepconv.dilation[0])
        return self.PbPconv(self.deepconv(padded))


class XceptionPart(nn.Module):
    '''
    One Xception residual block: a stack of [ReLU -> depthwise-separable
    conv -> BatchNorm] units plus a skip connection.

    Args:
        inputnum: input channels.
        outputnum: output channels.
        opnum: number of conv units in the stack.
        step: block stride; when != 1 an extra strided Dsc downsamples.
        voidratio: dilation rate of the first conv unit.
        isRELU: when False, drop the leading ReLU (used for the first block,
            whose input has not been activated yet).
        isOut: when True (and step == 1), append one extra stride-1 Dsc.
    '''
    def __init__(self, inputnum, outputnum, opnum, step=1, voidratio=1, isRELU=True, isOut=False):
        super(XceptionPart, self).__init__()

        # 1x1 projection on the skip path whenever the channel count or the
        # spatial size changes, so the residual addition stays shape-valid.
        if outputnum != inputnum or step != 1:
            self.skip_link = nn.Sequential(
                nn.Conv2d(inputnum, outputnum, 1, stride=step, bias=False),
                nn.BatchNorm2d(outputnum)
            )
        else:
            self.skip_link = None

        '''
        Note: every ReLU here uses inplace=False so the original tensors
        are not overwritten, which keeps backpropagation through the skip
        path valid.
        '''
        self.relu = nn.ReLU(inplace=False)
        modulelis = []

        modulelis.append(self.relu)
        modulelis.append(Dsc(inputnum, outputnum, 3,
                         step=1, voidratio=voidratio))
        modulelis.append(nn.BatchNorm2d(outputnum))
        out_innum = outputnum

        for _ in range(opnum - 1):
            modulelis.append(self.relu)
            modulelis.append(Dsc(out_innum, out_innum, 3, step=1, voidratio=1))
            modulelis.append(nn.BatchNorm2d(out_innum))

        if not isRELU:
            # BUGFIX: the original wrote the bare expression `modulelis[1:]`,
            # which discards its result — the leading ReLU was never removed.
            modulelis = modulelis[1:]

        if step != 1:
            modulelis.append(Dsc(outputnum, outputnum, 3, step=2))

        if step == 1 and isOut:
            modulelis.append(Dsc(outputnum, outputnum, 3, step=1))

        self.modulelis = nn.Sequential(*modulelis)

    def forward(self, input):
        '''Run the conv stack and add the (possibly projected) skip path.'''
        x = self.modulelis(input)

        if self.skip_link is not None:
            skip_link = self.skip_link(input)
        else:
            skip_link = input
        '''
        Sum into a NEW tensor rather than updating x in place
        (e.g. x += skip_link): an in-place update would destroy the
        gradient history x carries and break backpropagation.
        '''
        result = skip_link+x

        return result


class ExittModule(nn.Module):
    '''
    Exit-flow residual block: two depthwise-separable convolutions, each
    followed by ReLU, with a 1x1-projected skip path when shapes differ.
    '''
    def __init__(self, inputnum, outputnum, step=1, voidratio=1):
        super(ExittModule, self).__init__()

        needs_projection = outputnum != inputnum or step != 1
        if needs_projection:
            # Match channels / spatial size on the skip path.
            self.skip_link = nn.Sequential(
                nn.Conv2d(inputnum, outputnum, 1, stride=step, bias=False),
                nn.BatchNorm2d(outputnum)
            )
        else:
            self.skip_link = None

        # inplace=False keeps the residual input intact for backprop.
        self.relu = nn.ReLU(inplace=False)
        self.midconv = Dsc(inputnum, inputnum, step=step, voidratio=voidratio)
        self.outputlayer = Dsc(inputnum, outputnum, step=step, voidratio=1)

    def forward(self, input):
        out = self.relu(self.midconv(input))
        out = self.relu(self.outputlayer(out))

        shortcut = input if self.skip_link is None else self.skip_link(input)
        '''
        As in XceptionPart: the sum must go into a new tensor rather than
        updating in place, otherwise the gradient information of the conv
        branch is lost and backpropagation fails.
        '''
        return shortcut + out


class Xception(nn.Module):
    '''
    Modified aligned Xception backbone for DeepLabV3+.

    Returns both the final high-level feature map (2048 channels) and an
    early 1/4-resolution low-level feature map (128 channels) for the
    decoder.

    Args:
        inputnum: number of input image channels.
        reduce_ratio: overall output stride of the backbone (16 or 8).

    Raises:
        NotImplementedError: for any other reduce_ratio.
    '''
    def __init__(self, inputnum=3, reduce_ratio=16):
        super(Xception, self).__init__()

        if reduce_ratio == 16:
            in_3_step = 2
            modulevoidr = 2
            exit_voidratio = (1, 2)
        elif reduce_ratio == 8:
            in_3_step = 1
            modulevoidr = 2
            exit_voidratio = (2, 4)

        else:
            raise NotImplementedError

        # Entry flow: two plain 3x3 convs; the first halves the resolution.
        self.inconv1 = nn.Conv2d(
            inputnum, 32, 3, stride=2, padding=1, bias=False)
        self.inBN1 = nn.BatchNorm2d(32)
        self.relu = nn.ReLU(inplace=False)

        self.inconv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
        self.inBN2 = nn.BatchNorm2d(64)

        self.instack1 = XceptionPart(64, 128, opnum=2, step=2, isRELU=False)
        self.instack2 = XceptionPart(128, 256, opnum=2, step=2, isRELU=True)
        self.instack3 = XceptionPart(
            256, 728, opnum=2, step=in_3_step, isRELU=True)

        # Middle flow: 16 residual blocks.
        # BUGFIX: the original built this as `[XceptionPart(...)] * 16`,
        # which repeats the SAME module object 16 times — all 16 "blocks"
        # shared one set of weights. Build 16 independent blocks instead.
        self.midstack1 = nn.Sequential(*[
            XceptionPart(728, 728, opnum=3, step=1,
                         voidratio=modulevoidr, isRELU=True)
            for _ in range(16)
        ])

        # Exit flow. NOTE(review): only exit_voidratio[1] is ever used;
        # index [0] was unused in the original too — confirm intent.
        self.ExittModule = ExittModule(
            728, 1024, step=1, voidratio=exit_voidratio[1])

        self.exitconv_op_1 = nn.Sequential(
            Dsc(1024, 1536, voidratio=exit_voidratio[1]),
            nn.BatchNorm2d(1536),
            nn.ReLU(inplace=False)
        )

        self.exitconv_op_2 = nn.Sequential(
            Dsc(1536, 1536, voidratio=exit_voidratio[1]),
            nn.BatchNorm2d(1536),
            nn.ReLU(inplace=False)
        )

        self.exitconv_op_3 = nn.Sequential(
            Dsc(1536, 2048, voidratio=exit_voidratio[1]),
            nn.BatchNorm2d(2048),
            nn.ReLU(inplace=False)
        )

    def forward(self, x):
        '''Return (high-level features, 1/4-resolution low-level features).'''
        x = self.inconv1(x)  # 1/2 resolution
        x = self.inBN1(x)
        x = self.relu(x)

        x = self.inconv2(x)
        x = self.inBN2(x)
        x = self.relu(x)

        x = self.instack1(x)  # 1/4 resolution; saved for the decoder
        DCNNdown = x
        x = self.instack2(x)
        x = self.instack3(x)

        x = self.midstack1(x)

        x = self.ExittModule(x)
        x = self.exitconv_op_1(x)
        x = self.exitconv_op_2(x)
        x = self.exitconv_op_3(x)

        return x, DCNNdown


class DeepLabV3(nn.Module):
    '''
    DeepLabV3+ semantic segmentation network.

    Encoder: modified Xception backbone followed by ASPP.
    Decoder: fuses the ASPP output with 1/4-resolution low-level features,
    then upsamples the logits to the input size and applies a sigmoid.

    Args:
        inputimg_ch: number of input image channels.
        class_num: number of output channels (classes).
        down_ratio: backbone output stride (16 or 8).
    '''
    def __init__(self, inputimg_ch=3, class_num=2, down_ratio=16):
        super(DeepLabV3, self).__init__()

        self.XceptionFeature = Xception(inputimg_ch, reduce_ratio=down_ratio)
        # BUGFIX: reduce_ratio was hard-coded to 16, ignoring down_ratio;
        # the ASPP dilation rates must match the backbone's output stride.
        self.ASPP = ASPP(2048, 256, reduce_ratio=down_ratio)

        self.exASPPconv = nn.Sequential(
            nn.Conv2d(256, 256, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU())

        # 1x1 conv shrinking the low-level features (128 -> 48 channels)
        # before fusion with the ASPP branch.
        self.DCNNdownConv = nn.Sequential(
            nn.Conv2d(128, 48, 1, bias=False),
            nn.BatchNorm2d(48),
            nn.ReLU()
        )
        # Decoder head: 256 + 48 = 304 fused channels -> class logits.
        self.conv_3 = nn.Sequential(
            nn.Conv2d(304, 256, kernel_size=3,
                      stride=1, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3,
                      stride=1, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, class_num, kernel_size=1, stride=1)
        )

        self._initweight()

    def forward(self, input):
        '''Return per-pixel sigmoid scores at the input resolution.'''
        x, DCNNdownImg = self.XceptionFeature(input)
        x = self.ASPP(x)
        x = self.exASPPconv(x)
        # Upsample the ASPP output to 1/4 of the input size so it matches
        # the low-level feature map before concatenation.
        x = F.interpolate(x, size=(int(math.ceil(input.size()[-2] / 4)),
                                   int(math.ceil(input.size()[-1] / 4))), mode='bilinear', align_corners=True)

        DCNNdownImg = self.DCNNdownConv(DCNNdownImg)

        x = torch.cat((x, DCNNdownImg), dim=1)
        x = self.conv_3(x)
        x = F.interpolate(x, size=input.size()[
                          2:], mode='bilinear', align_corners=True)

        '''
        Note from the original author: keep this activation. Without it,
        train with BCEWithLogitsLoss instead. For this binary task a
        softmax normalization did not converge well, and BCEWithLogitsLoss
        appeared to speed up convergence.
        '''
        x = torch.sigmoid(x)

        return x

    def _initweight(self):
        '''He-style normal init for convs; BN weight=1, bias=0.'''
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))

            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


if __name__ == "__main__":
    pic = Image.open("Dataset\\Train\\img(1)\\img.png")
    pic = pic.convert("RGB")
    trans = transforms.Compose([
        transforms.ToTensor()])
    tensorpic = trans(pic)
    # tensorpic=torch.tensor(img2,dtype=torch.float)
    net = DeepLabV3()
    net.eval()
    tensorpic=np.array(tensorpic)
    tensorpic=np.array([tensorpic])
    p=net(torch.tensor(tensorpic))
    print(p.size())
