# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 09:57:49 2019

@author: Fsl
"""

import torch
from torchvision import models
import torch.nn as nn
from model.resnet import resnet34
# from resnet import resnet34
# import resnet
from torch.nn import functional as F
import torchsummary
from torch.nn import init
up_kwargs = {'mode': 'bilinear', 'align_corners': True}


# ********** Fuse the features of all earlier stages AND concatenate the decoder features
class CPFNet(nn.Module):
    """CPFNet variant that fuses all earlier encoder stages (GPG modules)
    and concatenates every deeper decoder feature at each decoder level.

    Args:
        out_planes: number of output channels (1 = binary segmentation).
        ccm: accepted for interface compatibility; not used here.
        norm_layer: normalisation layer passed to the output head.
        is_training: stored flag; does not change behaviour in this class.
        expansion, base_channel: select the backbone channel layout; only
            the three combinations handled below are supported.
    """

    def __init__(self, out_planes=1, ccm=True, norm_layer=nn.BatchNorm2d, is_training=True, expansion=2, base_channel=32):
        super(CPFNet, self).__init__()
        self.out_chanel = out_planes
        self.backbone = resnet34(pretrained=True)
        self.expansion = expansion
        self.base_channel = base_channel
        if self.expansion == 4 and self.base_channel == 64:
            expan = [512, 1024, 2048]
            spatial_ch = [128, 256]
        elif self.expansion == 4 and self.base_channel == 32:
            expan = [256, 512, 1024]
            spatial_ch = [32, 128]
        elif self.expansion == 2 and self.base_channel == 32:  # default (resnet34)
            #         c3   c4   c5  channels
            expan = [128, 256, 512]
            #              c1  c2  channels
            spatial_ch = [64, 64]
        else:
            # Fail fast: previously an unsupported combination fell through
            # silently and surfaced as UnboundLocalError on `expan` below.
            raise ValueError(
                'unsupported (expansion, base_channel) combination: '
                '({}, {})'.format(expansion, base_channel))

        self.is_training = is_training
        self.sap = SAPblock(expan[-1])  # 512 channels at 1/32 resolution

        # NOTE: decoder5..decoder2 are not used in forward() (the Concat_to_d*
        # modules below replaced them) but stay registered so that existing
        # checkpoints keep loading.
        # in -> 1/32 (512ch), out -> 1/16 (256ch)
        self.decoder5 = DecoderBlock(expan[-1], expan[-2], relu=False, last=True)   # 256
        self.decoder4 = DecoderBlock(expan[-2], expan[-3], relu=False)              # 128
        self.decoder3 = DecoderBlock(expan[-3], spatial_ch[-1], relu=False)         # 64
        self.decoder2 = DecoderBlock(spatial_ch[-1], spatial_ch[-2])                # 32

        # Global pyramid guidance: m_i fuses stages c1..c_i.
        self.mce_2 = GPG_2([spatial_ch[-2], spatial_ch[-1]], width=spatial_ch[-1], up_kwargs=up_kwargs)
        self.mce_3 = GPG_3([spatial_ch[-2], spatial_ch[-1], expan[0]], width=expan[0], up_kwargs=up_kwargs)
        self.mce_4 = GPG_4([spatial_ch[-2], spatial_ch[-1], expan[0], expan[1]], width=expan[1], up_kwargs=up_kwargs)

        self.main_head = BaseNetHead(spatial_ch[0], out_planes, 2, is_aux=False, norm_layer=norm_layer)

        self.relu = nn.ReLU()
        self.sigmoid = torch.nn.Sigmoid()

        # Dense decoder: each level sees every deeper decoder output.
        self.to_d4 = Concat_to_d4()
        self.to_d3 = Concat_to_d3()
        self.to_d2 = Concat_to_d2()
        self.to_d1 = Concat_to_d1()

    def forward(self, x):
        # ResNet stem.
        x = self.backbone.conv1(x)
        x = self.backbone.bn1(x)
        c1 = self.backbone.relu(x)        # 1/2,  64ch
        x = self.backbone.maxpool(c1)     # 1/4,  64ch

        c2 = self.backbone.layer1(x)      # 1/4,  64ch
        c3 = self.backbone.layer2(c2)     # 1/8,  128ch
        c4 = self.backbone.layer3(c3)     # 1/16, 256ch
        c5 = self.backbone.layer4(c4)     # 1/32, 512ch

        # Multi-stage context embeddings.
        m2 = self.mce_2(c1, c2)
        m3 = self.mce_3(c1, c2, c3)
        m4 = self.mce_4(c1, c2, c3, c4)

        d5 = self.sap(c5)

        # Decoder: every level concatenates all deeper decoder features.
        d4 = self.to_d4(m4, d5)
        d3 = self.to_d3(m3, d4, d5)
        d2 = self.to_d2(m2, d3, d4, d5)
        d1 = self.to_d1(c1, d2, d3, d4, d5)

        # NOTE(review): sigmoid is applied inside the network; make sure the
        # loss / evaluation code does not apply it a second time.
        main_out = self.sigmoid(self.main_head(d1))

        # Returned twice so callers that expect (main, aux) keep working.
        return main_out, main_out

    def _initialize_weights(self):
        # Not called from __init__; invoke explicitly when the backbone is not
        # pretrained. Kaiming for convs; BN weight ~ N(1.0, 0.02), bias 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)


class Concat_to_d4(nn.Module):
    """Build d4 by fusing m4 with the upsampled d5 decoder feature."""

    def __init__(self, in_channels=512, width=256, up_kwargs=None, norm_layer=nn.BatchNorm2d):
        super(Concat_to_d4, self).__init__()
        self.up_kwargs = up_kwargs
        # d5 (512ch, 1/32) -> 256ch at twice the resolution (1/16).
        self.decoder5 = MyDecoderBlock(512, 256, scale=2)
        # 3x3 conv + BN + ReLU squeezes the 2-way concatenation back to `width`.
        self.conv_out = ConvBnRelu(2*width, width, 3, 1, 1,
                                   has_bn=True, norm_layer=norm_layer,
                                   has_relu=True, has_bias=False)

    def forward(self, *inputs):
        """inputs = (m4, d5); returns d4."""
        m4, d5 = inputs[-2], inputs[-1]
        merged = torch.cat([m4, self.decoder5(d5)], dim=1)
        return self.conv_out(merged)


class Concat_to_d3(nn.Module):
    """Build d3 by fusing m3 with the upsampled d4 and d5 decoder features."""

    def __init__(self, in_channels=512, width=128, up_kwargs=None, norm_layer=nn.BatchNorm2d):
        super(Concat_to_d3, self).__init__()
        self.up_kwargs = up_kwargs
        # Bring the deeper decoder features to 128 channels at 1/8 resolution.
        self.decoder5 = MyDecoderBlock(512, 128, scale=4)
        self.decoder4 = MyDecoderBlock(256, 128, scale=2)
        # 3x3 conv + BN + ReLU squeezes the 3-way concatenation back to `width`.
        self.conv_out = ConvBnRelu(3 * width, width, 3, 1, 1,
                                   has_bn=True, norm_layer=norm_layer,
                                   has_relu=True, has_bias=False)

    def forward(self, *inputs):
        """inputs = (m3, d4, d5); returns d3."""
        m3, d4, d5 = inputs[-3], inputs[-2], inputs[-1]
        merged = torch.cat([m3, self.decoder4(d4), self.decoder5(d5)], dim=1)
        return self.conv_out(merged)


class Concat_to_d2(nn.Module):
    """Build d2 by fusing m2 with the upsampled d3, d4 and d5 decoder features."""

    def __init__(self, in_channels=512, width=64, up_kwargs=None, norm_layer=nn.BatchNorm2d):
        super(Concat_to_d2, self).__init__()
        self.up_kwargs = up_kwargs
        # Bring the deeper decoder features to 64 channels at 1/4 resolution.
        self.decoder5 = MyDecoderBlock(512, 64, scale=8)
        self.decoder4 = MyDecoderBlock(256, 64, scale=4)
        self.decoder3 = MyDecoderBlock(128, 64, scale=2)
        # 3x3 conv + BN + ReLU squeezes the 4-way concatenation back to `width`.
        self.conv_out = ConvBnRelu(4 * width, width, 3, 1, 1,
                                   has_bn=True, norm_layer=norm_layer,
                                   has_relu=True, has_bias=False)

    def forward(self, *inputs):
        """inputs = (m2, d3, d4, d5); returns d2."""
        m2, d3, d4, d5 = inputs[-4], inputs[-3], inputs[-2], inputs[-1]
        merged = torch.cat(
            [m2, self.decoder3(d3), self.decoder4(d4), self.decoder5(d5)], dim=1)
        return self.conv_out(merged)


class Concat_to_d1(nn.Module):
    """Build d1 by fusing c1 with the upsampled d2..d5 decoder features."""

    def __init__(self, in_channels=512, width=64, up_kwargs=None, norm_layer=nn.BatchNorm2d):
        super(Concat_to_d1, self).__init__()
        self.up_kwargs = up_kwargs
        # Bring every deeper decoder feature to 64 channels at 1/2 resolution.
        self.decoder5 = MyDecoderBlock(512, 64, scale=16)
        self.decoder4 = MyDecoderBlock(256, 64, scale=8)
        self.decoder3 = MyDecoderBlock(128, 64, scale=4)
        self.decoder2 = MyDecoderBlock(64, 64, scale=2)
        # 3x3 conv + BN + ReLU squeezes the 5-way concatenation back to `width`.
        self.conv_out = ConvBnRelu(5 * width, width, 3, 1, 1,
                                   has_bn=True, norm_layer=norm_layer,
                                   has_relu=True, has_bias=False)

    def forward(self, *inputs):
        """inputs = (c1, d2, d3, d4, d5); returns d1."""
        c1, d2, d3, d4, d5 = inputs[-5], inputs[-4], inputs[-3], inputs[-2], inputs[-1]
        merged = torch.cat(
            [c1, self.decoder2(d2), self.decoder3(d3),
             self.decoder4(d4), self.decoder5(d5)], dim=1)
        return self.conv_out(merged)


class GPG_4(nn.Module):
    """Global Pyramid Guidance for stage 4: fuses encoder features c1..c4
    into one `width`-channel map at c4's (1/16) resolution.

    Args:
        in_channels: channel counts of (c1, c2, c3, c4), shallowest first.
        width: common channel width every stage is projected to.
        up_kwargs: kept for interface compatibility; unused here.
    """

    def __init__(self, in_channels, width=512, up_kwargs=None, norm_layer=nn.BatchNorm2d):
        super(GPG_4, self).__init__()
        self.up_kwargs = up_kwargs

        # Project each input stage to the common channel width.
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False),
            nn.BatchNorm2d(width),
            nn.ReLU(inplace=True))

        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False),
            nn.BatchNorm2d(width),
            nn.ReLU(inplace=True))

        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels[-3], width, 3, padding=1, bias=False),
            nn.BatchNorm2d(width),
            nn.ReLU(inplace=True))

        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels[-4], width, 3, padding=1, bias=False),
            nn.BatchNorm2d(width),
            nn.ReLU(inplace=True))

        # Downsample the shallower stages to c4's resolution:
        # c1 by 8, c2 by 4, c3 by 2.
        self.c1_to_c4 = nn.MaxPool2d(8, 8)
        self.c2_to_c4 = nn.MaxPool2d(4, 4)
        self.c3_to_c4 = nn.MaxPool2d(2, 2)

        # 1x1 output conv: the 2 dilation branches -> `width` channels.
        self.conv_out = nn.Sequential(
            nn.Conv2d(2 * width, width, 1, padding=0, bias=False),
            nn.BatchNorm2d(width))

        # Each dilated separable conv sees the 4-stage concatenation (4*width).
        self.dilation1 = nn.Sequential(
            SeparableConv2d(4 * width, width, kernel_size=3, padding=1, dilation=1, bias=False),
            nn.BatchNorm2d(width),
            nn.ReLU(inplace=True))

        self.dilation2 = nn.Sequential(
            SeparableConv2d(4 * width, width, kernel_size=3, padding=2, dilation=2, bias=False),
            nn.BatchNorm2d(width),
            nn.ReLU(inplace=True))

        # Kaiming init for convs; BN weight ~ N(1.0, 0.02), bias 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, *inputs):
        # inputs are (c1, c2, c3, c4); feats holds them deepest-first.
        feats = [self.conv4(inputs[-1]), self.conv3(inputs[-2]), self.conv2(inputs[-3]), self.conv1(inputs[-4])]

        # Pool the shallower stages down to c4's spatial size.
        # (A dead `feats[-4].size()` call was removed here.)
        feats[-3] = self.c3_to_c4(feats[-3])
        feats[-2] = self.c2_to_c4(feats[-2])
        feats[-1] = self.c1_to_c4(feats[-1])

        feat = torch.cat(feats, dim=1)
        # Parallel dilated branches over the fused features, then 1x1 conv.
        feat = torch.cat([self.dilation1(feat), self.dilation2(feat)], dim=1)
        feat = self.conv_out(feat)
        return feat


class GPG_3(nn.Module):
    """Global Pyramid Guidance for stage 3: fuses encoder features c1..c3
    into one `width`-channel map at c3's (1/8) resolution.

    Args:
        in_channels: channel counts of (c1, c2, c3), shallowest first.
        width: common channel width every stage is projected to.
        up_kwargs: kept for interface compatibility; unused here.
    """

    def __init__(self, in_channels, width=512, up_kwargs=None, norm_layer=nn.BatchNorm2d):
        super(GPG_3, self).__init__()
        self.up_kwargs = up_kwargs

        # Project each input stage to the common channel width.
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False),
            nn.BatchNorm2d(width),
            nn.ReLU(inplace=True))

        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False),
            nn.BatchNorm2d(width),
            nn.ReLU(inplace=True))

        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels[-3], width, 3, padding=1, bias=False),
            nn.BatchNorm2d(width),
            nn.ReLU(inplace=True))

        # Downsample the shallower stages to c3's resolution: c1 by 4, c2 by 2.
        self.c1_to_c3 = nn.MaxPool2d(4, 4)
        self.c2_to_c3 = nn.MaxPool2d(2, 2)

        # 1x1 output conv: the 3 dilation branches -> `width` channels.
        self.conv_out = nn.Sequential(
            nn.Conv2d(3*width, width, 1, padding=0, bias=False),
            nn.BatchNorm2d(width))

        # Each dilated separable conv sees the 3-stage concatenation (3*width).
        self.dilation1 = nn.Sequential(SeparableConv2d(3*width, width, kernel_size=3, padding=1, dilation=1, bias=False),
                                       nn.BatchNorm2d(width),
                                       nn.ReLU(inplace=True))

        self.dilation2 = nn.Sequential(SeparableConv2d(3*width, width, kernel_size=3, padding=2, dilation=2, bias=False),
                                       nn.BatchNorm2d(width),
                                       nn.ReLU(inplace=True))

        self.dilation3 = nn.Sequential(SeparableConv2d(3*width, width, kernel_size=3, padding=4, dilation=4, bias=False),
                                       nn.BatchNorm2d(width),
                                       nn.ReLU(inplace=True))

        # Kaiming init for convs; BN weight ~ N(1.0, 0.02), bias 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, *inputs):
        # inputs are (c1, c2, c3); feats holds them deepest-first.
        feats = [self.conv3(inputs[-1]), self.conv2(inputs[-2]), self.conv1(inputs[-3])]

        # Pool the shallower stages down to c3's spatial size.
        # (A dead `feats[-3].size()` call was removed here.)
        feats[-2] = self.c2_to_c3(feats[-2])
        feats[-1] = self.c1_to_c3(feats[-1])

        feat = torch.cat(feats, dim=1)
        # Parallel dilated branches over the fused features, then 1x1 conv.
        feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat)], dim=1)
        feat = self.conv_out(feat)
        return feat


class GPG_2(nn.Module):
    """Global Pyramid Guidance for stage 2: fuses encoder features c1..c2
    into one `width`-channel map at c2's (1/4) resolution.

    Args:
        in_channels: channel counts of (c1, c2), shallowest first.
        width: common channel width every stage is projected to.
        up_kwargs: kept for interface compatibility; unused here.
    """

    def __init__(self, in_channels, width=512, up_kwargs=None, norm_layer=nn.BatchNorm2d):
        super(GPG_2, self).__init__()
        self.up_kwargs = up_kwargs

        # Project each input stage to the common channel width.
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False),
            nn.BatchNorm2d(width),
            nn.ReLU(inplace=True))

        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False),
            nn.BatchNorm2d(width),
            nn.ReLU(inplace=True))

        # Downsample the shallower stage to c2's resolution: c1 by 2.
        self.c1_to_c2 = nn.MaxPool2d(2, 2)

        # 1x1 output conv: the 4 dilation branches -> `width` channels.
        self.conv_out = nn.Sequential(
            nn.Conv2d(4*width, width, 1, padding=0, bias=False),
            nn.BatchNorm2d(width))

        # Each dilated separable conv sees the 2-stage concatenation (2*width).
        self.dilation1 = nn.Sequential(SeparableConv2d(2*width, width, kernel_size=3, padding=1, dilation=1, bias=False),
                                       nn.BatchNorm2d(width),
                                       nn.ReLU(inplace=True))
        self.dilation2 = nn.Sequential(SeparableConv2d(2*width, width, kernel_size=3, padding=2, dilation=2, bias=False),
                                       nn.BatchNorm2d(width),
                                       nn.ReLU(inplace=True))
        self.dilation3 = nn.Sequential(SeparableConv2d(2*width, width, kernel_size=3, padding=4, dilation=4, bias=False),
                                       nn.BatchNorm2d(width),
                                       nn.ReLU(inplace=True))
        self.dilation4 = nn.Sequential(SeparableConv2d(2*width, width, kernel_size=3, padding=8, dilation=8, bias=False),
                                       nn.BatchNorm2d(width),
                                       nn.ReLU(inplace=True))

        # Kaiming init for convs; BN weight ~ N(1.0, 0.02), bias 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, *inputs):
        # inputs are (c1, c2); feats holds them deepest-first.
        feats = [self.conv2(inputs[-1]), self.conv1(inputs[-2])]

        # Pool c1 down to c2's spatial size.
        # (A dead `feats[-2].size()` call was removed here.)
        feats[-1] = self.c1_to_c2(feats[-1])

        feat = torch.cat(feats, dim=1)
        # Parallel dilated branches over the fused features, then 1x1 conv.
        feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat), self.dilation4(feat)], dim=1)
        feat = self.conv_out(feat)
        return feat


class BaseNetHead(nn.Module):
    """Segmentation head: optional bilinear upscale by `scale`, a 1x1 -> 3x3
    ConvBnRelu stack, then a 1x1 projection to `out_planes` channels.

    `is_aux` selects the hidden width: 64 for an auxiliary head, 32 for the
    main head.
    """

    def __init__(self, in_planes, out_planes, scale,
                 is_aux=False, norm_layer=nn.BatchNorm2d):
        super(BaseNetHead, self).__init__()
        # Hidden width differs only between aux (64) and main (32) heads.
        mid_planes = 64 if is_aux else 32
        self.conv_1x1_3x3 = nn.Sequential(
            ConvBnRelu(in_planes, mid_planes, 1, 1, 0,
                       has_bn=True, norm_layer=norm_layer,
                       has_relu=True, has_bias=False),
            ConvBnRelu(mid_planes, mid_planes, 3, 1, 1,
                       has_bn=True, norm_layer=norm_layer,
                       has_relu=True, has_bias=False))
        # Final per-pixel classifier.
        self.conv_1x1_2 = nn.Conv2d(mid_planes, out_planes, kernel_size=1,
                                    stride=1, padding=0)
        self.scale = scale

        # Kaiming init for convs; BN weight ~ N(1.0, 0.02), bias 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, x):
        if self.scale > 1:
            x = F.interpolate(x, scale_factor=self.scale,
                              mode='bilinear',
                              align_corners=True)
        return self.conv_1x1_2(self.conv_1x1_3x3(x))


# ✔
class SAPblock(nn.Module):
    def __init__(self, in_channels):
        # SAPblock(expan[-1])  # 512
        super(SAPblock, self).__init__()
        self.conv3x3 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, dilation=1, kernel_size=3, padding=1)
        
        self.bn = nn.ModuleList([nn.BatchNorm2d(in_channels), nn.BatchNorm2d(in_channels), nn.BatchNorm2d(in_channels)])

        self.conv1x1 = nn.ModuleList([nn.Conv2d(in_channels=2*in_channels, out_channels=in_channels, dilation=1, kernel_size=1, padding=0),
                                     nn.Conv2d(in_channels=2*in_channels, out_channels=in_channels, dilation=1, kernel_size=1, padding=0)])

        self.conv3x3_1 = nn.ModuleList([nn.Conv2d(in_channels=in_channels, out_channels=in_channels//2, dilation=1, kernel_size=3, padding=1),
                                       nn.Conv2d(in_channels=in_channels, out_channels=in_channels//2, dilation=1, kernel_size=3, padding=1)])

        self.conv3x3_2 = nn.ModuleList([nn.Conv2d(in_channels=in_channels//2, out_channels=2, dilation=1, kernel_size=3, padding=1),
                                       nn.Conv2d(in_channels=in_channels//2, out_channels=2, dilation=1, kernel_size=3, padding=1)])

        self.conv_last = ConvBnRelu(in_planes=in_channels, out_planes=in_channels, ksize=1, stride=1, pad=0, dilation=1)

        self.gamma = nn.Parameter(torch.zeros(1))
    
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):

        x_size = x.size()

        branches_1 = self.conv3x3(x)
        branches_1 = self.bn[0](branches_1)

        branches_2 = F.conv2d(x, self.conv3x3.weight, padding=2, dilation=2)  # share weight
        branches_2 = self.bn[1](branches_2)

        branches_3 = F.conv2d(x, self.conv3x3.weight, padding=4, dilation=4)  # share weight
        branches_3 = self.bn[2](branches_3)

        #                   Fa      ,   Fb
        # 拼接 通道*2
        feat = torch.cat([branches_1, branches_2], dim=1)
        # feat=feat_cat.detach()
        # 1*1卷积到原大小和通道512，3*3卷积 通道减半到256
        feat = self.relu(self.conv1x1[0](feat))
        feat = self.relu(self.conv3x3_1[0](feat))
        # 3*3卷积到 通道数降为 2，通道上进行softmax
        att = self.conv3x3_2[0](feat)
        att = F.softmax(att, dim=1)
        # print(att.shape)
        att_1 = att[:, 0, :, :].unsqueeze(1)
        att_2 = att[:, 1, :, :].unsqueeze(1)
        # print(att_1.shape)
        # print(att_2.shape)

        fusion_1_2 = att_1*branches_1 + att_2*branches_2

        feat1 = torch.cat([fusion_1_2, branches_3], dim=1)
        # feat=feat_cat.detach()
        feat1 = self.relu(self.conv1x1[0](feat1))
        feat1 = self.relu(self.conv3x3_1[0](feat1))
        att1 = self.conv3x3_2[0](feat1)
        att1 = F.softmax(att1, dim=1)
        
        att_1_2 = att1[:, 0, :, :].unsqueeze(1)
        att_3 = att1[:, 1, :, :].unsqueeze(1)

        ax = self.relu(self.gamma*(att_1_2*fusion_1_2 + att_3*branches_3)+(1-self.gamma)*x)
        ax = self.conv_last(ax)

        return ax


class DecoderBlock(nn.Module):
    """Decoder step: 3x3 ConvBnRelu (skipped when last=True), bilinear
    upsample by `scale`, then a 1x1 ConvBnRelu to `out_planes` channels.
    """

    def __init__(self, in_planes, out_planes,
                 norm_layer=nn.BatchNorm2d, scale=2, relu=True, last=False):
        super(DecoderBlock, self).__init__()
        self.conv_3x3 = ConvBnRelu(in_planes, in_planes, 3, 1, 1,
                                   has_bn=True, norm_layer=norm_layer,
                                   has_relu=True, has_bias=False)
        self.conv_1x1 = ConvBnRelu(in_planes, out_planes, 1, 1, 0,
                                   has_bn=True, norm_layer=norm_layer,
                                   has_relu=True, has_bias=False)
        # Registered but never called in forward(); kept so state dicts and
        # parameter ordering stay compatible.
        self.sap = SAPblock(in_planes)
        self.scale = scale
        self.last = last

        # Kaiming init for convs; BN weight ~ N(1.0, 0.02), bias 0.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_uniform_(module.weight.data)
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                init.normal_(module.weight.data, 1.0, 0.02)
                init.constant_(module.bias.data, 0.0)

    def forward(self, x):
        out = x if self.last else self.conv_3x3(x)
        if self.scale > 1:
            out = F.interpolate(out, scale_factor=self.scale, mode='bilinear', align_corners=True)
        return self.conv_1x1(out)


# 修改版
class MyDecoderBlock(nn.Module):
    # DecoderBlock(expan[-1], expan[-2], relu=False, last=True)

    def __init__(self, in_planes, out_planes, norm_layer=nn.BatchNorm2d, scale=2, relu=True, last=False):
        super(MyDecoderBlock, self).__init__()
        self.conv_3x3 = ConvBnRelu(in_planes, in_planes, 3, 1, 1,
                                   has_bn=True, norm_layer=norm_layer,
                                   has_relu=True, has_bias=False)
        self.conv_1x1 = ConvBnRelu(in_planes, out_planes, 1, 1, 0,
                                   has_bn=True, norm_layer=norm_layer,
                                   has_relu=True, has_bias=False)

        self.sap = SAPblock(in_planes)  # 没用到
        self.scale = scale
        self.last = last

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, x):
        if self.last == False:
            x = self.conv_3x3(x)
            # x=self.sap(x)
        if self.scale > 1:
            x = F.interpolate(x, scale_factor=self.scale, mode='bilinear', align_corners=True)
        x = self.conv_1x1(x)
        return x



class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: depthwise conv -> BN -> pointwise 1x1."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=False, BatchNorm=nn.BatchNorm2d):
        super(SeparableConv2d, self).__init__()
        # Depthwise: one filter per input channel (groups=inplanes).
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, groups=inplanes, bias=bias)
        self.bn = BatchNorm(inplanes)
        # Pointwise 1x1: mixes channels, maps inplanes -> planes.
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.bn(self.conv1(x)))


class ConvBnRelu(nn.Module):
    """Conv2d optionally followed by BatchNorm and ReLU.

    `has_bn` / `has_relu` toggle the norm and activation; `has_bias`
    controls the conv bias.
    """

    def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1,
                 groups=1, has_bn=True, norm_layer=nn.BatchNorm2d,
                 has_relu=True, inplace=True, has_bias=False):
        super(ConvBnRelu, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
                              stride=stride, padding=pad,
                              dilation=dilation, groups=groups, bias=has_bias)
        self.has_bn = has_bn
        if has_bn:
            # NOTE(review): `norm_layer` is accepted but nn.BatchNorm2d is
            # hard-wired here, matching the original behaviour.
            self.bn = nn.BatchNorm2d(out_planes)
        self.has_relu = has_relu
        if has_relu:
            self.relu = nn.ReLU(inplace=inplace)

    def forward(self, x):
        out = self.conv(x)
        if self.has_bn:
            out = self.bn(out)
        if self.has_relu:
            out = self.relu(out)
        return out


class GlobalAvgPool2d(nn.Module):
    """Average over the spatial dimensions, keeping an (N, C, 1, 1) shape."""

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, inputs):
        batch, channels = inputs.size(0), inputs.size(1)
        # Flatten H*W, take the per-channel mean, restore 4-D shape.
        pooled = inputs.view(batch, channels, -1).mean(dim=2)
        return pooled.view(batch, channels, 1, 1)


# Printed at import time to signal which model variant this module provides.
print(" Using BaseNet_V2 : 融合所有前面stage的特征 and 拼接解码器的特征")

if __name__ == '__main__':
    # Smoke test: build the model and print a per-layer summary for a
    # 3x256x256 input. NOTE(review): assumes a CUDA device is available
    # (model.cuda(0) and torchsummary both expect GPU here).
    model = CPFNet()
    model.cuda(0)
    torchsummary.summary(model, (3, 256, 256))