# -*- coding: utf-8 -*-


import torch
from torchvision import models
import torch.nn as nn
from model.ba_resnet import resnet34, resnet101
from torch.nn import functional as F
import torchsummary
from torch.nn import init
from model.Aspp import build_aspp

up_kwargs = {'mode': 'bilinear', 'align_corners': True}


class BACPFNet(nn.Module):
    """Boundary-Aware CPFNet.

    Two-branch encoder-decoder segmentation network: a ResNet encoder feeds
    GPG multi-scale context modules and two ASPP bottlenecks; an edge decoder
    predicts a boundary map which then gates the segmentation-branch features
    (BaSm modules) before the segmentation decoder and its deep-supervision
    heads.
    """
    #  'BaseNet': CPFNet(out_planes=args.num_classes)
    def __init__(self, out_planes=1, ccm=True, norm_layer=nn.BatchNorm2d, is_training=True, expansion=2,
                 base_channel=32):
        # NOTE(review): ccm, is_training, expansion and base_channel are
        # accepted but never used in this constructor.
        super(BACPFNet, self).__init__()
        self.out_chanel = out_planes  # (sic "chanel") number of output channels/classes

        self.network = "resnet34"
        # self.network = "resnet101"

        if self.network == "resnet34":
            self.backbone = resnet34(pretrained=True)
            self.in_c = [64, 64, 128, 256, 512]  # per-stage encoder channel counts
        elif self.network == "resnet101":
            self.backbone = resnet101(pretrained=True)
            self.in_c = [64, 256, 512, 1024, 2048]

        # ASPP bottlenecks: input channels = in_c[-1], output channels = aspp_o_c.
        self.aspp_o_c = 512
        self.aspp_s = build_aspp(self.in_c[-1], self.aspp_o_c)  # segmentation branch
        self.aspp_e = build_aspp(self.in_c[-1], self.aspp_o_c)  # edge branch

        self.in_de_c = 256  # common channel width the decoders reduce encoder features to
        #            channel list   ASPP output channels   decoder width    task type: False=edge, True=seg
        self.decoder_e = Decoder(self.in_c, self.aspp_o_c, in_de_c=self.in_de_c, task_type=False)
        self.decoder_s = Decoder(self.in_c, self.aspp_o_c, in_de_c=self.in_de_c, task_type=True)

        # Boundary-attention modules, one per segmentation-branch feature level.
        self.BaSm2 = BaSm(self.in_c[-4], self.in_c[-4])
        self.BaSm3 = BaSm(self.in_c[-3], self.in_c[-3])
        self.BaSm4 = BaSm(self.in_c[-2], self.in_c[-2])
        self.BaSm5 = BaSm(self.in_c[-1], self.in_c[-1])
        self.BaSm6 = BaSm(self.in_c[-1], self.in_c[-1])

        # Main edge prediction head (the main segmentation head is disabled).
        self.main_head_e = BaseNetHead(self.in_c[-4], out_planes, 4, is_aux=False, norm_layer=norm_layer)
        # self.main_head_s = BaseNetHead(self.in_de_c, out_planes, 4, is_aux=False, norm_layer=norm_layer)

        # Segmentation heads: s2 is the primary output, s3-s5 are auxiliary.
        # NOTE(review): s4/s5 upsample 1/16 and 1/32 features by only 8x, so
        # out_seg4/out_seg5 are 1/2 and 1/4 of the input resolution — confirm
        # the training loss resizes its targets accordingly.
        self.main_head_s2 = BaseNetHead(self.in_c[-4], out_planes, 4, is_aux=False, norm_layer=norm_layer)
        self.main_head_s3 = BaseNetHead(self.in_c[-3], out_planes, 8, is_aux=False, norm_layer=norm_layer)
        self.main_head_s4 = BaseNetHead(self.in_c[-2], out_planes, 8, is_aux=False, norm_layer=norm_layer)
        self.main_head_s5 = BaseNetHead(self.in_c[-1], out_planes, 8, is_aux=False, norm_layer=norm_layer)

        # GPG context modules: mce_2/3/4 feed the segmentation branch,
        # mce_22/33/44 feed the edge branch.
        expan = [128, 256, 512]
        spatial_ch = [64, 64]
        self.mce_2 = GPG_2([spatial_ch[-1], expan[0], expan[1], expan[2]], width=spatial_ch[-1], up_kwargs=up_kwargs)
        self.mce_3 = GPG_3([expan[0], expan[1], expan[2]], width=expan[0], up_kwargs=up_kwargs)
        self.mce_4 = GPG_4([expan[1], expan[2]], width=expan[1], up_kwargs=up_kwargs)
        self.mce_22 = GPG_2([spatial_ch[-1], expan[0], expan[1], expan[2]], width=spatial_ch[-1], up_kwargs=up_kwargs)
        self.mce_33 = GPG_3([expan[0], expan[1], expan[2]], width=expan[0], up_kwargs=up_kwargs)
        self.mce_44 = GPG_4([expan[1], expan[2]], width=expan[1], up_kwargs=up_kwargs)

        # NOTE(review): the SAP blocks below are constructed but unused in
        # forward() (the ASPP path is active instead; see the commented lines).
        self.sap_e = SAPblock(expan[-1])
        self.sap_s = SAPblock(expan[-1])

        self.sigmoid_e = torch.nn.Sigmoid()
        # self.sigmoid_s = torch.nn.Sigmoid()

        self.sigmoid_s2 = torch.nn.Sigmoid()
        self.sigmoid_s3 = torch.nn.Sigmoid()
        self.sigmoid_s4 = torch.nn.Sigmoid()
        self.sigmoid_s5 = torch.nn.Sigmoid()

    def forward(self, x):
        """Return (out_seg2, out_edge, out_seg3, out_seg4, out_seg5), each
        sigmoid-activated: main segmentation map, edge map, and three
        auxiliary segmentation maps.

        Shape comments below assume a 3x256x256 input.
        """
        x = self.backbone.conv1(x)
        x = self.backbone.bn1(x)
        x1 = self.backbone.relu(x)  # 1/2      64, 128, 128
        x = self.backbone.maxpool(x1)

        x2 = self.backbone.layer1(x)  # 1/4       64, 64, 64
        x3 = self.backbone.layer2(x2)  # 1/8      128, 32, 32
        x4 = self.backbone.layer3(x3)  # 1/16     256, 16, 16
        x5 = self.backbone.layer4(x4)  # 1/32     512, 8, 8

        # Edge-branch multi-scale context features.
        e2 = self.mce_22(x2, x3, x4, x5)
        e3 = self.mce_33(x3, x4, x5)
        e4 = self.mce_44(x4, x5)
        e5 = x5

        # Segmentation-branch multi-scale context features.
        s2 = self.mce_2(x2, x3, x4, x5)
        s3 = self.mce_3(x3, x4, x5)
        s4 = self.mce_4(x4, x5)
        s5 = x5

        s6 = self.aspp_s(x5)  # segmentation bottleneck
        e6 = self.aspp_e(x5)  # edge bottleneck
        # s6 = self.sap_s(x5)  # segmentation
        # e6 = self.sap_e(x5)  # edge

        # Edge prediction.
        edge2, edge3, edge4, edge5 = self.decoder_e(e6, e2, e3, e4, e5)

        edge = self.main_head_e(edge2)
        out_edge = self.sigmoid_e(edge)

        # print("s6.size()", s6.size())
        # Gate every segmentation feature level with the predicted edge map.
        ed6 = self.BaSm6(out_edge, s6)
        ed5 = self.BaSm5(out_edge, s5)
        ed4 = self.BaSm4(out_edge, s4)
        ed3 = self.BaSm3(out_edge, s3)
        ed2 = self.BaSm2(out_edge, s2)

        # Segmentation prediction.
        seg2, seg3, seg4, seg5 = self.decoder_s(ed6, ed2, ed3, ed4, ed5)

        seg2 = self.main_head_s2(seg2)
        seg3 = self.main_head_s3(seg3)
        seg4 = self.main_head_s4(seg4)
        seg5 = self.main_head_s5(seg5)

        out_seg2 = self.sigmoid_s2(seg2)
        out_seg3 = self.sigmoid_s3(seg3)
        out_seg4 = self.sigmoid_s4(seg4)
        out_seg5 = self.sigmoid_s5(seg5)

        return out_seg2, out_edge, out_seg3, out_seg4, out_seg5

    def _initialize_weights(self):
        # NOTE(review): never called by this class; submodules run their own
        # init loops in their constructors.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)
            # m.weight.data.fill_(1)
            # m.bias.data.zero_()=2-0


class SAPblock(nn.Module):
    """Scale-Aware Pyramid fusion block.

    Builds three views of the input with one shared 3x3 kernel applied at
    dilation rates 1/2/4, fuses them with two stages of learned pixel-wise
    softmax attention, then blends the fused result with the identity via a
    learned scalar ``gamma`` (initialized to 0, i.e. pure identity at start).
    """

    def __init__(self, in_channels):
        super(SAPblock, self).__init__()
        # Single 3x3 kernel, reused at dilations 1/2/4 in forward().
        self.conv3x3 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, dilation=1, kernel_size=3,
                                 padding=1)

        # One BatchNorm per dilated branch.
        self.bn = nn.ModuleList([nn.BatchNorm2d(in_channels), nn.BatchNorm2d(in_channels), nn.BatchNorm2d(in_channels)])
        # Two attention stages: index 0 fuses branches 1+2, index 1 fuses the
        # stage-1 result with branch 3.
        self.conv1x1 = nn.ModuleList(
            [nn.Conv2d(in_channels=2 * in_channels, out_channels=in_channels, dilation=1, kernel_size=1, padding=0),
             nn.Conv2d(in_channels=2 * in_channels, out_channels=in_channels, dilation=1, kernel_size=1, padding=0)])
        self.conv3x3_1 = nn.ModuleList(
            [nn.Conv2d(in_channels=in_channels, out_channels=in_channels // 2, dilation=1, kernel_size=3, padding=1),
             nn.Conv2d(in_channels=in_channels, out_channels=in_channels // 2, dilation=1, kernel_size=3, padding=1)])
        self.conv3x3_2 = nn.ModuleList(
            [nn.Conv2d(in_channels=in_channels // 2, out_channels=2, dilation=1, kernel_size=3, padding=1),
             nn.Conv2d(in_channels=in_channels // 2, out_channels=2, dilation=1, kernel_size=3, padding=1)])
        self.conv_last = ConvBnRelu(in_planes=in_channels, out_planes=in_channels, ksize=1, stride=1, pad=0, dilation=1)

        # Learned residual blend weight.
        self.gamma = nn.Parameter(torch.zeros(1))

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Three branches sharing conv3x3's weights at increasing dilation
        # (the dilated branches intentionally skip conv3x3's bias; BN absorbs it).
        branches_1 = self.bn[0](self.conv3x3(x))
        branches_2 = self.bn[1](F.conv2d(x, self.conv3x3.weight, padding=2, dilation=2))
        branches_3 = self.bn[2](F.conv2d(x, self.conv3x3.weight, padding=4, dilation=4))

        # Stage 1: softmax attention between branches 1 and 2.
        feat = torch.cat([branches_1, branches_2], dim=1)
        feat = self.relu(self.conv1x1[0](feat))
        feat = self.relu(self.conv3x3_1[0](feat))
        att = F.softmax(self.conv3x3_2[0](feat), dim=1)
        fusion_1_2 = att[:, 0:1] * branches_1 + att[:, 1:2] * branches_2

        # Stage 2: attention between the stage-1 fusion and branch 3.
        # Fix: use the second set of attention modules (index 1); the original
        # reused index 0 here, leaving every [1] module as dead parameters.
        feat1 = torch.cat([fusion_1_2, branches_3], dim=1)
        feat1 = self.relu(self.conv1x1[1](feat1))
        feat1 = self.relu(self.conv3x3_1[1](feat1))
        att1 = F.softmax(self.conv3x3_2[1](feat1), dim=1)
        fused = att1[:, 0:1] * fusion_1_2 + att1[:, 1:2] * branches_3

        # Residual blend with the input, then a final 1x1 ConvBnRelu.
        ax = self.relu(self.gamma * fused + (1 - self.gamma) * x)
        return self.conv_last(ax)


class Decoder(nn.Module):
    """Top-down decoder built from chained DecoderUnit fusions.

    Fuses the deepest encoder feature with the bottleneck feature, then walks
    up the pyramid fusing each shallower feature with the previous decoder
    output.

    Args:
        in_channels: encoder channel list, shallow -> deep (last 4 entries used).
        bos_c: channel count of the bottleneck feature ``bos5``.
        in_de_c: nominal decoder width, forwarded to each DecoderUnit.
        task_type: kept for interface compatibility (False=edge, True=seg);
            both decoders currently return the same tuple.
    """

    def __init__(self, in_channels, bos_c, in_de_c=256, task_type=False):
        super(Decoder, self).__init__()
        self.task_type = task_type

        self.fuse_x5_bos5 = DecoderUnit(in_channels[-1], bos_c, in_de_c, 8)
        self.fuse_x4_x5 = DecoderUnit(in_channels[-2], in_channels[-1], in_de_c, 16)
        self.fuse_x3_x4 = DecoderUnit(in_channels[-3], in_channels[-2], in_de_c, 32)
        self.fuse_x2_x3 = DecoderUnit(in_channels[-4], in_channels[-3], in_de_c, 64)

    # self.fuse_x1_x2 = DecoderUnit(in_channels[-5], in_channels[-4], in_de_c)

    def forward(self, bos5, *inputs):
        """Return (d2, d3, d4, d5), finest to coarsest decoder features.

        ``inputs`` are encoder/GPG features ordered shallow -> deep; ``bos5``
        is the bottleneck (e.g. ASPP) feature.
        """
        d5 = self.fuse_x5_bos5(inputs[-1], bos5)
        d4 = self.fuse_x4_x5(inputs[-2], d5)
        d3 = self.fuse_x3_x4(inputs[-3], d4)
        d2 = self.fuse_x2_x3(inputs[-4], d3)
        # d1 = self.fuse_x1_x2(inputs[-5], d2)

        # Fix: the original if/elif returned the identical tuple on both
        # branches (and would implicitly return None otherwise); return once.
        return d2, d3, d4, d5

class DecoderUnit(nn.Module):
    """One decoder fusion step: merge a skip feature with a deeper feature.

    The deeper feature is projected to the skip's channel count, upsampled to
    the skip's resolution, concatenated and fused with a 3x3 conv, then a
    squeeze-excite style gate (avg-pool -> 1x1 bottleneck -> sigmoid)
    reweights the skip feature before adding the deeper one back.

    Args:
        pre_c: channel count of the skip (shallower) feature; also the output width.
        back_c: channel count of the deeper feature.
        in_de_c: accepted for interface compatibility but unused (output width
            is always ``pre_c``).
        pool_size: AvgPool2d kernel; presumably matches the feature's spatial
            size so the gate collapses to 1x1 — TODO confirm for non-256 inputs.
    """

    def __init__(self, pre_c, back_c, in_de_c, pool_size):
        super(DecoderUnit, self).__init__()
        self.out_c = pre_c
        # 1x1 projection for the deeper ("back") feature.
        self.conv_1x1_back = nn.Sequential(
            nn.Conv2d(back_c, self.out_c, 1, bias=False),
            nn.BatchNorm2d(self.out_c),
            nn.ReLU(inplace=True))

        # 1x1 projection for the skip ("pre") feature.
        self.conv_1x1_pre = nn.Sequential(
            nn.Conv2d(pre_c, self.out_c, 1, bias=False),
            nn.BatchNorm2d(self.out_c),
            nn.ReLU(inplace=True))

        # 3x3 fusion of the concatenated pair back down to out_c channels.
        self.conv3x3 = nn.Sequential(
            nn.Conv2d(2 * self.out_c, self.out_c, 3, padding=1, bias=False),
            nn.BatchNorm2d(self.out_c),
            nn.ReLU(inplace=True))

        self.sigmoid = torch.nn.Sigmoid()

        self.pool = nn.AvgPool2d(pool_size)

        # Channel-attention bottleneck (reduction factor 16).
        self.att_1x1_1 = nn.Sequential(
            nn.Conv2d(self.out_c, self.out_c // 16, 1, padding=0, bias=False),
            nn.ReLU(inplace=True))
        self.att_1x1_2 = nn.Sequential(
            nn.Conv2d(self.out_c // 16, self.out_c, 1, padding=0, bias=False))

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, pre, back):
        """Fuse skip feature ``pre`` with deeper feature ``back``."""
        # Project the deeper feature to the skip channel count when needed.
        if back.size(1) != self.out_c:
            back = self.conv_1x1_back(back)

        # Upsample the deeper feature to the skip's spatial size when needed.
        if back.shape[2:] != pre.shape[2:]:
            back = F.interpolate(back, pre.shape[2:], **up_kwargs)

        # Project the skip feature when its width differs (no-op here since
        # out_c == pre_c, but kept for safety).
        if pre.size(1) != self.out_c:
            pre = self.conv_1x1_pre(pre)

        merged = self.conv3x3(torch.cat((pre, back), dim=1))
        gate = self.sigmoid(self.att_1x1_2(self.att_1x1_1(self.pool(merged))))
        return pre * gate + back


class BaSm(nn.Module):
    """Boundary-attention module.

    Multiplies a feature map by an (upsampled) edge probability map,
    concatenates the gated and original features, fuses them with a 3x3
    conv, and reweights channels with a squeeze-excite gate.

    Args:
        inchannels: channels of the input feature ``x``.
        in_de_c: working channel width; callers pass in_de_c == inchannels,
            which the 2*in_de_c cat below relies on.
    """

    def __init__(self, inchannels, in_de_c):
        super(BaSm, self).__init__()
        # NOTE(review): conv1x1 and dropout are built (and initialized) but
        # never used in forward(); kept so existing checkpoints still load.
        self.conv1x1 = nn.Sequential(
            nn.Conv2d(inchannels, in_de_c, 3, padding=1, bias=False),
            nn.BatchNorm2d(in_de_c),
            nn.ReLU(inplace=True))

        self.sigmoid = nn.Sigmoid()
        self.conv3x3 = nn.Conv2d(2*in_de_c, in_de_c, 3, padding=1, bias=False)
        self.dropout = nn.Dropout(0.1)
        # Squeeze-excite bottleneck (reduction 16).
        self.fc1 = nn.Conv2d(in_de_c, in_de_c // 16, kernel_size=1)
        self.fc2 = nn.Conv2d(in_de_c // 16, in_de_c, kernel_size=1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, edge, x):
        """Gate feature ``x`` with edge map ``edge`` (any resolution)."""
        edge = F.interpolate(edge, x.size()[2:], mode='bilinear', align_corners=True)
        x2 = edge * x
        fusion = torch.cat([x2, x], dim=1)
        fusion = self.conv3x3(fusion)
        # Global average pool; assumes square feature maps (kernel = height).
        w = F.avg_pool2d(fusion, fusion.size(2))
        w = F.relu(self.fc1(w))
        # Fix: F.sigmoid is deprecated; torch.sigmoid is numerically identical.
        w = torch.sigmoid(self.fc2(w))
        fusion = w * fusion
        return fusion


class BaseNetHead(nn.Module):
    """Prediction head: optional bilinear upsample, a 1x1 + 3x3 ConvBnRelu
    stack at an intermediate width, then a 1x1 classifier conv.

    Args:
        in_planes: input channel count.
        out_planes: number of output channels (classes).
        scale: upsample factor applied before the convs (skipped when <= 1).
        is_aux: chooses the intermediate width (64 for aux heads, else 32).
        norm_layer: normalization layer passed through to ConvBnRelu.
    """

    def __init__(self, in_planes, out_planes, scale, is_aux=False, norm_layer=nn.BatchNorm2d):
        super(BaseNetHead, self).__init__()
        width = 64 if is_aux else 32
        self.conv_1x1_3x3 = nn.Sequential(
            ConvBnRelu(in_planes, width, 1, 1, 0, has_bn=True, norm_layer=norm_layer,
                       has_relu=True, has_bias=False),
            ConvBnRelu(width, width, 3, 1, 1, has_bn=True, norm_layer=norm_layer,
                       has_relu=True, has_bias=False))
        # Dropout is constructed but disabled in forward (kept for compat).
        self.dropout = nn.Dropout(0.1)
        self.conv_1x1_2 = nn.Conv2d(width, out_planes, kernel_size=1,
                                    stride=1, padding=0)
        self.scale = scale

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, x):
        """Upsample (when scale > 1), refine, and classify."""
        if self.scale > 1:
            x = F.interpolate(x, scale_factor=self.scale, mode='bilinear', align_corners=True)
        return self.conv_1x1_2(self.conv_1x1_3x3(x))


class ConvBnRelu(nn.Module):
    """Conv2d optionally followed by normalization and ReLU.

    Args:
        in_planes, out_planes: conv channel counts.
        ksize, stride, pad, dilation, groups: standard Conv2d geometry.
        has_bn: apply normalization after the conv.
        norm_layer: normalization constructor, called as norm_layer(out_planes).
        has_relu: apply ReLU after normalization.
        inplace: in-place flag for the ReLU.
        has_bias: give the conv a bias term.
    """

    def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1,
                 groups=1, has_bn=True, norm_layer=nn.BatchNorm2d,
                 has_relu=True, inplace=True, has_bias=False):
        super(ConvBnRelu, self).__init__()

        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
                              stride=stride, padding=pad,
                              dilation=dilation, groups=groups, bias=has_bias)
        self.has_bn = has_bn
        if self.has_bn:
            # Fix: honor the norm_layer argument; it was accepted but the BN
            # was hard-coded to nn.BatchNorm2d (default behavior unchanged).
            self.bn = norm_layer(out_planes)
        self.has_relu = has_relu
        if self.has_relu:
            self.relu = nn.ReLU(inplace=inplace)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, x):
        x = self.conv(x)
        if self.has_bn:
            x = self.bn(x)
        if self.has_relu:
            x = self.relu(x)

        return x


class GPG_3(nn.Module):
    """Global Pyramid Guidance over the three deepest features.

    Each input is projected to ``width`` channels, upsampled to the
    shallowest of the three resolutions, concatenated, passed through three
    parallel dilated separable convs (rates 1/2/4), and fused back to
    ``width`` channels with a 1x1 conv + BN (no final ReLU).
    """

    def __init__(self, in_channels, width=512, up_kwargs=None, norm_layer=nn.BatchNorm2d):
        super(GPG_3, self).__init__()
        self.up_kwargs = up_kwargs  # interpolate kwargs; callers must supply them
        # Fix: honor the norm_layer argument throughout; it was accepted but
        # every norm was hard-coded to nn.BatchNorm2d (default unchanged).
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels[-3], width, 3, padding=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.conv_out = nn.Sequential(
            nn.Conv2d(3 * width, width, 1, padding=0, bias=False),
            norm_layer(width))

        self.dilation1 = nn.Sequential(
            SeparableConv2d(3 * width, width, kernel_size=3, padding=1, dilation=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.dilation2 = nn.Sequential(
            SeparableConv2d(3 * width, width, kernel_size=3, padding=2, dilation=2, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.dilation3 = nn.Sequential(
            SeparableConv2d(3 * width, width, kernel_size=3, padding=4, dilation=4, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, *inputs):
        """``inputs`` ordered shallow -> deep; output matches inputs[-3]'s size."""
        feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2]), self.conv3(inputs[-3])]
        _, _, h, w = feats[-1].size()
        # Upsample the deeper features to the shallowest resolution.
        feats[-2] = F.interpolate(feats[-2], (h, w), **self.up_kwargs)
        feats[-3] = F.interpolate(feats[-3], (h, w), **self.up_kwargs)
        feat = torch.cat(feats, dim=1)
        feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat)], dim=1)
        feat = self.conv_out(feat)
        return feat


class GPG_4(nn.Module):
    """Global Pyramid Guidance over the two deepest features.

    Both inputs are projected to ``width`` channels, the deeper one is
    upsampled to the shallower resolution, then two parallel dilated
    separable convs (rates 1/2) process the concatenation before a 1x1
    conv + BN fuses back to ``width`` channels (no final ReLU).
    """

    def __init__(self, in_channels, width=512, up_kwargs=None, norm_layer=nn.BatchNorm2d):
        super(GPG_4, self).__init__()
        self.up_kwargs = up_kwargs  # interpolate kwargs; callers must supply them

        # Fix: honor the norm_layer argument throughout; it was accepted but
        # every norm was hard-coded to nn.BatchNorm2d (default unchanged).
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.conv_out = nn.Sequential(
            nn.Conv2d(2 * width, width, 1, padding=0, bias=False),
            norm_layer(width))

        self.dilation1 = nn.Sequential(
            SeparableConv2d(2 * width, width, kernel_size=3, padding=1, dilation=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.dilation2 = nn.Sequential(
            SeparableConv2d(2 * width, width, kernel_size=3, padding=2, dilation=2, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, *inputs):
        """``inputs`` ordered shallow -> deep; output matches inputs[-2]'s size."""
        feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2])]
        _, _, h, w = feats[-1].size()
        feats[-2] = F.interpolate(feats[-2], (h, w), **self.up_kwargs)
        feat = torch.cat(feats, dim=1)
        feat = torch.cat([self.dilation1(feat), self.dilation2(feat)], dim=1)
        feat = self.conv_out(feat)
        return feat


class GPG_2(nn.Module):
    """Global Pyramid Guidance over the four deepest features.

    All inputs are projected to ``width`` channels and upsampled to the
    shallowest resolution; four parallel dilated separable convs
    (rates 1/2/4/8) process the concatenation before a 1x1 conv + BN fuses
    back to ``width`` channels (no final ReLU).
    """

    def __init__(self, in_channels, width=512, up_kwargs=None, norm_layer=nn.BatchNorm2d):
        super(GPG_2, self).__init__()
        self.up_kwargs = up_kwargs  # interpolate kwargs; callers must supply them

        # Fix: honor the norm_layer argument throughout; it was accepted but
        # every norm was hard-coded to nn.BatchNorm2d (default unchanged).
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels[-3], width, 3, padding=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels[-4], width, 3, padding=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))

        self.conv_out = nn.Sequential(
            nn.Conv2d(4 * width, width, 1, padding=0, bias=False),
            norm_layer(width))

        self.dilation1 = nn.Sequential(
            SeparableConv2d(4 * width, width, kernel_size=3, padding=1, dilation=1, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.dilation2 = nn.Sequential(
            SeparableConv2d(4 * width, width, kernel_size=3, padding=2, dilation=2, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.dilation3 = nn.Sequential(
            SeparableConv2d(4 * width, width, kernel_size=3, padding=4, dilation=4, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        self.dilation4 = nn.Sequential(
            SeparableConv2d(4 * width, width, kernel_size=3, padding=8, dilation=8, bias=False),
            norm_layer(width),
            nn.ReLU(inplace=True))
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, *inputs):
        """``inputs`` ordered shallow -> deep; output matches inputs[-4]'s size."""
        feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2]), self.conv3(inputs[-3]), self.conv2(inputs[-4])]
        _, _, h, w = feats[-1].size()
        # Upsample the three deeper features to the shallowest resolution.
        feats[-2] = F.interpolate(feats[-2], (h, w), **self.up_kwargs)
        feats[-3] = F.interpolate(feats[-3], (h, w), **self.up_kwargs)
        feats[-4] = F.interpolate(feats[-4], (h, w), **self.up_kwargs)
        feat = torch.cat(feats, dim=1)
        feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat), self.dilation4(feat)],
                         dim=1)
        feat = self.conv_out(feat)
        return feat


class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: depthwise conv -> BN -> 1x1 pointwise."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=False,
                 BatchNorm=nn.BatchNorm2d):
        super(SeparableConv2d, self).__init__()

        # Depthwise stage: one filter per input channel (groups=inplanes).
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation,
                               groups=inplanes, bias=bias)
        self.bn = BatchNorm(inplanes)
        # Pointwise stage: 1x1 conv mixes channels up/down to `planes`.
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.bn(self.conv1(x)))


# Module-level version tag (overwritten below when run as a script).
BaseNet_version = "V12"

if __name__ == '__main__':
    import os  # NOTE(review): imported but never used in this smoke test

    BaseNet_version = "V1"
    print(BaseNet_version)
    print(" Using V1 : 改变了basm")  # (Chinese: "changed BaSm")

    # Smoke test: build the model on GPU and print a per-layer summary for a
    # 3x256x256 input. Requires CUDA and the torchsummary package.
    model = BACPFNet()
    # print(model)
    # model = torch.nn.DataParallel(model).cuda()
    model.cuda()
    with torch.no_grad():
        model.eval()
        torchsummary.summary(model, (3, 256, 256))