# -*- coding: utf-8 -*-


import torch
from torchvision import models
import torch.nn as nn
from model.resnet import resnet34, resnet101
from torch.nn import functional as F
import torchsummary
from torch.nn import init
from model.Aspp import build_aspp

# Shared kwargs for F.interpolate throughout this file: bilinear upsampling
# with corner alignment.
up_kwargs = {'mode': 'bilinear', 'align_corners': True}


class CPFNet(nn.Module):
	"""Two-branch encoder/decoder network that predicts a segmentation map and
	an edge (boundary) map from a shared ResNet-34 backbone.

	Pipeline (see ``forward``): backbone stages -> GPG multi-scale fusion ->
	two ASPP heads (segmentation / edge) -> edge decoder -> edge prediction ->
	BaSm boundary-attention reweighting of encoder features -> segmentation
	decoder -> one main and three auxiliary segmentation predictions.

	NOTE(review): ``ccm``, ``is_training``, ``expansion`` and ``base_channel``
	are accepted for interface compatibility but unused in this version.
	"""
	#  'BaseNet': CPFNet(out_planes=args.num_classes)
	def __init__(self, out_planes=1, ccm=True, norm_layer=nn.BatchNorm2d, is_training=True, expansion=2, base_channel=32):
		super(CPFNet, self).__init__()
		self.out_chanel = out_planes  # (sic) number of output channels/classes

		self.network = "resnet34"
		# self.network = "resnet101"
		
		if self.network == "resnet34":
			self.backbone = resnet34(pretrained=True)
			# Channel widths of the five backbone stages (x1..x5).
			self.in_c = [64, 64, 128, 256, 512]
		# elif self.network == "resnet101":
		# 	self.backbone = resnet101(pretrained=True)
		# 	self.in_c = [64, 256, 512, 1024, 2048]
		
		# ASPP heads on the deepest stage: in_c[-1] input channels,
		# 256 output channels; one per task branch.
		self.aspp_o_c = 256
		self.aspp_s = build_aspp(self.in_c[-1], self.aspp_o_c)  # segmentation branch
		self.aspp_e = build_aspp(self.in_c[-1], self.aspp_o_c)  # edge branch
		
		self.in_de_c = 256  # unified channel width the decoders reduce encoder features to
		# args: stage channel list, ASPP output channels, reduced width,
		# task type (False == edge branch, True == segmentation branch).
		self.decoder_e = Decoder(self.in_c, self.aspp_o_c, in_de_c=self.in_de_c, task_type=False)
		self.decoder_s = Decoder(self.in_c, self.aspp_o_c, in_de_c=self.in_de_c, task_type=True)
		
		# Boundary-attention modules, one per encoder stage (x2..x5).
		self.BaSm2 = BaSm(self.in_c[-4], self.in_c[-4])
		self.BaSm3 = BaSm(self.in_c[-3], self.in_c[-3])
		self.BaSm4 = BaSm(self.in_c[-2], self.in_c[-2])
		self.BaSm5 = BaSm(self.in_c[-1], self.in_c[-1])
		
		# Main prediction heads for the edge map and segmentation map.
		self.main_head_e = BaseNetHead(self.in_c[-4], out_planes, 4, is_aux=False, norm_layer=norm_layer)
		# self.main_head_s = BaseNetHead(self.in_de_c, out_planes, 4, is_aux=False, norm_layer=norm_layer)
		
		# Auxiliary segmentation heads (deep supervision at each decoder level).
		self.main_head_s2 = BaseNetHead(self.in_c[-4], out_planes, 4, is_aux=False, norm_layer=norm_layer)
		self.main_head_s3 = BaseNetHead(self.in_c[-3], out_planes, 8, is_aux=False, norm_layer=norm_layer)
		self.main_head_s4 = BaseNetHead(self.in_c[-2], out_planes, 8, is_aux=False, norm_layer=norm_layer)
		self.main_head_s5 = BaseNetHead(self.in_c[-1], out_planes, 8, is_aux=False, norm_layer=norm_layer)
		
		# GPG (Global Pyramid Guidance) cross-stage fusion modules, one set
		# per branch (mce_* for edge inputs, mce_** for segmentation inputs).
		expan = [128, 256, 512]
		spatial_ch = [64, 64]
		self.mce_2 = GPG_2([spatial_ch[-1], expan[0], expan[1], expan[2]], width=spatial_ch[-1], up_kwargs=up_kwargs)
		self.mce_3 = GPG_3([expan[0], expan[1], expan[2]], width=expan[0], up_kwargs=up_kwargs)
		self.mce_4 = GPG_4([expan[1], expan[2]], width=expan[1], up_kwargs=up_kwargs)
		self.mce_22 = GPG_2([spatial_ch[-1], expan[0], expan[1], expan[2]], width=spatial_ch[-1], up_kwargs=up_kwargs)
		self.mce_33 = GPG_3([expan[0], expan[1], expan[2]], width=expan[0], up_kwargs=up_kwargs)
		self.mce_44 = GPG_4([expan[1], expan[2]], width=expan[1], up_kwargs=up_kwargs)
		
		self.sigmoid_e = torch.nn.Sigmoid()
		# self.sigmoid_s = torch.nn.Sigmoid()
		
		self.sigmoid_s2 = torch.nn.Sigmoid()
		self.sigmoid_s3 = torch.nn.Sigmoid()
		self.sigmoid_s4 = torch.nn.Sigmoid()
		self.sigmoid_s5 = torch.nn.Sigmoid()
	
	def forward(self, x):
		"""Run the network.

		Returns ``(out_seg2, out_edge, out_seg3, out_seg4, out_seg5)``: the
		main segmentation map, the edge map, and three auxiliary segmentation
		maps, each passed through its own sigmoid.
		"""
		# Backbone stem + stages; the size comments assume a 256x256 input.
		x = self.backbone.conv1(x)
		x = self.backbone.bn1(x)
		x1 = self.backbone.relu(x)      # 1/2      64, 128, 128
		x = self.backbone.maxpool(x1)
		
		x2 = self.backbone.layer1(x)    # 1/4      64, 64, 64
		x3 = self.backbone.layer2(x2)   # 1/8      128, 32, 32
		x4 = self.backbone.layer3(x3)   # 1/8      256, 32, 32
		x5 = self.backbone.layer4(x4)   # 1/8      512, 32, 32
		
		# print("-------------x1", x1.size())
		# print("-------------x2", x2.size())
		# print("-------------x3", x3.size())
		# print("-------------x4", x4.size())
		# print("-------------x5", x5.size())
		
		# First GPG pass: multi-scale fusion of the encoder features.
		x2 = self.mce_2(x2, x3, x4, x5)
		x3 = self.mce_3(x3, x4, x5)
		x4 = self.mce_4(x4, x5)
		
		# NOTE(review): x2/x3/x4 were reassigned just above, so this second
		# GPG pass consumes the already-fused features rather than the raw
		# backbone features — confirm this is intended.
		x22 = self.mce_22(x2, x3, x4, x5)
		x33 = self.mce_33(x3, x4, x5)
		x44 = self.mce_44(x4, x5)
					
		s5 = self.aspp_s(x5)  # segmentation context features
		e5 = self.aspp_e(x5)  # edge context features
		
		# print("-------------s5", s5.size())
		# print("-------------e5", e5.size())
		
		# Decode the edge features and predict the boundary map.
		edge2, edge3, edge4, edge5 = self.decoder_e(e5, x22, x33, x44, x5)
		
		# print("edge2.size()", edge2.size())
		
		edge = self.main_head_e(edge2)
		out_edge = self.sigmoid_e(edge)
					
		# print("edge.shape", edge.shape)
		# print("out_edge.shape", out_edge.shape)
		
		# Reweight each encoder stage with the predicted boundary map
		# (BaSm signature: forward(self, edge, x)).
		ed5 = self.BaSm5(out_edge, x5)
		ed4 = self.BaSm4(out_edge, x4)
		ed3 = self.BaSm3(out_edge, x3)
		ed2 = self.BaSm2(out_edge, x2)
		
		# print("ed5.shape", ed5.shape)
		# print("ed4.shape", ed4.shape)
		# print("ed3.shape", ed3.shape)
		# print("ed2.shape", ed2.shape)
		
		# Decode the segmentation features from the boundary-attended stages.
		
		seg2, seg3, seg4, seg5 = self.decoder_s(s5, ed2, ed3, ed4, ed5)
		
		# print(seg2.shape)
		# print(seg3.shape)
		# print(seg4.shape)
		# print(seg5.shape)
		
		seg2 = self.main_head_s2(seg2)
		seg3 = self.main_head_s3(seg3)
		seg4 = self.main_head_s4(seg4)
		seg5 = self.main_head_s5(seg5)
		
		out_seg2 = self.sigmoid_s2(seg2)
		out_seg3 = self.sigmoid_s3(seg3)
		out_seg4 = self.sigmoid_s4(seg4)
		out_seg5 = self.sigmoid_s5(seg5)
		
		# print("out_seg2.shape", out_seg2.shape)
		# print("out_seg3.shape", out_seg2.shape)
		# print("out_seg4.shape", out_seg2.shape)
		# print("out_seg5.shape", out_seg2.shape)
		
		return out_seg2, out_edge, out_seg3, out_seg4, out_seg5
	
	def _initialize_weights(self):
		"""Re-initialize all convolutions (He uniform) and BatchNorm layers.

		NOTE(review): defined but never called in ``__init__``; the submodules
		initialize themselves and the backbone is pretrained — confirm whether
		this is meant to be invoked.
		"""
		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_uniform_(m.weight.data)
				if m.bias is not None:
					m.bias.data.zero_()
			elif isinstance(m, nn.BatchNorm2d):
				init.normal_(m.weight.data, 1.0, 0.02)
				init.constant_(m.bias.data, 0.0)
			# m.weight.data.fill_(1)
			# m.bias.data.zero_()
			

class Decoder(nn.Module):
	"""Top-down decoder: fuses the deepest feature with an ASPP/bottleneck
	feature, then cascades upward through the shallower stages.

	Args:
		in_channels: channel widths of the encoder stages (shallow -> deep).
		bos_c: channel width of the bottleneck (ASPP output) feature.
		in_de_c: unified decoder channel width, forwarded to DecoderUnit.
		task_type: False for the edge branch, True for the segmentation
			branch. Kept for interface compatibility; both branches currently
			share identical wiring and outputs.
	"""
	def __init__(self, in_channels, bos_c, in_de_c=256, task_type=False):
		super(Decoder, self).__init__()
		self.task_type = task_type
		
		# Fusion units, deepest first; the last argument is the pooling size
		# used for channel attention inside DecoderUnit.
		self.fuse_x5_bos5 = DecoderUnit(in_channels[-1], bos_c, in_de_c, 32)
		self.fuse_x4_x5 = DecoderUnit(in_channels[-2], in_channels[-1], in_de_c, 32)
		self.fuse_x3_x4 = DecoderUnit(in_channels[-3], in_channels[-2], in_de_c, 32)
		self.fuse_x2_x3 = DecoderUnit(in_channels[-4], in_channels[-3], in_de_c, 64)
	
	# self.fuse_x1_x2 = DecoderUnit(in_channels[-5], in_channels[-4], in_de_c)
	
	def forward(self, bos5, *inputs):
		"""Cascade fusion.

		Args:
			bos5: bottleneck (ASPP) feature.
			inputs: encoder stage features ordered shallow -> deep,
				e.g. (x2, x3, x4, x5).

		Returns:
			Tuple (d2, d3, d4, d5) of decoded features, shallow -> deep.
		"""
		d5 = self.fuse_x5_bos5(inputs[-1], bos5)
		d4 = self.fuse_x4_x5(inputs[-2], d5)
		d3 = self.fuse_x3_x4(inputs[-3], d4)
		d2 = self.fuse_x2_x3(inputs[-4], d3)
		# d1 = self.fuse_x1_x2(inputs[-5], d2)
		
		# Fix: the original `if self.task_type / elif not self.task_type`
		# returned the identical tuple in both branches and left an implicit
		# `None` fallthrough; a single return is equivalent and total.
		return d2, d3, d4, d5


class DecoderUnit(nn.Module):
	"""Fuse a shallow ("pre") and a deep ("back") feature map.

	The deep feature is channel-projected and bilinearly upsampled to match
	the shallow one, both are concatenated and mixed by a 3x3 conv, and a
	pooled squeeze-excitation style gate reweights the shallow feature before
	it is added to the deep one.

	Note: ``in_de_c`` is accepted for interface compatibility but unused;
	the fused width equals ``pre_c``.
	"""
	def __init__(self, pre_c, back_c, in_de_c, pool_size):
		super(DecoderUnit, self).__init__()
		self.out_c = pre_c
		# 1x1 projection of the deep feature down to the fused width.
		self.conv_1x1_back = nn.Sequential(
			nn.Conv2d(back_c, self.out_c, 1, bias=False),
			nn.BatchNorm2d(self.out_c),
			nn.ReLU(inplace=True))
		
		# 1x1 projection of the shallow feature (only applied if widths differ).
		self.conv_1x1_pre = nn.Sequential(
			nn.Conv2d(pre_c, self.out_c, 1, bias=False),
			nn.BatchNorm2d(self.out_c),
			nn.ReLU(inplace=True))
		
		# 3x3 mixing conv applied to the concatenated pair.
		self.conv3x3 = nn.Sequential(
			nn.Conv2d(2 * self.out_c, self.out_c, 3, padding=1, bias=False),
			nn.BatchNorm2d(self.out_c),
			nn.ReLU(inplace=True))
		
		self.sigmoid = torch.nn.Sigmoid()
		
		self.pool = nn.AvgPool2d(pool_size)
		
		# Channel-attention bottleneck: out_c -> out_c/16 -> out_c.
		self.att_1x1_1 = nn.Sequential(
			nn.Conv2d(self.out_c, self.out_c // 16, 1, padding=0, bias=False),
			nn.ReLU(inplace=True))
		self.att_1x1_2 = nn.Sequential(
			nn.Conv2d(self.out_c // 16, self.out_c, 1, padding=0, bias=False))
		
		# He init for convs; BN weight ~ N(1, 0.02), zero bias.
		for module in self.modules():
			if isinstance(module, nn.Conv2d):
				nn.init.kaiming_uniform_(module.weight.data)
				if module.bias is not None:
					module.bias.data.zero_()
			elif isinstance(module, nn.BatchNorm2d):
				init.normal_(module.weight.data, 1.0, 0.02)
				init.constant_(module.bias.data, 0.0)
	
	def forward(self, pre, back):
		"""Return the fused feature at `pre`'s resolution with out_c channels."""
		# Align the deep feature: channels first, then spatial size.
		if back.size()[1] != self.out_c:
			back = self.conv_1x1_back(back)
		if back.size()[2:] != pre.size()[2:]:
			back = F.interpolate(back, pre.size()[2:], mode='bilinear', align_corners=True)
		# Align the shallow feature's channels if needed.
		if pre.size()[1] != self.out_c:
			pre = self.conv_1x1_pre(pre)
		
		fused = self.conv3x3(torch.cat([pre, back], dim=1))
		
		# Channel attention from the pooled fusion.
		gate = self.pool(fused)
		gate = self.att_1x1_2(self.att_1x1_1(gate))
		gate = self.sigmoid(gate)
		
		# NOTE(review): `gate` only broadcasts against `pre` when the pooled
		# map is 1x1, i.e. pool_size matches the input's spatial extent —
		# confirm against the caller's feature sizes.
		return pre * gate + back


class BaSm(nn.Module):
	"""Boundary-attention module.

	Projects the input feature map, gates it with both a sigmoid
	self-attention and the (upsampled) edge probability map, and fuses the
	gated sum back with the projected feature through a 3x3 conv.
	"""
	def __init__(self, inchannels, in_de_c):
		super(BaSm, self).__init__()
		# Despite the name, this is a 3x3 projection conv (kept for
		# checkpoint compatibility).
		self.conv1x1 = nn.Sequential(
			nn.Conv2d(inchannels, in_de_c, 3, padding=1, bias=False),
			nn.BatchNorm2d(in_de_c),
			nn.ReLU(inplace=True))
		
		self.sigmoid = nn.Sigmoid()
		self.conv3x3 = nn.Conv2d(in_de_c * 2, in_de_c, 3, padding=1, bias=False)
		self.dropout = nn.Dropout(0.1)  # defined but currently disabled in forward
		
		# He init for convs; BN weight ~ N(1, 0.02), zero bias.
		for module in self.modules():
			if isinstance(module, nn.Conv2d):
				nn.init.kaiming_uniform_(module.weight.data)
				if module.bias is not None:
					module.bias.data.zero_()
			elif isinstance(module, nn.BatchNorm2d):
				init.normal_(module.weight.data, 1.0, 0.02)
				init.constant_(module.bias.data, 0.0)
	
	def forward(self, edge, x):
		"""Fuse feature map `x` with edge map `edge` (broadcast over channels)."""
		# Resize the edge map to the feature map's spatial size.
		edge_map = F.interpolate(edge, x.size()[2:], mode='bilinear', align_corners=True)
		feat = self.conv1x1(x)
		self_gated = feat * self.sigmoid(feat)  # sigmoid self-attention
		edge_gated = edge_map * feat            # boundary attention
		merged = torch.cat([feat, self_gated + edge_gated], dim=1)
		# fusion = self.dropout(fusion)  # dropout kept disabled, as in the original
		return self.conv3x3(merged)


class BaseNetHead(nn.Module):
	"""Prediction head: optional bilinear upsampling by ``scale``, a
	1x1 -> 3x3 ConvBnRelu stack (64 mid channels when ``is_aux`` else 32),
	then a 1x1 projection to ``out_planes`` logits.
	"""
	def __init__(self, in_planes, out_planes, scale, is_aux=False, norm_layer=nn.BatchNorm2d):
		super(BaseNetHead, self).__init__()
		# Auxiliary heads use a wider intermediate width.
		mid = 64 if is_aux else 32
		self.conv_1x1_3x3 = nn.Sequential(
			ConvBnRelu(in_planes, mid, 1, 1, 0,
			           has_bn=True, norm_layer=norm_layer,
			           has_relu=True, has_bias=False),
			ConvBnRelu(mid, mid, 3, 1, 1,
			           has_bn=True, norm_layer=norm_layer,
			           has_relu=True, has_bias=False))
		self.dropout = nn.Dropout(0.1)  # defined but currently disabled in forward
		self.conv_1x1_2 = nn.Conv2d(mid, out_planes, kernel_size=1,
		                            stride=1, padding=0)
		self.scale = scale
		
		# He init for convs; BN weight ~ N(1, 0.02), zero bias.
		for module in self.modules():
			if isinstance(module, nn.Conv2d):
				nn.init.kaiming_uniform_(module.weight.data)
				if module.bias is not None:
					module.bias.data.zero_()
			elif isinstance(module, nn.BatchNorm2d):
				init.normal_(module.weight.data, 1.0, 0.02)
				init.constant_(module.bias.data, 0.0)
	
	def forward(self, x):
		"""Upsample (if scale > 1) and project to out_planes logits."""
		if self.scale > 1:
			x = F.interpolate(x, scale_factor=self.scale, mode='bilinear', align_corners=True)
		features = self.conv_1x1_3x3(x)
		# features = self.dropout(features)  # kept disabled, as in the original
		return self.conv_1x1_2(features)


class ConvBnRelu(nn.Module):
	"""Conv2d followed by optional normalization and optional ReLU.

	Args:
		in_planes / out_planes: conv input/output channels.
		ksize, stride, pad, dilation, groups: standard Conv2d arguments.
		has_bn: whether to apply ``norm_layer`` after the conv.
		norm_layer: normalization module class (default ``nn.BatchNorm2d``).
		has_relu: whether to apply ReLU (with ``inplace``) last.
		has_bias: whether the conv has a bias term.

	Fix: ``norm_layer`` was accepted but ignored — ``nn.BatchNorm2d`` was
	hard-coded. It is now honored; the default preserves original behavior.
	"""
	def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1,
	             groups=1, has_bn=True, norm_layer=nn.BatchNorm2d,
	             has_relu=True, inplace=True, has_bias=False):
		super(ConvBnRelu, self).__init__()
		
		self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
		                      stride=stride, padding=pad,
		                      dilation=dilation, groups=groups, bias=has_bias)
		self.has_bn = has_bn
		if self.has_bn:
			# Use the caller-provided norm layer instead of hard-coding BN.
			self.bn = norm_layer(out_planes)
		self.has_relu = has_relu
		if self.has_relu:
			self.relu = nn.ReLU(inplace=inplace)
		
		# He init for convs; BN weight ~ N(1, 0.02), zero bias.
		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_uniform_(m.weight.data)
				if m.bias is not None:
					m.bias.data.zero_()
			elif isinstance(m, nn.BatchNorm2d):
				init.normal_(m.weight.data, 1.0, 0.02)
				init.constant_(m.bias.data, 0.0)
	
	def forward(self, x):
		"""Apply conv, then norm and ReLU if enabled."""
		x = self.conv(x)
		if self.has_bn:
			x = self.bn(x)
		if self.has_relu:
			x = self.relu(x)
		
		return x


class GPG_3(nn.Module):
	"""Global Pyramid Guidance for the x3 stage.

	Projects the three deepest encoder features to ``width`` channels,
	upsamples the deeper two to x3's resolution, concatenates all three, runs
	parallel dilated separable convolutions (rates 1/2/4), and fuses the
	result back to ``width`` channels with a 1x1 conv + norm.

	Fix: ``norm_layer`` was accepted but ignored (``nn.BatchNorm2d`` was
	hard-coded everywhere); it is now honored. The default preserves the
	original behavior.
	"""
	def __init__(self, in_channels, width=512, up_kwargs=None, norm_layer=nn.BatchNorm2d):
		super(GPG_3, self).__init__()
		self.up_kwargs = up_kwargs  # kwargs forwarded to F.interpolate
		self.conv5 = nn.Sequential(
			nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False),
			norm_layer(width),
			nn.ReLU(inplace=True))
		self.conv4 = nn.Sequential(
			nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False),
			norm_layer(width),
			nn.ReLU(inplace=True))
		self.conv3 = nn.Sequential(
			nn.Conv2d(in_channels[-3], width, 3, padding=1, bias=False),
			norm_layer(width),
			nn.ReLU(inplace=True))
		self.conv_out = nn.Sequential(
			nn.Conv2d(3 * width, width, 1, padding=0, bias=False),
			norm_layer(width))
		
		self.dilation1 = nn.Sequential(SeparableConv2d(3 * width, width, kernel_size=3, padding=1, dilation=1, bias=False, BatchNorm=norm_layer),
		                               norm_layer(width),
		                               nn.ReLU(inplace=True))
		self.dilation2 = nn.Sequential(SeparableConv2d(3 * width, width, kernel_size=3, padding=2, dilation=2, bias=False, BatchNorm=norm_layer),
		                               norm_layer(width),
		                               nn.ReLU(inplace=True))
		self.dilation3 = nn.Sequential(SeparableConv2d(3 * width, width, kernel_size=3, padding=4, dilation=4, bias=False, BatchNorm=norm_layer),
		                               norm_layer(width),
		                               nn.ReLU(inplace=True))
		# He init for convs; BN weight ~ N(1, 0.02), zero bias.
		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_uniform_(m.weight.data)
				if m.bias is not None:
					m.bias.data.zero_()
			elif isinstance(m, nn.BatchNorm2d):
				init.normal_(m.weight.data, 1.0, 0.02)
				init.constant_(m.bias.data, 0.0)
	
	def forward(self, *inputs):
		"""Fuse (x3, x4, x5) — passed shallow -> deep — at x3's resolution."""
		feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2]), self.conv3(inputs[-3])]
		_, _, h, w = feats[-1].size()
		# Upsample the deeper features to the shallowest resolution.
		feats[-2] = F.interpolate(feats[-2], (h, w), **self.up_kwargs)
		feats[-3] = F.interpolate(feats[-3], (h, w), **self.up_kwargs)
		feat = torch.cat(feats, dim=1)
		# Parallel dilated branches, concatenated then fused back to `width`.
		feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat)], dim=1)
		feat = self.conv_out(feat)
		return feat


class GPG_4(nn.Module):
	"""Global Pyramid Guidance for the x4 stage.

	Projects the two deepest encoder features to ``width`` channels,
	upsamples the deeper one to x4's resolution, concatenates them, runs two
	parallel dilated separable convolutions (rates 1/2), and fuses the result
	back to ``width`` channels with a 1x1 conv + norm.

	Fix: ``norm_layer`` was accepted but ignored (``nn.BatchNorm2d`` was
	hard-coded everywhere); it is now honored. The default preserves the
	original behavior.
	"""
	def __init__(self, in_channels, width=512, up_kwargs=None, norm_layer=nn.BatchNorm2d):
		super(GPG_4, self).__init__()
		self.up_kwargs = up_kwargs  # kwargs forwarded to F.interpolate
		
		self.conv5 = nn.Sequential(
			nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False),
			norm_layer(width),
			nn.ReLU(inplace=True))
		self.conv4 = nn.Sequential(
			nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False),
			norm_layer(width),
			nn.ReLU(inplace=True))
		self.conv_out = nn.Sequential(
			nn.Conv2d(2 * width, width, 1, padding=0, bias=False),
			norm_layer(width))
		
		self.dilation1 = nn.Sequential(SeparableConv2d(2 * width, width, kernel_size=3, padding=1, dilation=1, bias=False, BatchNorm=norm_layer),
		                               norm_layer(width),
		                               nn.ReLU(inplace=True))
		self.dilation2 = nn.Sequential(SeparableConv2d(2 * width, width, kernel_size=3, padding=2, dilation=2, bias=False, BatchNorm=norm_layer),
		                               norm_layer(width),
		                               nn.ReLU(inplace=True))
		# He init for convs; BN weight ~ N(1, 0.02), zero bias.
		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_uniform_(m.weight.data)
				if m.bias is not None:
					m.bias.data.zero_()
			elif isinstance(m, nn.BatchNorm2d):
				init.normal_(m.weight.data, 1.0, 0.02)
				init.constant_(m.bias.data, 0.0)
	
	def forward(self, *inputs):
		"""Fuse (x4, x5) — passed shallow -> deep — at x4's resolution."""
		feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2])]
		_, _, h, w = feats[-1].size()
		# Upsample the deeper feature to the shallower resolution.
		feats[-2] = F.interpolate(feats[-2], (h, w), **self.up_kwargs)
		feat = torch.cat(feats, dim=1)
		# Parallel dilated branches, concatenated then fused back to `width`.
		feat = torch.cat([self.dilation1(feat), self.dilation2(feat)], dim=1)
		feat = self.conv_out(feat)
		return feat


class GPG_2(nn.Module):
	"""Global Pyramid Guidance for the x2 stage.

	Projects four encoder features to ``width`` channels, upsamples the
	deeper three to x2's resolution, concatenates all four, runs four
	parallel dilated separable convolutions (rates 1/2/4/8), and fuses the
	result back to ``width`` channels with a 1x1 conv + norm.

	Fix: ``norm_layer`` was accepted but ignored (``nn.BatchNorm2d`` was
	hard-coded everywhere); it is now honored. The default preserves the
	original behavior.
	"""
	def __init__(self, in_channels, width=512, up_kwargs=None, norm_layer=nn.BatchNorm2d):
		super(GPG_2, self).__init__()
		self.up_kwargs = up_kwargs  # kwargs forwarded to F.interpolate
		
		self.conv5 = nn.Sequential(
			nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False),
			norm_layer(width),
			nn.ReLU(inplace=True))
		self.conv4 = nn.Sequential(
			nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False),
			norm_layer(width),
			nn.ReLU(inplace=True))
		self.conv3 = nn.Sequential(
			nn.Conv2d(in_channels[-3], width, 3, padding=1, bias=False),
			norm_layer(width),
			nn.ReLU(inplace=True))
		self.conv2 = nn.Sequential(
			nn.Conv2d(in_channels[-4], width, 3, padding=1, bias=False),
			norm_layer(width),
			nn.ReLU(inplace=True))
		
		self.conv_out = nn.Sequential(
			nn.Conv2d(4 * width, width, 1, padding=0, bias=False),
			norm_layer(width))
		
		self.dilation1 = nn.Sequential(SeparableConv2d(4 * width, width, kernel_size=3, padding=1, dilation=1, bias=False, BatchNorm=norm_layer),
		                               norm_layer(width),
		                               nn.ReLU(inplace=True))
		self.dilation2 = nn.Sequential(SeparableConv2d(4 * width, width, kernel_size=3, padding=2, dilation=2, bias=False, BatchNorm=norm_layer),
		                               norm_layer(width),
		                               nn.ReLU(inplace=True))
		self.dilation3 = nn.Sequential(SeparableConv2d(4 * width, width, kernel_size=3, padding=4, dilation=4, bias=False, BatchNorm=norm_layer),
		                               norm_layer(width),
		                               nn.ReLU(inplace=True))
		self.dilation4 = nn.Sequential(SeparableConv2d(4 * width, width, kernel_size=3, padding=8, dilation=8, bias=False, BatchNorm=norm_layer),
		                               norm_layer(width),
		                               nn.ReLU(inplace=True))
		# He init for convs; BN weight ~ N(1, 0.02), zero bias.
		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_uniform_(m.weight.data)
				if m.bias is not None:
					m.bias.data.zero_()
			elif isinstance(m, nn.BatchNorm2d):
				init.normal_(m.weight.data, 1.0, 0.02)
				init.constant_(m.bias.data, 0.0)
	
	def forward(self, *inputs):
		"""Fuse (x2, x3, x4, x5) — passed shallow -> deep — at x2's resolution."""
		feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2]), self.conv3(inputs[-3]), self.conv2(inputs[-4])]
		_, _, h, w = feats[-1].size()
		# Upsample the deeper features to the shallowest resolution.
		feats[-2] = F.interpolate(feats[-2], (h, w), **self.up_kwargs)
		feats[-3] = F.interpolate(feats[-3], (h, w), **self.up_kwargs)
		feats[-4] = F.interpolate(feats[-4], (h, w), **self.up_kwargs)
		feat = torch.cat(feats, dim=1)
		# Parallel dilated branches, concatenated then fused back to `width`.
		feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat), self.dilation4(feat)], dim=1)
		feat = self.conv_out(feat)
		return feat



class SeparableConv2d(nn.Module):
	"""Depthwise-separable convolution: a depthwise conv (groups == inplanes),
	normalization, then a 1x1 pointwise conv to ``planes`` channels.
	"""
	def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=False, BatchNorm=nn.BatchNorm2d):
		super(SeparableConv2d, self).__init__()

		# Depthwise stage: one filter per input channel.
		self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride,
		                       padding, dilation, groups=inplanes, bias=bias)
		self.bn = BatchNorm(inplanes)
		# Pointwise stage: 1x1 conv mixing channels to `planes`.
		self.pointwise = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1,
		                           padding=0, dilation=1, groups=1, bias=bias)

	def forward(self, x):
		"""Apply depthwise conv, norm, then the pointwise projection."""
		out = self.conv1(x)
		out = self.bn(out)
		return self.pointwise(out)


# Version tag for this model file.
BaseNet_version = "V12"

if __name__ == '__main__':
	# Smoke test: build the model on GPU and print a per-layer summary.
	# Requires a CUDA device and the `torchsummary` package.
	import os
	
	BaseNet_version = "V12"
	print(BaseNet_version)
	print(" Using BaseNet_V12 : 加入多层级融合在编码器阶段")
	
	os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
	model = CPFNet()
	# print(model)
	# model = torch.nn.DataParallel(model).cuda()
	model.cuda()
	with torch.no_grad():
		model.eval()
		torchsummary.summary(model, (3, 256, 256))
