# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 09:57:49 2019

@author: Fsl
"""

import torch
from torchvision import models
import torch.nn as nn
from model.resnet import resnet34, resnet101
from torch.nn import functional as F
import torchsummary
from torch.nn import init
from model.Aspp import build_aspp

up_kwargs = {'mode': 'bilinear', 'align_corners': True}


# **********V8: boundary-assisted segmentation -- uses feature maps as auxiliary guidance and introduces original-image data at multiple scales

class CPFNet(nn.Module):
	"""Boundary-assisted segmentation network (variant V8).

	A ResNet-34 backbone feeds two ASPP modules, one per branch: the edge
	decoder produces per-scale edge features, which gate the encoder
	features through BaSm attention blocks before the segmentation decoder
	fuses them into the final prediction.

	NOTE(review): ccm, is_training, expansion and base_channel are accepted
	but never used in this class -- presumably kept for interface
	compatibility with other BaseNet variants; confirm before removing.
	"""
	#  'BaseNet': CPFNet(out_planes=args.num_classes)
	def __init__(self, out_planes=1, ccm=True, norm_layer=nn.BatchNorm2d, is_training=True, expansion=2, base_channel=32):
		super(CPFNet, self).__init__()
		self.out_chanel = out_planes  # (sic: "chanel") number of output channels/classes

		print('v8')  # debug banner identifying the variant
		self.network = "resnet34"
		# self.network = "resnet101"

		if self.network == "resnet34":
			self.backbone = resnet34(pretrained=True)
			self.in_c = [64, 64, 128, 256, 512]  # channels of stages x1..x5
		# elif self.network == "resnet101":
		# 	self.backbone = resnet101(pretrained=True)
		# 	self.in_c = [64, 256, 512, 1024, 2048]

		# ASPP: input channels are in_c[-1],
		# output channels are 256.
		self.aspp_o_c = 256
		self.aspp_s = build_aspp(self.in_c[-1], self.aspp_o_c)  # segmentation branch
		self.aspp_e = build_aspp(self.in_c[-1], self.aspp_o_c)  # edge branch

		self.in_de_c = 256  # common width the decoders reduce encoder features to
		#                        channel list   ASPP out channels      decoder width         task type: False == edge, True == seg
		self.decoder_e = Decoder(self.in_c, self.aspp_o_c, in_de_c=self.in_de_c, task_type=False)
		self.decoder_s = Decoder(self.in_c, self.aspp_o_c, in_de_c=self.in_de_c, task_type=True)

		# Boundary-attention modules, one per encoder scale.
		self.BaSm2 = BaSm(self.in_c[-4], self.in_de_c)
		self.BaSm3 = BaSm(self.in_c[-3], self.in_de_c)
		self.BaSm4 = BaSm(self.in_c[-2], self.in_de_c)
		self.BaSm5 = BaSm(self.in_c[-1], self.in_de_c)

		# Main edge-map head; the dedicated segmentation head is disabled
		# (seg heads below serve that role).
		self.main_head_e = BaseNetHead(self.in_de_c, out_planes, 4, is_aux=False, norm_layer=norm_layer)
		# self.main_head_s = BaseNetHead(self.in_de_c, out_planes, 4, is_aux=False, norm_layer=norm_layer)

		# Segmentation heads, one per decoder scale (s2 is the main output).
		self.main_head_s2 = BaseNetHead(self.in_de_c, out_planes, 4, is_aux=False, norm_layer=norm_layer)
		self.main_head_s3 = BaseNetHead(self.in_de_c, out_planes, 8, is_aux=False, norm_layer=norm_layer)
		self.main_head_s4 = BaseNetHead(self.in_de_c, out_planes, 8, is_aux=False, norm_layer=norm_layer)
		self.main_head_s5 = BaseNetHead(self.in_de_c, out_planes, 8, is_aux=False, norm_layer=norm_layer)

		self.sigmoid_e = torch.nn.Sigmoid()
		# self.sigmoid_s = torch.nn.Sigmoid()

		self.sigmoid_s2 = torch.nn.Sigmoid()
		self.sigmoid_s3 = torch.nn.Sigmoid()
		self.sigmoid_s4 = torch.nn.Sigmoid()
		self.sigmoid_s5 = torch.nn.Sigmoid()

	def forward(self, x):
		"""Run the network on an image batch.

		Returns sigmoid-activated maps in the order
		(out_seg2, out_edge, out_seg3, out_seg4, out_seg5), where out_seg2
		is the main segmentation prediction and out_edge the edge map;
		seg3..seg5 are coarser auxiliary predictions.
		"""
		x = self.backbone.conv1(x)
		x = self.backbone.bn1(x)
		x1 = self.backbone.relu(x)  # 1/2  64
		x = self.backbone.maxpool(x1)
		#                                model     resnet34       resnet101
		x2 = self.backbone.layer1(x)  # 1/4      64              256
		x3 = self.backbone.layer2(x2)  # 1/8      128             512
		x4 = self.backbone.layer3(x3)  # 1/8      256             1024
		x5 = self.backbone.layer4(x4)  # 1/8      512             2048
		# NOTE(review): the 1/8 annotations for layer3/layer4 assume the
		# project resnet keeps them at stride 1/8 (dilated) -- verify in
		# model/resnet.py.

		# print("x2.shape :", x2.shape)
		# print("x3.shape :", x3.shape)
		# print("x4.shape :", x4.shape)
		# print("x5.shape :", x5.shape)

		s5 = self.aspp_s(x5)  # segmentation context features
		e5 = self.aspp_e(x5)  # edge context features

		# Edge prediction: decode per-scale edge features, then head + sigmoid.
		edge2, edge3, edge4, edge5 = self.decoder_e(e5, x2, x3, x4, x5)
		
		edge = self.main_head_e(edge2)
		out_edge = self.sigmoid_e(edge)

		# print("edge.shape", edge.shape)
		# print("out_edge.shape", out_edge.shape)

		# Gate each encoder scale with its edge features (BaSm.forward(edge, x)).
		ed5 = self.BaSm5(edge5, x5)
		ed4 = self.BaSm4(edge4, x4)
		ed3 = self.BaSm3(edge3, x3)
		ed2 = self.BaSm2(edge2, x2)

		# print("ed5.shape", ed5.shape)
		# print("ed4.shape", ed4.shape)
		# print("ed3.shape", ed3.shape)
		# print("ed2.shape", ed2.shape)

		# Segmentation prediction from the edge-gated features.

		seg2, seg3, seg4, seg5 = self.decoder_s(s5, ed2, ed3, ed4, ed5)

		# print(seg2.shape)
		# print(seg3.shape)
		# print(seg4.shape)
		# print(seg5.shape)

		seg2 = self.main_head_s2(seg2)
		seg3 = self.main_head_s3(seg3)
		seg4 = self.main_head_s4(seg4)
		seg5 = self.main_head_s5(seg5)

		out_seg2 = self.sigmoid_s2(seg2)
		out_seg3 = self.sigmoid_s3(seg3)
		out_seg4 = self.sigmoid_s4(seg4)
		out_seg5 = self.sigmoid_s5(seg5)

		# print("out_seg2.shape", out_seg2.shape)
		# print("out_seg3.shape", out_seg2.shape)
		# print("out_seg4.shape", out_seg2.shape)
		# print("out_seg5.shape", out_seg2.shape)

		return out_seg2, out_edge, out_seg3, out_seg4, out_seg5

	def _initialize_weights(self):
		# Kaiming-uniform for convs, N(1, 0.02) for BatchNorm scales.
		# NOTE: apparently unused -- submodules run the same init loop in
		# their own constructors.
		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_uniform_(m.weight.data)
				if m.bias is not None:
					m.bias.data.zero_()
			elif isinstance(m, nn.BatchNorm2d):
				init.normal_(m.weight.data, 1.0, 0.02)
				init.constant_(m.bias.data, 0.0)
	# m.weight.data.fill_(1)
	# m.bias.data.zero_()


class Decoder(nn.Module):
	"""Top-down decoder over four encoder scales.

	Fuses the deepest features with an ASPP context map (bos5), then walks
	up the pyramid with one DecoderUnit per scale, returning the fused map
	at every level.
	"""

	def __init__(self, in_channels, bos_c, in_de_c=128, task_type=False):
		"""
		in_channels: per-stage encoder channel list (x1..x5).
		bos_c:       channel count of the ASPP context input.
		in_de_c:     common decoder width every unit reduces to.
		task_type:   False == edge decoder, True == segmentation decoder.
		             Kept for interface compatibility; the original code
		             branched on it but both branches were identical.
		"""
		super(Decoder, self).__init__()
		self.task_type = task_type

		self.fuse_x5_bos5 = DecoderUnit(in_channels[-1], bos_c, in_de_c, 4)
		self.fuse_x4_x5 = DecoderUnit(in_channels[-2], in_channels[-1], in_de_c, 4)
		self.fuse_x3_x4 = DecoderUnit(in_channels[-3], in_channels[-2], in_de_c, 4)
		self.fuse_x2_x3 = DecoderUnit(in_channels[-4], in_channels[-3], in_de_c, 8)

	# self.fuse_x1_x2 = DecoderUnit(in_channels[-5], in_channels[-4], in_de_c)

	def forward(self, bos5, *inputs):
		"""inputs = (x2, x3, x4, x5); returns fused maps (d2, d3, d4, d5)."""
		d5 = self.fuse_x5_bos5(inputs[-1], bos5)
		d4 = self.fuse_x4_x5(inputs[-2], d5)
		d3 = self.fuse_x3_x4(inputs[-3], d4)
		d2 = self.fuse_x2_x3(inputs[-4], d3)
		# d1 = self.fuse_x1_x2(inputs[-5], d2)

		# Fix: the original if/elif on task_type returned the same tuple in
		# both branches (dead duplication); a single return is equivalent.
		return d2, d3, d4, d5


class DecoderUnit(nn.Module):
	"""Fuses a shallower (pre) and a deeper (back) feature map.

	Both inputs are projected to the common decoder width (only when their
	channel count differs from it), the deeper map is bilinearly upsampled
	to the shallower map's size, the pair is concatenated and merged by a
	3x3 conv, and finally a pooled sigmoid attention branch gates the
	result residually: fusion * att + fusion.
	"""

	def __init__(self, pre_c, back_c, in_de_c, pool_size):
		"""
		pre_c:     channels of the shallower input.
		back_c:    channels of the deeper input.
		in_de_c:   common decoder width (output channels).
		pool_size: kernel/stride of the average pool in the attention branch.
		"""
		super(DecoderUnit, self).__init__()
		self.out_c = in_de_c
		# 1x1 projections bring either input to the common decoder width.
		self.conv_1x1_back = nn.Sequential(
			nn.Conv2d(back_c, self.out_c, 1, bias=False),
			nn.BatchNorm2d(self.out_c),
			nn.ReLU(inplace=True))

		self.conv_1x1_pre = nn.Sequential(
			nn.Conv2d(pre_c, self.out_c, 1, bias=False),
			nn.BatchNorm2d(self.out_c),
			nn.ReLU(inplace=True))

		self.conv3x3 = nn.Sequential(
			nn.Conv2d(2 * self.out_c, self.out_c, 3, padding=1, bias=False),
			nn.BatchNorm2d(self.out_c),
			nn.ReLU(inplace=True))

		self.sigmoid = torch.nn.Sigmoid()

		# Attention branch: average-pool, squeeze to out_c // 4, expand back.
		# NOTE: despite the historical attribute names, these are 3x3 convs.
		self.pool = nn.AvgPool2d(pool_size, pool_size)
		self.att_1x1_1 = nn.Sequential(
			nn.Conv2d(self.out_c, self.out_c // 4, 3, padding=1, bias=False),
			nn.ReLU(inplace=True))
		self.att_1x1_2 = nn.Sequential(
			nn.Conv2d(self.out_c // 4, self.out_c, 3, padding=1, bias=False),
			nn.ReLU(inplace=True))

		# Kaiming-uniform for convs, N(1, 0.02) for BatchNorm scales.
		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_uniform_(m.weight.data)
				if m.bias is not None:
					m.bias.data.zero_()
			elif isinstance(m, nn.BatchNorm2d):
				init.normal_(m.weight.data, 1.0, 0.02)
				init.constant_(m.bias.data, 0.0)

	def forward(self, pre, back):
		"""Return the fused map at pre's spatial size with out_c channels."""
		# Project/resize lazily so inputs already at decoder width (e.g. the
		# segmentation branch, reduced inside BaSm) pass through unchanged.
		if back.size()[1] != self.out_c:
			back = self.conv_1x1_back(back)
		if back.size()[2:] != pre.size()[2:]:
			# Same settings as the module-level up_kwargs, inlined so the
			# unit is self-contained.
			back = F.interpolate(back, pre.size()[2:], mode='bilinear', align_corners=True)
		if pre.size()[1] != self.out_c:
			pre = self.conv_1x1_pre(pre)
		fusion = torch.cat([pre, back], dim=1)
		fusion = self.conv3x3(fusion)

		# Pooled self-attention, upsampled to full size, applied residually.
		pool_att = self.pool(fusion)
		pool_att = self.att_1x1_1(pool_att)
		pool_att = self.att_1x1_2(pool_att)
		pool_att = F.interpolate(pool_att, fusion.size()[2:], mode='bilinear', align_corners=True)
		pool_att = self.sigmoid(pool_att)
		return fusion * pool_att + fusion


class BaSm(nn.Module):
	"""Boundary-attention module.

	Projects the encoder feature map down to the decoder width, combines a
	sigmoid self-attention term with an edge-gated term, and fuses the sum
	with the projected features through a 3x3 convolution plus dropout.
	"""

	def __init__(self, inchannels, in_de_c):
		super(BaSm, self).__init__()
		# NOTE: named conv1x1 for historical reasons; it is a 3x3 conv block.
		self.conv1x1 = nn.Sequential(
			nn.Conv2d(inchannels, in_de_c, 3, padding=1, bias=False),
			nn.BatchNorm2d(in_de_c),
			nn.ReLU(inplace=True))

		self.sigmoid = nn.Sigmoid()
		self.conv3x3 = nn.Conv2d(in_de_c*2, in_de_c, 3, padding=1, bias=False)
		self.dropout = nn.Dropout(0.1)

		# Kaiming-uniform for convs, N(1, 0.02) for BatchNorm scales.
		for module in self.modules():
			if isinstance(module, nn.Conv2d):
				nn.init.kaiming_uniform_(module.weight.data)
				if module.bias is not None:
					module.bias.data.zero_()
			elif isinstance(module, nn.BatchNorm2d):
				init.normal_(module.weight.data, 1.0, 0.02)
				init.constant_(module.bias.data, 0.0)

	def forward(self, edge, x):
		"""Fuse encoder features `x` with an edge map `edge` (decoder width)."""
		feat = self.conv1x1(x)                 # reduce channels to decoder width
		self_gate = feat * self.sigmoid(feat)  # sigmoid self-attention
		edge_gate = edge * feat                # boundary attention
		merged = torch.cat([feat, self_gate + edge_gate], dim=1)
		return self.dropout(self.conv3x3(merged))


class BaseNetHead(nn.Module):
	"""Prediction head: upsample by `scale`, refine with 1x1 + 3x3 convs,
	apply dropout, then project to `out_planes` with a 1x1 conv.

	`is_aux` only selects the intermediate width (64 for auxiliary heads,
	32 for main heads); the original duplicated the whole construction in
	two branches differing only in that number.
	"""
	#                  64            1              是否中间缩一下通道数到32
	# BaseNetHead(spatial_ch[0], out_planes, 2, is_aux=False, norm_layer=norm_layer)
	def __init__(self, in_planes, out_planes, scale, is_aux=False, norm_layer=nn.BatchNorm2d):
		super(BaseNetHead, self).__init__()
		mid_planes = 64 if is_aux else 32
		self.conv_1x1_3x3 = nn.Sequential(
			ConvBnRelu(in_planes, mid_planes, 1, 1, 0,
			           has_bn=True, norm_layer=norm_layer,
			           has_relu=True, has_bias=False),
			ConvBnRelu(mid_planes, mid_planes, 3, 1, 1,
			           has_bn=True, norm_layer=norm_layer,
			           has_relu=True, has_bias=False))
		self.dropout = nn.Dropout(0.5)
		self.conv_1x1_2 = nn.Conv2d(mid_planes, out_planes, kernel_size=1,
		                            stride=1, padding=0)
		self.scale = scale

		# Kaiming-uniform for convs, N(1, 0.02) for BatchNorm scales.
		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_uniform_(m.weight.data)
				if m.bias is not None:
					m.bias.data.zero_()
			elif isinstance(m, nn.BatchNorm2d):
				init.normal_(m.weight.data, 1.0, 0.02)
				init.constant_(m.bias.data, 0.0)

	def forward(self, x):
		# Upsample first so the refinement convs run at output resolution.
		if self.scale > 1:
			x = F.interpolate(x, scale_factor=self.scale, mode='bilinear', align_corners=True)
		fm = self.conv_1x1_3x3(x)
		fm = self.dropout(fm)
		output = self.conv_1x1_2(fm)
		return output


class ConvBnRelu(nn.Module):
	"""Conv2d optionally followed by a normalization layer and a ReLU.

	Bug fix: the normalization layer is now built from the `norm_layer`
	argument; the original accepted it but always instantiated
	nn.BatchNorm2d, silently ignoring the caller's choice.  Behavior is
	unchanged for the default norm_layer=nn.BatchNorm2d.
	"""

	def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1,
	             groups=1, has_bn=True, norm_layer=nn.BatchNorm2d,
	             has_relu=True, inplace=True, has_bias=False):
		super(ConvBnRelu, self).__init__()

		self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
		                      stride=stride, padding=pad,
		                      dilation=dilation, groups=groups, bias=has_bias)
		self.has_bn = has_bn
		if self.has_bn:
			# Honour the caller-supplied norm layer (fix for the ignored arg).
			self.bn = norm_layer(out_planes)
		self.has_relu = has_relu
		if self.has_relu:
			self.relu = nn.ReLU(inplace=inplace)

		# Kaiming-uniform for convs, N(1, 0.02) for BatchNorm scales.
		# Non-BatchNorm2d norm layers keep their own default init.
		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_uniform_(m.weight.data)
				if m.bias is not None:
					m.bias.data.zero_()
			elif isinstance(m, nn.BatchNorm2d):
				init.normal_(m.weight.data, 1.0, 0.02)
				init.constant_(m.bias.data, 0.0)

	def forward(self, x):
		# Conv -> (norm) -> (ReLU); either trailing stage can be disabled.
		x = self.conv(x)
		if self.has_bn:
			x = self.bn(x)
		if self.has_relu:
			x = self.relu(x)

		return x


# Variant tag for this file's network definition.
BaseNet_version = "V8"
# print(BaseNet_version)
# print(" Using BaseNet_V7 : 边界辅助分割，参考透明物体分割,加入多个损失,resnet101")

if __name__ == '__main__':
	import os

	# Smoke test: build the model on GPU 0 and print a per-layer summary
	# for a 3x256x256 input.  Requires CUDA and the torchsummary package.
	# NOTE(review): the banner below still says "V7" while BaseNet_version
	# is "V8" -- confirm which is intended before relying on the printout.
	BaseNet_version = "V8"
	print(BaseNet_version)
	print(" Using BaseNet_V7 : 边界辅助分割，参考透明物体分割,加入多个损失,resnet101")

	os.environ['CUDA_VISIBLE_DEVICES'] = '0'
	model = CPFNet()
	# print(model)
	# model = torch.nn.DataParallel(model).cuda()
	model.cuda()
	with torch.no_grad():
		model.eval()
		torchsummary.summary(model, (3, 256, 256))
