# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 09:57:49 2019

@author: Fsl
"""

import torch
from torchvision import models
import torch.nn as nn
from model.resnet import resnet34, resnet101
from torch.nn import functional as F
import torchsummary
from torch.nn import init
from model.Aspp import build_aspp

up_kwargs = {'mode': 'bilinear', 'align_corners': True}


# ********** CCA_CGL7: re-implementation of "Cascaded Context Enhancement for Automated Skin Lesion Segmentation"

class CPFNet(nn.Module):
	"""Cascaded context enhancement segmentation network.

	Encoder: ResNet-101 backbone. Decoder: ASPP over the deepest encoder
	features, a cascaded context aggregation branch (CCA) fed by every
	encoder stage, and a context-guided module (CGL) that fuses the two.
	Returns sigmoid-activated main and auxiliary segmentation maps.
	"""

	def __init__(self, out_planes=1, norm_layer=nn.BatchNorm2d):
		super(CPFNet, self).__init__()
		self.out_chanel = out_planes
		self.network = "resnet101"
		if self.network == "resnet101":
			self.backbone = resnet101(pretrained=True)
			# Channel widths of [stem, layer1..layer4] for ResNet-101.
			self.in_c = [64, 256, 512, 1024, 2048]

		# Every decoder stream is reduced to this common channel width.
		self.in_de_c = 256
		self.aspp_o_c = 256
		self.aspp_s = build_aspp(self.in_c[-1], self.aspp_o_c)
		self.cca = CCA(self.in_c, in_de_c=self.in_de_c)
		self.cgl = CGL(in_de_c=self.in_de_c)
		# 1x1 fusion of the three concatenated decoder streams (fc, fa, fl).
		self.main_out_1x1 = ConvBnRelu(3 * self.in_de_c, self.in_de_c, 1, 1, 0)

		# Prediction heads; both upsample by 8x back to input resolution.
		self.main_head = BaseNetHead(self.in_de_c, out_planes, 8)
		self.aux_head = BaseNetHead(self.in_de_c, out_planes, 8)
		self.sigmoid_main = torch.nn.Sigmoid()
		self.sigmoid_aux = torch.nn.Sigmoid()

	def forward(self, x):
		image = x  # full-resolution input, reused by the CCA branch
		stem = self.backbone.relu(self.backbone.bn1(self.backbone.conv1(x)))
		pooled = self.backbone.maxpool(stem)
		# Encoder stages (channels for resnet101: 256/512/1024/2048; exact
		# strides depend on model.resnet's dilation setup — TODO confirm).
		e2 = self.backbone.layer1(pooled)
		e3 = self.backbone.layer2(e2)
		e4 = self.backbone.layer3(e3)
		e5 = self.backbone.layer4(e4)

		fa = self.aspp_s(e5)                       # ASPP context features
		fc = self.cca(image, e2, e3, e4, e5)       # cascaded aggregation
		fl = self.cgl(fc, fa)                      # context-guided fusion
		fd = self.main_out_1x1(torch.cat([fc, fa, fl], dim=1))

		main_out = self.sigmoid_main(self.main_head(fd))
		aux_out = self.sigmoid_aux(self.aux_head(fc))
		return main_out, aux_out

	def _initialize_weights(self):
		"""Kaiming-uniform convs, N(1, 0.02) BatchNorm scales, zero biases."""
		for module in self.modules():
			if isinstance(module, nn.Conv2d):
				nn.init.kaiming_uniform_(module.weight.data)
				if module.bias is not None:
					module.bias.data.zero_()
			elif isinstance(module, nn.BatchNorm2d):
				init.normal_(module.weight.data, 1.0, 0.02)
				init.constant_(module.bias.data, 0.0)


class CCA(nn.Module):
	"""Cascaded context aggregation: chains four CCA units, each fusing the
	running decoder feature with one encoder stage plus the raw input image.
	"""

	def __init__(self, in_channels, in_de_c=256):
		super(CCA, self).__init__()
		self.unit1 = CCA_unit(in_channels[1], in_de_c, first=True)
		self.unit2 = CCA_unit(in_channels[2], in_de_c, first=False)
		self.unit3 = CCA_unit(in_channels[3], in_de_c, first=False)
		self.unit4 = CCA_unit(in_channels[4], in_de_c, first=False)

	def forward(self, x, *inputs):
		# Seed value is ignored by unit1 (first=True replaces pre with cur).
		fused = x
		units = (self.unit1, self.unit2, self.unit3, self.unit4)
		for unit, feature in zip(units, inputs):
			fused = unit(x, fused, feature)
		return fused


class CCA_unit(nn.Module):
	"""One stage of cascaded context aggregation.

	Fuses three inputs at the current stage's spatial size:
	  * ``down`` — the raw RGB image, resized and lifted to ``in_de_c``;
	  * ``pre``  — the previous stage's output (replaced by ``cur`` when
	    ``first=True``);
	  * ``cur``  — the current encoder feature, reduced to ``in_de_c``.
	Two sigmoid gates derived from [pre, cur] re-weight ``down`` and
	``pre`` before everything is summed with ``cur``.
	"""

	def __init__(self, cur_c, in_de_c, first=False):
		super(CCA_unit, self).__init__()
		self.out_c = in_de_c
		self.first = first

		# 3x3 conv lifting the 3-channel image to the decoder width.
		self.conv_down = nn.Sequential(
			nn.Conv2d(3, self.out_c, 3, padding=1, bias=False),
			nn.BatchNorm2d(self.out_c),
			nn.ReLU(inplace=True))

		# 1x1 channel reduction of the current encoder feature.
		self.cur_1x1 = nn.Sequential(
			nn.Conv2d(cur_c, self.out_c, 1, padding=0, bias=False),
			nn.BatchNorm2d(self.out_c),
			nn.ReLU(inplace=True))

		# Two gating branches over the concatenated [pre, cur] pair.
		self.fusion_1x1_1 = nn.Sequential(
			nn.Conv2d(2 * self.out_c, self.out_c, 1, bias=False),
			nn.BatchNorm2d(self.out_c),
			nn.ReLU(inplace=True))

		self.fusion_1x1_2 = nn.Sequential(
			nn.Conv2d(2 * self.out_c, self.out_c, 1, bias=False),
			nn.BatchNorm2d(self.out_c),
			nn.ReLU(inplace=True))

		self.sigmoid1 = torch.nn.Sigmoid()
		self.sigmoid2 = torch.nn.Sigmoid()

		# Kaiming-uniform convs, N(1, 0.02) BN scales, zero biases.
		for module in self.modules():
			if isinstance(module, nn.Conv2d):
				nn.init.kaiming_uniform_(module.weight.data)
				if module.bias is not None:
					module.bias.data.zero_()
			elif isinstance(module, nn.BatchNorm2d):
				init.normal_(module.weight.data, 1.0, 0.02)
				init.constant_(module.bias.data, 0.0)

	def forward(self, down, pre, cur):
		target_size = cur.size()[2:]
		# Resize the image to the current feature size, then extract features.
		down = self.conv_down(F.interpolate(down, target_size, **up_kwargs))
		cur = self.cur_1x1(cur)
		if self.first:
			# First stage has no predecessor — reuse the current feature.
			pre = cur
		elif pre.size()[2:] != target_size:
			pre = F.interpolate(pre, target_size, **up_kwargs)
		gate_in = torch.cat([pre, cur], dim=1)
		att_down = self.sigmoid1(self.fusion_1x1_1(gate_in))
		att_pre = self.sigmoid2(self.fusion_1x1_2(gate_in))
		# Gated residual sum of all three streams.
		return cur + att_down * down + att_pre * pre


class CGL(nn.Module):
	"""Context-guided module: gates the CCA features ``fc`` with a sigmoid
	mask learned from [fa, fc], refines the gated sum through position
	attention, and adds the ASPP features ``fa`` back as a residual.
	"""

	def __init__(self, in_de_c=256):
		super(CGL, self).__init__()
		self.out_c = in_de_c
		self.fusion_1x1 = nn.Sequential(
			nn.Conv2d(2 * self.out_c, self.out_c, 1, bias=False),
			nn.BatchNorm2d(self.out_c),
			nn.ReLU(inplace=True))
		self.sigmoid1 = torch.nn.Sigmoid()
		self.pam = PAM_Module()

	def forward(self, fc, fa):
		gate = self.sigmoid1(self.fusion_1x1(torch.cat([fa, fc], dim=1)))
		refined = self.pam(gate * fc + fa)
		return refined + fa


class PAM_Module(nn.Module):
	""" Position attention module"""
	# Ref from SAGAN
	def __init__(self):
		super(PAM_Module, self).__init__()
		self.gamma = nn.Parameter(torch.zeros(1))
		self.softmax = nn.Softmax(dim=-1)
		
	def forward(self, x):
		"""
			inputs :
				x : input feature maps( B X C X H X W)
			returns :
				out : attention value + input feature
				attention: B X (HxW) X (HxW)
		"""
		batchsize, channel, height, width = x.size()
		proj_query = x.view(batchsize, -1, width * height)
		proj_query = proj_query.permute(0, 2, 1)
		proj_key = x.view(batchsize, -1, width * height)
		energy = torch.bmm(proj_query, proj_key)
		attention = self.softmax(energy)
		attention = attention.permute(0, 2, 1)
		proj_value = x.view(batchsize, -1, width * height)
		out = torch.bmm(proj_value, attention)
		out = out.view(batchsize, channel, height, width)
		return out


class BaseNetHead(nn.Module):
	"""Prediction head: two 1x1 conv-BN-ReLU layers, light dropout, then
	bilinear upsampling by ``scale`` back to the output resolution.
	"""

	def __init__(self, in_planes, out_planes, scale):
		super(BaseNetHead, self).__init__()
		self.conv_1x1_1x1 = nn.Sequential(
			ConvBnRelu(in_planes, in_planes, 1, 1, 0, has_bn=True, has_relu=True, has_bias=False),
			ConvBnRelu(in_planes, out_planes, 1, 1, 0, has_bn=True, has_relu=True, has_bias=False))
		self.scale = scale
		self.dropout = nn.Dropout(0.1)

		# Kaiming-uniform convs, N(1, 0.02) BN scales, zero biases.
		for module in self.modules():
			if isinstance(module, nn.Conv2d):
				nn.init.kaiming_uniform_(module.weight.data)
				if module.bias is not None:
					module.bias.data.zero_()
			elif isinstance(module, nn.BatchNorm2d):
				init.normal_(module.weight.data, 1.0, 0.02)
				init.constant_(module.bias.data, 0.0)

	def forward(self, x):
		out = self.dropout(self.conv_1x1_1x1(x))
		if self.scale > 1:
			out = F.interpolate(out, scale_factor=self.scale, mode='bilinear', align_corners=True)
		return out


class ConvBnRelu(nn.Module):
	def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1, groups=1, has_bn=True, has_relu=True, inplace=True, has_bias=False):
		super(ConvBnRelu, self).__init__()
		self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, groups=groups, bias=has_bias)
		self.has_bn = has_bn
		self.has_relu = has_relu
		if self.has_bn:
			self.bn = nn.BatchNorm2d(out_planes)
		if self.has_relu:
			self.relu = nn.ReLU(inplace=inplace)

		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_uniform_(m.weight.data)
				if m.bias is not None:
					m.bias.data.zero_()
			elif isinstance(m, nn.BatchNorm2d):
				init.normal_(m.weight.data, 1.0, 0.02)
				init.constant_(m.bias.data, 0.0)

	def forward(self, x):
		x = self.conv(x)
		if self.has_bn:
			x = self.bn(x)
		if self.has_relu:
			x = self.relu(x)

		return x


BaseNet_version = "CCA_CGL"
if __name__ == '__main__':
	print(" CCA_CGL7 : 复现Cascaded Context Enhancement for Automated Skin Lesion Segmentation")
	import os
	os.environ['CUDA_VISIBLE_DEVICES'] = '0'
	model = CPFNet()
	# print(model)
	# model = torch.nn.DataParallel(model).cuda()
	model.cuda()
	with torch.no_grad():
		model.eval()
		torchsummary.summary(model, (3, 256, 256))
