# -*- coding: utf-8 -*-
"""
@author: Sugurs
"""

import torch
from torchvision import models
import torch.nn as nn
from model.resnet import resnet34, resnet101
from torch.nn import functional as F
import torchsummary
from torch.nn import init
from model.Aspp2 import build_aspp


up_kwargs = {'mode': 'bilinear', 'align_corners': True}


class ClsSegNet(nn.Module):
	"""Joint classification + segmentation network.

	A shared ResNet-101 backbone feeds two heads:
	- classification: global average pool -> backbone fc;
	- segmentation: ASPP on the deepest feature, concatenated with a
	  channel-reduced low-level feature, decoded by ``BaseNetHead`` and
	  squashed with a sigmoid.

	Args:
		out_planes (int): number of segmentation output channels (default 1).
	"""

	def __init__(self, out_planes=1):
		super(ClsSegNet, self).__init__()
		self.out_chanel = out_planes
		self.network = "resnet101"
		if self.network == "resnet101":
			self.backbone = resnet101(pretrained=True)
			# Channel widths of [stem, layer1..layer4] for resnet101.
			self.in_c = [64, 256, 512, 1024, 2048]

		self.in_de_c = 256  # unified decoder channel width for encoder features
		self.aspp_o_c = 256
		# ASPP takes the deepest backbone feature (in_c[-1]) and emits aspp_o_c channels.
		self.aspp_s = build_aspp(self.in_c[-1], self.aspp_o_c)

		# Reduce the low-level feature (layer2 output, 512 ch) to 48 channels.
		self.conv1x1 = nn.Conv2d(512, 48, kernel_size=1, stride=1,
								  padding=0, dilation=1)

		# BUG FIX: the head receives the concat of ASPP output (aspp_o_c)
		# and the reduced low-level feature (48) = 304 channels; the
		# original passed in_de_c (256), which crashed the head's conv.
		self.main_head = BaseNetHead(self.aspp_o_c + 48, out_planes, 4)
		self.sigmoid_main = torch.nn.Sigmoid()

	def forward(self, x):
		x = self.backbone.conv1(x)
		x = self.backbone.bn1(x)
		#                                		model     resnet34       resnet101
		x1 = self.backbone.relu(x)	   # 1/2  	   64
		x = self.backbone.maxpool(x1)
		x2 = self.backbone.layer1(x)   # 1/4       64             256
		x3 = self.backbone.layer2(x2)  # 1/8      128             512
		x4 = self.backbone.layer3(x3)  # 1/8      256             1024
		x5 = self.backbone.layer4(x4)  # 1/8      512             2048

		########################### ClsNet #########################
		x_cls = self.backbone.avgpool(x5)
		# BUG FIX: flatten the pooled feature, not the stem feature `x`.
		x_cls = x_cls.view(x_cls.size(0), -1)
		x_cls_out = self.backbone.fc(x_cls)

		########################### SegNet #########################
		x_seg = self.aspp_s(x5)  # segmentation branch
		x3 = self.conv1x1(x3)
		x_seg = torch.cat([x_seg, x3], dim=1)
		x_seg_out = self.sigmoid_main(self.main_head(x_seg))
		return x_cls_out, x_seg_out

	def _initialize_weights(self):
		# Kaiming-uniform for convs; N(1.0, 0.02)/zero for BatchNorm affine params.
		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_uniform_(m.weight.data)
				if m.bias is not None:
					m.bias.data.zero_()
			elif isinstance(m, nn.BatchNorm2d):
				init.normal_(m.weight.data, 1.0, 0.02)
				init.constant_(m.bias.data, 0.0)


class BaseNetHead(nn.Module):
	"""Prediction head: a 3x3 Conv-BN-ReLU projection followed by bilinear
	upsampling by ``scale``.

	NOTE(review): ``self.dropout`` is constructed but never applied in
	``forward`` — kept as-is for checkpoint compatibility; confirm intent.
	"""

	def __init__(self, in_planes, out_planes, scale):
		super(BaseNetHead, self).__init__()

		# Attribute name kept for state_dict compatibility (it is actually a 3x3 conv).
		self.conv_1x1_1x1 = nn.Sequential(
			ConvBnRelu(in_planes, out_planes, 3, 1, 1, has_bn=True, has_relu=True, has_bias=False))

		self.scale = scale
		self.dropout = nn.Dropout(0.1)

		self._reset_parameters()

	def _reset_parameters(self):
		# Kaiming-uniform for convs; N(1.0, 0.02)/zero for BatchNorm affine params.
		for module in self.modules():
			if isinstance(module, nn.Conv2d):
				nn.init.kaiming_uniform_(module.weight.data)
				if module.bias is not None:
					module.bias.data.zero_()
			elif isinstance(module, nn.BatchNorm2d):
				init.normal_(module.weight.data, 1.0, 0.02)
				init.constant_(module.bias.data, 0.0)

	def forward(self, x):
		out = self.conv_1x1_1x1(x)
		if self.scale <= 1:
			return out
		return F.interpolate(out, scale_factor=self.scale,
							 mode='bilinear', align_corners=True)


class ConvBnRelu(nn.Module):
	"""Conv2d optionally followed by BatchNorm2d and ReLU.

	Flags ``has_bn`` / ``has_relu`` toggle the respective layers; ``has_bias``
	controls the conv bias (usually False when BN follows).
	"""

	def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1, groups=1, has_bn=True, has_relu=True, inplace=True, has_bias=False):
		super(ConvBnRelu, self).__init__()
		self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
							  stride=stride, padding=pad, dilation=dilation,
							  groups=groups, bias=has_bias)
		self.has_bn = has_bn
		self.has_relu = has_relu
		if has_bn:
			self.bn = nn.BatchNorm2d(out_planes)
		if has_relu:
			self.relu = nn.ReLU(inplace=inplace)

		# Custom init: Kaiming-uniform conv weights, N(1.0, 0.02)/zero BN params.
		# (Module creation order above is unchanged, so RNG consumption matches.)
		for module in self.modules():
			if isinstance(module, nn.Conv2d):
				nn.init.kaiming_uniform_(module.weight.data)
				if module.bias is not None:
					module.bias.data.zero_()
			elif isinstance(module, nn.BatchNorm2d):
				init.normal_(module.weight.data, 1.0, 0.02)
				init.constant_(module.bias.data, 0.0)

	def forward(self, x):
		out = self.conv(x)
		if self.has_bn:
			out = self.bn(out)
		if self.has_relu:
			out = self.relu(out)
		return out


BaseNet_version = "ClsSegNet"

if __name__ == '__main__':
	# Smoke test: build the model on CPU and print a per-layer summary
	# for a 3x256x256 input.
	print("Load ClsSegNet Model……")
	net = ClsSegNet()
	torchsummary.summary(net, (3, 256, 256))
