import torch
import torch.nn as nn
import torch.nn.functional as F
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
from modeling.aspp import build_aspp
from modeling.decoder import build_decoder
from modeling.backbone import build_backbone
from modeling.backbone import resnet, xception, drn, mobilenet
import torchsummary
from modeling.resnet import resnet34, resnet101


class Network(nn.Module):
    """Three-stream network producing a segmentation map and a classification
    vector from one input image.

    Streams:
      * ``backbone_cls``    — classification branch.
      * ``backbone_seg``    — segmentation branch.
      * ``backbone_fusion`` — fusion branch, fed by attention-weighted sums of
        the other two branches at every ResNet stage.

    forward(input) returns ``(seg_x, cls_x)`` where ``seg_x`` is a
    sigmoid-activated map at input resolution and ``cls_x`` are raw logits of
    size ``cls_classes``.
    """

    def __init__(self, output_stride=8, num_classes=1, sync_bn=True, cls_classes=3):
        """
        Args:
            output_stride: stride passed to the ASPP module.
            num_classes: number of segmentation output channels.
            sync_bn: use SynchronizedBatchNorm2d instead of nn.BatchNorm2d.
            cls_classes: number of classification output classes.
        """
        super(Network, self).__init__()

        # Backbone selection; per-stage channel counts go in self.in_c:
        # [stem, layer1, layer2, layer3, layer4].
        self.network = "resnet34"
        # self.network = "resnet101"

        if self.network == "resnet34":
            self.backbone_cls = resnet34(pretrained=True)
            self.backbone_seg = resnet34(pretrained=True)
            self.backbone_fusion = resnet34(pretrained=True)
            self.in_c = [64, 64, 128, 256, 512]
        elif self.network == "resnet101":
            self.backbone_cls = resnet101(pretrained=True)
            self.backbone_seg = resnet101(pretrained=True)
            self.backbone_fusion = resnet101(pretrained=True)
            self.in_c = [64, 256, 512, 1024, 2048]

        BatchNorm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d

        self.aspp_s = build_aspp(self.in_c[-1], output_stride, BatchNorm)

        # Fix: honor the sync_bn flag here too — the original hard-coded
        # nn.BatchNorm2d in these two heads while every other module received
        # the selected BatchNorm class.
        self.conv_cs = nn.Sequential(
            nn.Conv2d(self.in_c[-1], 256, kernel_size=1, stride=1, padding=0, bias=False),
            BatchNorm(256),
            nn.ReLU(inplace=True))

        # Projects the concatenation of ASPP output and fusion features
        # (256 + 256 channels) back down to 256 for the decoder.
        self.conv_s_cs = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0, bias=False),
            BatchNorm(256),
            nn.ReLU(inplace=True))

        self.decoder = build_decoder(num_classes, self.in_c[1], BatchNorm)
        # NOTE(review): kernel size 8 assumes layer4 features are 8x8
        # (i.e. 256x256 input with stride 32) — confirm against caller.
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.fc = nn.Linear(self.in_c[-1] * 2, cls_classes)
        self.sigmoid = nn.Sigmoid()

        # Per-stage attention modules. fusionN weighs cls vs seg features;
        # fusionN_2 weighs the running fusion stream vs the new fused features.
        self.fusion1 = Fusion(self.in_c[-4])
        self.fusion2 = Fusion(self.in_c[-3])
        self.fusion2_2 = Fusion(self.in_c[-3])
        self.fusion3 = Fusion(self.in_c[-2])
        self.fusion3_2 = Fusion(self.in_c[-2])
        self.fusion4 = Fusion(self.in_c[-1])
        self.fusion4_2 = Fusion(self.in_c[-1])

    @staticmethod
    def _attention_fuse(fusion_module, a, b):
        """Attention-weighted combination of two same-shaped feature maps.

        ``fusion_module`` maps cat([a, b]) to a 2-channel logit map; a softmax
        over those channels yields per-pixel weights for ``a`` and ``b``.

        Returns:
            (weighted_a, weighted_b, weighted_a + weighted_b)
        """
        logits = fusion_module(torch.cat([a, b], dim=1))     # (B, 2, H, W)
        attention = F.softmax(logits, dim=1)
        # [:, 0:1] keeps the channel dim, equivalent to [:, 0].unsqueeze(1).
        weighted_a = attention[:, 0:1, :, :] * a
        weighted_b = attention[:, 1:2, :, :] * b
        return weighted_a, weighted_b, weighted_a + weighted_b

    def forward(self, input):
        # --- stems of the classification and segmentation branches ---
        cls_x = self.backbone_cls.conv1(input)
        cls_x = self.backbone_cls.bn1(cls_x)
        cls_x = self.backbone_cls.relu(cls_x)
        cls_x = self.backbone_cls.maxpool(cls_x)

        seg_x = self.backbone_seg.conv1(input)
        seg_x = self.backbone_seg.bn1(seg_x)
        seg_x = self.backbone_seg.relu(seg_x)
        seg_x = self.backbone_seg.maxpool(seg_x)

        cls_x = self.backbone_cls.layer1(cls_x)
        seg_x = self.backbone_seg.layer1(seg_x)
        low_feat = seg_x  # low-level features for the decoder skip connection

        # Stage 1: both branches are re-weighted in place; their sum seeds
        # the fusion stream.
        cls_x, seg_x, fusion = self._attention_fuse(self.fusion1, cls_x, seg_x)

        cls_x = self.backbone_cls.layer2(cls_x)
        seg_x = self.backbone_seg.layer2(seg_x)
        fusion = self.backbone_fusion.layer2(fusion)

        # Stage 2: fuse cls/seg, then merge into the running fusion stream.
        cls_x, seg_x, fused = self._attention_fuse(self.fusion2, cls_x, seg_x)
        _, _, fusion = self._attention_fuse(self.fusion2_2, fusion, fused)

        cls_x = self.backbone_cls.layer3(cls_x)
        seg_x = self.backbone_seg.layer3(seg_x)
        fusion = self.backbone_fusion.layer3(fusion)

        # Stage 3
        cls_x, seg_x, fused = self._attention_fuse(self.fusion3, cls_x, seg_x)
        _, _, fusion = self._attention_fuse(self.fusion3_2, fusion, fused)

        cls_x = self.backbone_cls.layer4(cls_x)
        seg_x = self.backbone_seg.layer4(seg_x)
        fusion = self.backbone_fusion.layer4(fusion)

        # Stage 4
        cls_x, seg_x, fused = self._attention_fuse(self.fusion4, cls_x, seg_x)
        _, _, fusion = self._attention_fuse(self.fusion4_2, fusion, fused)

        # --- classification head: concat pooled cls and fusion features ---
        cls_x = self.backbone_cls.avgpool(cls_x)
        cls_x = cls_x.view(cls_x.size(0), -1)
        fusion_cls = self.avgpool(fusion)
        fusion_cls = fusion_cls.view(fusion_cls.size(0), -1)
        cls_x = torch.cat([cls_x, fusion_cls], dim=1)
        cls_x = self.fc(cls_x)

        # --- segmentation head: ASPP + fusion features into the decoder ---
        seg_x = self.aspp_s(seg_x)
        fusion_seg = self.conv_cs(fusion)
        seg_x = torch.cat([seg_x, fusion_seg], dim=1)
        seg_x = self.conv_s_cs(seg_x)

        seg_x = self.decoder(seg_x, low_feat)
        seg_x = F.interpolate(seg_x, size=input.size()[2:], mode='bilinear', align_corners=True)
        seg_x = self.sigmoid(seg_x)

        return seg_x, cls_x


class Fusion(nn.Module):
    """Small conv head that turns a concatenated pair of feature maps
    (2*a input channels) into a 2-channel logit map of the same spatial
    size, intended to be softmaxed into per-pixel attention weights.

    Channel schedule: 2a -> a -> a//2 -> 2.
    """

    def __init__(self, a):
        super(Fusion, self).__init__()

        # 1x1 reduction, then two 3x3 convs that keep the spatial size.
        self.conv1 = nn.Conv2d(in_channels=2 * a, out_channels=a, dilation=1, kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(in_channels=a, out_channels=a // 2, dilation=1, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(in_channels=a // 2, out_channels=2, dilation=1, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Return raw (pre-softmax) 2-channel attention logits for ``x``."""
        out = self.relu(self.conv1(x))
        out = self.relu(self.conv2(out))
        return self.conv3(out)


# Version tag for this model definition.
BaseNet_version = 'SC_V1'

if __name__ == '__main__':
    # Smoke test: build the model on CPU and print a per-layer summary.
    model = Network(output_stride=32, num_classes=3, sync_bn=True, cls_classes=3)
    # model.cuda()
    torchsummary.summary(model, (3, 256, 256))