# ----------------------------------------
# Written by Yude Wang
# ----------------------------------------

import numpy as np
import torch 
import torch.nn as nn
import torch.nn.functional as F
from net.sync_batchnorm import SynchronizedBatchNorm2d
from torch.nn import init
from net.backbone import build_backbone
from net.ASPP import ASPP
import layer.function as fun

class deeplabv3plus(nn.Module):
	"""DeepLabv3+ encoder-decoder used as a feature extractor.

	The usual classification head (``cls_conv`` + 4x upsample) is kept as a
	module but intentionally bypassed in :meth:`forward`, which instead
	returns three feature maps for downstream (Siamese) comparison:

	* decoder output (``cfg.MODEL_ASPP_OUTDIM`` channels, 1/4 input resolution)
	* the shallow backbone feature (``layers[0]``)
	* the upsampled ASPP feature (same spatial size as ``layers[0]``)
	"""

	def __init__(self, cfg):
		super(deeplabv3plus, self).__init__()
		self.backbone = None
		self.backbone_layers = None
		# Channel count of the deepest backbone feature map
		# (2048 for the ResNet-101 backbone selected via cfg.MODEL_BACKBONE).
		input_channel = 2048
		self.aspp = ASPP(dim_in=input_channel,
				dim_out=cfg.MODEL_ASPP_OUTDIM,           # e.g. 256
				rate=16//cfg.MODEL_OUTPUT_STRIDE,        # dilation scale for the ASPP branches
				bn_mom=cfg.TRAIN_BN_MOM)                 # e.g. 0.0003
		self.dropout1 = nn.Dropout(0.5)
		# Unused in forward; kept for the (commented-out) classification head.
		self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
		# Brings the ASPP output up to the shallow feature's stride-4 resolution.
		self.upsample_sub = nn.UpsamplingBilinear2d(scale_factor=cfg.MODEL_OUTPUT_STRIDE//4)

		# Channel count of the shallow backbone feature (layers[0]).
		indim = 256
		self.shortcut_conv = nn.Sequential(
				nn.Conv2d(indim, cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_SHORTCUT_KERNEL, 1,
						padding=cfg.MODEL_SHORTCUT_KERNEL//2, bias=True),
				SynchronizedBatchNorm2d(cfg.MODEL_SHORTCUT_DIM, momentum=cfg.TRAIN_BN_MOM),
				nn.ReLU(inplace=True),
		)
		# Fuses the concatenated ASPP + shortcut features
		# (MODEL_ASPP_OUTDIM + MODEL_SHORTCUT_DIM -> MODEL_ASPP_OUTDIM channels).
		self.cat_conv = nn.Sequential(
				nn.Conv2d(cfg.MODEL_ASPP_OUTDIM+cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=True),
				SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
				nn.ReLU(inplace=True),
				nn.Dropout(0.5),
				nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=True),
				SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
				nn.ReLU(inplace=True),
				nn.Dropout(0.1),
		)
		# Kept for compatibility but bypassed in forward; MODEL_NUM_CLASSES
		# could be set to 1 to emit a single-channel mask.
		self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES, 1, 1, padding=0)
		# NOTE: this init loop deliberately runs BEFORE the backbone is
		# built, so the (typically pretrained) backbone weights are left
		# untouched — only the decoder modules above get re-initialized.
		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
			elif isinstance(m, SynchronizedBatchNorm2d):
				nn.init.constant_(m.weight, 1)
				nn.init.constant_(m.bias, 0)
		self.backbone = build_backbone(cfg.MODEL_BACKBONE, os=cfg.MODEL_OUTPUT_STRIDE)  # e.g. res101_atrous, os=16
		self.backbone_layers = self.backbone.get_layers()

	def forward(self, x):
		# The backbone's direct return value is not needed: the backbone
		# caches its intermediate feature maps, which we fetch below.
		self.backbone(x)
		layers = self.backbone.get_layers()
		feature_aspp = self.aspp(layers[-1])             # deepest feature -> multi-rate context
		feature_aspp = self.dropout1(feature_aspp)
		feature_aspp_ = self.upsample_sub(feature_aspp)  # up to the shallow feature's resolution

		feature_shallow = self.shortcut_conv(layers[0])  # shallow feature -> MODEL_SHORTCUT_DIM channels
		feature_cat = torch.cat([feature_aspp_, feature_shallow], 1)
		result = self.cat_conv(feature_cat)
		# Classification head intentionally skipped; callers consume raw features:
		# result = self.upsample4(self.cls_conv(result))
		return result, layers[0], feature_aspp_

class SiameseNet(nn.Module):
    """Siamese wrapper around a shared deeplabv3plus trunk.

    Both inputs are passed through the *same* CNN (shared weights); each of
    the three returned feature maps is normalized so the caller can compare
    t0/t1 features by distance (change detection).

    Args:
        cfg: model configuration object forwarded to deeplabv3plus.
        norm_flag: 'l2' for channel-wise L2 normalization, 'exp' for Softmax2d.

    Raises:
        ValueError: if norm_flag is neither 'l2' nor 'exp'.
    """

    def __init__(self, cfg, norm_flag='l2'):
        super(SiameseNet, self).__init__()
        self.CNN = deeplabv3plus(cfg)
        if norm_flag == 'l2':
            self.norm = fun.l2normalization(scale=1)
        elif norm_flag == 'exp':
            self.norm = nn.Softmax2d()
        else:
            # Fail fast here rather than with a confusing AttributeError
            # ("no attribute 'norm'") on the first forward() call.
            raise ValueError("norm_flag must be 'l2' or 'exp', got %r" % (norm_flag,))

    def forward(self, t0, t1):
        """Run both images through the shared trunk and normalize each output.

        Returns three [t0, t1] pairs corresponding to the three tensors
        produced by deeplabv3plus.forward: the decoder output, the shallow
        backbone feature, and the upsampled ASPP feature.
        """
        out_t0_conv5, out_t0_fc7, out_t0_embedding = self.CNN(t0)
        out_t1_conv5, out_t1_fc7, out_t1_embedding = self.CNN(t1)
        out_t0_conv5_norm, out_t1_conv5_norm = self.norm(out_t0_conv5), self.norm(out_t1_conv5)
        out_t0_fc7_norm, out_t1_fc7_norm = self.norm(out_t0_fc7), self.norm(out_t1_fc7)
        out_t0_embedding_norm, out_t1_embedding_norm = self.norm(out_t0_embedding), self.norm(out_t1_embedding)
        return ([out_t0_conv5_norm, out_t1_conv5_norm],
                [out_t0_fc7_norm, out_t1_fc7_norm],
                [out_t0_embedding_norm, out_t1_embedding_norm])

    # def init_parameters_from_deeplab(self,pretrain_vgg16_1024):
	#
    #     ##### init parameter using pretrain vgg16 model ###########
    #     pretrain_dict_names = convert_dict_names_for_fucking_faults()
    #     keys = sorted(pretrain_dict_names.keys())
    #     conv_blocks = [self.CNN.conv1,
    #                    self.CNN.conv2,
    #                    self.CNN.conv3,
    #                    self.CNN.conv4,
    #                    self.CNN.conv5]
    #     ranges = [[0,2], [0,2], [0,2,4], [0,2,4], [0,2,4]]
    #     for key in keys:
    #         dic_name = pretrain_dict_names[key]
    #         base_conv_name,conv_index,sub_index = dic_name[:5],int(dic_name[4]),int(dic_name[-1])
    #         conv_blocks[conv_index -1][ranges[sub_index -1][sub_index -1]].weight.data = pretrain_vgg16_1024[key + '.weight']
    #         conv_blocks[conv_index- 1][ranges[sub_index -1][sub_index -1]].bias.data = pretrain_vgg16_1024[key + '.bias']
	#
    #     ####### init fc parameters (transplant) ##############
    #     self.CNN.fc6_1[0].weight.data = pretrain_vgg16_1024['fc6_1.0.weight'].view(self.CNN.fc6_1[0].weight.size())
    #     self.CNN.fc6_1[0].bias.data = pretrain_vgg16_1024['fc6_1.0.bias'].view(self.CNN.fc6_1[0].bias.size())
	#
    #     self.CNN.fc7_1[0].weight.data = pretrain_vgg16_1024['fc7_1.0.weight'].view(self.CNN.fc7_1[0].weight.size())
    #     self.CNN.fc7_1[0].bias.data = pretrain_vgg16_1024['fc7_1.0.bias'].view(self.CNN.fc7_1[0].bias.size())
	#
    #     self.CNN.fc6_2[0].weight.data = pretrain_vgg16_1024['fc6_2.0.weight'].view(self.CNN.fc6_2[0].weight.size())
    #     self.CNN.fc6_2[0].bias.data = pretrain_vgg16_1024['fc6_2.0.bias'].view(self.CNN.fc6_2[0].bias.size())
	#
    #     self.CNN.fc7_2[0].weight.data = pretrain_vgg16_1024['fc7_2.0.weight'].view(self.CNN.fc7_2[0].weight.size())
    #     self.CNN.fc7_2[0].bias.data = pretrain_vgg16_1024['fc7_2.0.bias'].view(self.CNN.fc7_2[0].bias.size())
	#
    #     self.CNN.fc6_3[0].weight.data = pretrain_vgg16_1024['fc6_3.0.weight'].view(self.CNN.fc6_3[0].weight.size())
    #     self.CNN.fc6_3[0].bias.data = pretrain_vgg16_1024['fc6_3.0.bias'].view(self.CNN.fc6_3[0].bias.size())
	#
    #     self.CNN.fc7_3[0].weight.data = pretrain_vgg16_1024['fc7_3.0.weight'].view(self.CNN.fc7_3[0].weight.size())
    #     self.CNN.fc7_3[0].bias.data = pretrain_vgg16_1024['fc7_3.0.bias'].view(self.CNN.fc7_3[0].bias.size())
	#
    #     self.CNN.fc6_4[0].weight.data = pretrain_vgg16_1024['fc6_4.0.weight'].view(self.CNN.fc6_4[0].weight.size())
    #     self.CNN.fc6_4[0].bias.data = pretrain_vgg16_1024['fc6_4.0.bias'].view(self.CNN.fc6_4[0].bias.size())
	#
    #     self.CNN.fc7_4[0].weight.data = pretrain_vgg16_1024['fc7_4.0.weight'].view(self.CNN.fc7_4[0].weight.size())
    #     self.CNN.fc7_4[0].bias.data = pretrain_vgg16_1024['fc7_4.0.bias'].view(self.CNN.fc7_4[0].bias.size())
	#
    #     #init.kaiming_uniform(self.CNN.embedding_layer.weight.data,mode='fan_in')
    #     #init.constant(self.CNN.embedding_layer.bias.data,0)
	#
    # def init_parameters(self,pretrain_vgg16_1024):
	#
    #     ##### init parameter using pretrain vgg16 model ###########
    #     conv_blocks = [self.CNN.conv1,
    #                    self.CNN.conv2,
    #                    self.CNN.conv3,
    #                    self.CNN.conv4,
    #                    self.CNN.conv5]
	#
    #     ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]
    #     features = list(pretrain_vgg16_1024.features.children())
    #     for idx, conv_block in enumerate(conv_blocks):
    #         for l1, l2 in zip(features[ranges[idx][0]:ranges[idx][1]], conv_block):
    #             if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
    #                 # print idx, l1, l2
    #                 assert l1.weight.size() == l2.weight.size()
    #                 assert l1.bias.size() == l2.bias.size()
    #                 l2.weight.data = l1.weight.data
    #                 l2.bias.data = l1.bias.data
	#
    #     ####### init fc parameters (transplant) ##############
	#
    #     self.CNN.fc6[0].weight.data = pretrain_vgg16_1024.classifier[0].weight.data.view(self.CNN.fc6[0].weight.size())
    #     self.CNN.fc6[0].bias.data = pretrain_vgg16_1024.classifier[0].bias.data.view(self.CNN.fc6[0].bias.size())
	#
    #     self.CNN.fc7[0].weight.data = pretrain_vgg16_1024.classifier[3].weight.data.view(self.CNN.fc7[0].weight.size())
    #     self.CNN.fc7[0].bias.data = pretrain_vgg16_1024.classifier[3].bias.data.view(self.CNN.fc7[0].bias.size())