import torch
from torch import nn
from torch.nn import init
# from torchvision.models.resnet import BasicBlock, ResNet, Bottleneck
from net.backbone import build_backbone
import layer.function as fun
import math
from net.segnet import SegResNet, SiameseNet
import torch.nn.functional as F
from torch.autograd import Variable

def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1 (spatial size kept at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )

def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free 1x1 convolution (no padding); used for channel projection."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=False,
    )

class BasicBlock(nn.Module):
    """ResNet-style residual block: two 3x3 convs plus an identity shortcut.

    When `downsample` is given it projects the input so the shortcut matches
    the main path's shape (needed when `stride != 1` or channels change).
    """

    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Main path: conv-bn-relu -> conv-bn.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Optional shortcut projection.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(main_path(x) + shortcut(x))."""
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))

        y += shortcut
        return self.relu(y)


class RFM_BasicBlock(nn.Module):
    """Refinement residual block with 1x1 bottleneck in/out projections.

    Projects the input to `planes` channels (1x1), applies two 3x3 conv-bn-relu
    stages, projects back down to a single channel (1x1), then adds the
    shortcut and applies a final ReLU.
    """

    expansion = 1  # kept for interface parity with BasicBlock

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(RFM_BasicBlock, self).__init__()
        self.conv0 = conv1x1(inplanes, planes, 1)   # lift to `planes` channels
        self.conv1 = conv3x3(planes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, 1, 1)          # squeeze to 1 channel

        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(main_path(x) + shortcut(x)); output has one channel."""
        # NOTE(review): with downsample=None the residual add assumes x has a
        # single channel (or broadcasts against one) — confirm at call sites.
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.conv0(x)
        y = self.relu(self.bn1(self.conv1(y)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.conv3(y)

        y += shortcut
        return self.relu(y)

class RFM_BasicBlock3_3(nn.Module):
    """Variant of RFM_BasicBlock using 3x3 convs for the in/out projections.

    Same structure as RFM_BasicBlock (project up, two conv-bn-relu stages,
    project down to one channel, residual add, final ReLU) but the first and
    last projections use 3x3 kernels instead of 1x1.
    """

    expansion = 1  # kept for interface parity with BasicBlock

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(RFM_BasicBlock3_3, self).__init__()
        self.conv0 = conv3x3(inplanes, planes, 1)   # lift to `planes` channels
        self.conv1 = conv3x3(planes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv3x3(planes, 1, 1)          # squeeze to 1 channel

        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(main_path(x) + shortcut(x)); output has one channel."""
        # NOTE(review): with downsample=None the residual add assumes x has a
        # single channel (or broadcasts against one) — confirm at call sites.
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.conv0(x)
        y = self.relu(self.bn1(self.conv1(y)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.conv3(y)

        y += shortcut
        return self.relu(y)

def various_distance(out_vec_t0, out_vec_t1, dist_flag):
    """Compute a distance between two feature tensors.

    Args:
        out_vec_t0: first feature tensor.
        out_vec_t1: second feature tensor, same shape as `out_vec_t0`.
        dist_flag: one of 'l2' (Euclidean), 'l1' (Manhattan), or 'cos'
            (1 - cosine similarity).

    Returns:
        Tensor of distances; l1/l2 reduce over the last dimension,
        'cos' reduces over dim=1 (cosine_similarity default).

    Raises:
        ValueError: if `dist_flag` is not one of the supported options.
            (The original fell through to an UnboundLocalError here.)
    """
    if dist_flag == 'l2':
        return F.pairwise_distance(out_vec_t0, out_vec_t1, p=2)
    if dist_flag == 'l1':
        return F.pairwise_distance(out_vec_t0, out_vec_t1, p=1)
    if dist_flag == 'cos':
        return 1 - F.cosine_similarity(out_vec_t0, out_vec_t1)
    raise ValueError(
        "unknown dist_flag: %r (expected 'l2', 'l1' or 'cos')" % (dist_flag,)
    )

def get_change_maps(t0, t1):
    """Compute a per-pixel L2 change map between two feature tensors.

    For every spatial location, the distance between the two c-dimensional
    feature vectors is taken over the channel axis.

    Args:
        t0: feature tensor of shape (n, c, h, w).
        t1: feature tensor of the same shape as `t0`.

    Returns:
        Tensor of shape (n, 1, h, w) holding the per-pixel L2 distances.
    """
    n, c, h, w = t0.shape
    # Flatten the spatial dims and move channels last: (n, h*w, c).
    flat_t0 = t0.view(n, c, h * w).transpose(1, 2)
    flat_t1 = t1.view(n, c, h * w).transpose(1, 2)
    # Batched L2 over the channel dim; replaces the original per-sample
    # Python loop (pairwise_distance reduces over the last dimension with
    # the same eps semantics, so the numbers are identical).
    distance = F.pairwise_distance(flat_t0, flat_t1, p=2)
    return distance.view(n, 1, h, w)

# def normFUSE(d):
#     ma = torch.max(d)
#     mi = torch.min(d)
#     ma, mi = Variable(ma), Variable(mi, requires_grad=True)
#     dn = (d-mi)/(ma-mi)
#     dn = Variable(dn, requires_grad=True)
#     return dn


class fusenet(nn.Module):
    """Multi-scale change-detection head.

    Runs a Siamese backbone on two co-registered inputs, converts each of the
    three feature levels into a single-channel L2 change map, sum-fuses the
    maps at full resolution, and refines the fused map with a small residual
    U-Net (RefUnetv2).
    """

    def __init__(self, SiameseNet):
        super(fusenet, self).__init__()
        self.SiameseNet = SiameseNet

        # NOTE(review): bottomconv/middleconv/bn_fuse/relu are registered but
        # never used in forward(); kept so existing checkpoints still load.
        self.bottomconv = nn.Conv2d(256, 32, 3, padding=1)
        self.middleconv = nn.Conv2d(64, 32, 3, padding=1)

        # Bring the middle (x4) and bottom (x16) change maps up to the
        # resolution of the final feature level before fusing.
        self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
        self.upsample16 = nn.UpsamplingBilinear2d(scale_factor=16)

        self.bn_fuse = nn.BatchNorm2d(3)
        self.relu = nn.ReLU(inplace=True)

        # Residual refinement over the 1-channel fused change map.
        self.refunetv2 = RefUnetv2(1, 64)

    def forward(self, t0, t1):
        """Return (refined change map in (0,1), middle/bottom/final feature pairs)."""
        middle_pair, bottom_pair, final_pair = self.SiameseNet(t0, t1)

        cm_middle = self.upsample4(get_change_maps(*middle_pair))
        cm_bottom = self.upsample16(get_change_maps(*bottom_pair))
        cm_final = get_change_maps(*final_pair)

        # Sum-fuse the three scales and squash to (0, 1) before refinement.
        fused = torch.sigmoid(cm_middle + cm_bottom + cm_final)

        refined = self.refunetv2(fused)

        return torch.sigmoid(refined), middle_pair, bottom_pair, final_pair


class RefUnet(nn.Module):
    def __init__(self,in_ch,inc_ch):
        super(RefUnet, self).__init__()

        self.conv0 = nn.Conv2d(in_ch,inc_ch,3,padding=1)

        self.conv1 = nn.Conv2d(inc_ch,64,3,padding=1)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)

        self.pool1 = nn.MaxPool2d(2,2,ceil_mode=True)

        self.conv2 = nn.Conv2d(64,64,3,padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)

        self.pool2 = nn.MaxPool2d(2,2,ceil_mode=True)

        self.conv3 = nn.Conv2d(64,64,3,padding=1)
        self.bn3 = nn.BatchNorm2d(64)
        self.relu3 = nn.ReLU(inplace=True)

        self.pool3 = nn.MaxPool2d(2,2,ceil_mode=True)

        self.conv4 = nn.Conv2d(64,64,3,padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.relu4 = nn.ReLU(inplace=True)

        self.pool4 = nn.MaxPool2d(2,2,ceil_mode=True)

        #####

        self.conv5 = nn.Conv2d(64,64,3,padding=1)
        self.bn5 = nn.BatchNorm2d(64)
        self.relu5 = nn.ReLU(inplace=True)

        #####

        self.conv_d4 = nn.Conv2d(128,64,3,padding=1)
        self.bn_d4 = nn.BatchNorm2d(64)
        self.relu_d4 = nn.ReLU(inplace=True)

        self.conv_d3 = nn.Conv2d(128,64,3,padding=1)
        self.bn_d3 = nn.BatchNorm2d(64)
        self.relu_d3 = nn.ReLU(inplace=True)

        self.conv_d2 = nn.Conv2d(128,64,3,padding=1)
        self.bn_d2 = nn.BatchNorm2d(64)
        self.relu_d2 = nn.ReLU(inplace=True)

        self.conv_d1 = nn.Conv2d(128,64,3,padding=1)
        self.bn_d1 = nn.BatchNorm2d(64)
        self.relu_d1 = nn.ReLU(inplace=True)

        self.conv_d0 = nn.Conv2d(64,1,3,padding=1)
        # self.conv_d0 = nn.Conv2d(64, 32, 3, padding=1)

        self.upscore2 = nn.Upsample(scale_factor=2, mode='bilinear')


    def forward(self,x):

        hx = x
        hx = self.conv0(hx)

        hx1 = self.relu1(self.bn1(self.conv1(hx)))
        hx = self.pool1(hx1)

        hx2 = self.relu2(self.bn2(self.conv2(hx)))
        hx = self.pool2(hx2)

        hx3 = self.relu3(self.bn3(self.conv3(hx)))
        hx = self.pool3(hx3)

        hx4 = self.relu4(self.bn4(self.conv4(hx)))
        hx = self.pool4(hx4)

        hx5 = self.relu5(self.bn5(self.conv5(hx)))

        hx = self.upscore2(hx5)

        d4 = self.relu_d4(self.bn_d4(self.conv_d4(torch.cat((hx,hx4),1))))
        hx = self.upscore2(d4)

        d3 = self.relu_d3(self.bn_d3(self.conv_d3(torch.cat((hx,hx3),1))))
        hx = self.upscore2(d3)

        d2 = self.relu_d2(self.bn_d2(self.conv_d2(torch.cat((hx,hx2),1))))
        hx = self.upscore2(d2)

        d1 = self.relu_d1(self.bn_d1(self.conv_d1(torch.cat((hx,hx1),1))))

        residual = self.conv_d0(d1)

        return x + residual


class RefUnetv2(nn.Module):
    def __init__(self,in_ch,inc_ch):
        super(RefUnetv2, self).__init__()

        self.conv0 = nn.Conv2d(in_ch,inc_ch,3,padding=1)

        self.conv1 = nn.Conv2d(inc_ch,64,3,padding=1)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)

        self.pool1 = nn.MaxPool2d(2,2,ceil_mode=True)

        self.conv2 = nn.Conv2d(64,64,3,padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)

        self.pool2 = nn.MaxPool2d(2,2,ceil_mode=True)

        self.conv3 = nn.Conv2d(64,64,3,padding=1)
        self.bn3 = nn.BatchNorm2d(64)
        self.relu3 = nn.ReLU(inplace=True)

        self.pool3 = nn.MaxPool2d(2,2,ceil_mode=True)

        # self.conv4 = nn.Conv2d(64,64,3,padding=1)
        # self.bn4 = nn.BatchNorm2d(64)
        # self.relu4 = nn.ReLU(inplace=True)
        #
        # self.pool4 = nn.MaxPool2d(2,2,ceil_mode=True)

        #####

        # self.conv5 = nn.Conv2d(64,64,3,padding=1)
        # self.bn5 = nn.BatchNorm2d(64)
        # self.relu5 = nn.ReLU(inplace=True)

        #####

        self.conv_d4 = nn.Conv2d(64,64,3,padding=1)
        self.bn_d4 = nn.BatchNorm2d(64)
        self.relu_d4 = nn.ReLU(inplace=True)

        self.conv_d3 = nn.Conv2d(128,64,3,padding=1)
        self.bn_d3 = nn.BatchNorm2d(64)
        self.relu_d3 = nn.ReLU(inplace=True)

        self.conv_d2 = nn.Conv2d(128,64,3,padding=1)
        self.bn_d2 = nn.BatchNorm2d(64)
        self.relu_d2 = nn.ReLU(inplace=True)

        self.conv_d1 = nn.Conv2d(128,64,3,padding=1)
        self.bn_d1 = nn.BatchNorm2d(64)
        self.relu_d1 = nn.ReLU(inplace=True)

        self.conv_d0 = nn.Conv2d(64,1,3,padding=1)
        # self.conv_d0 = nn.Conv2d(64, 32, 3, padding=1)

        self.upscore2 = nn.Upsample(scale_factor=2, mode='bilinear')


    def forward(self,x):

        hx = x
        hx = self.conv0(hx)

        hx1 = self.relu1(self.bn1(self.conv1(hx)))
        hx = self.pool1(hx1)

        hx2 = self.relu2(self.bn2(self.conv2(hx)))
        hx = self.pool2(hx2)

        hx3 = self.relu3(self.bn3(self.conv3(hx)))
        hx = self.pool3(hx3)

        # hx4 = self.relu4(self.bn4(self.conv4(hx)))
        # hx = self.pool4(hx4)

        # hx5 = self.relu5(self.bn5(self.conv5(hx)))
        #
        # hx = self.upscore2(hx5)
        #
        d4 = self.relu_d4(self.bn_d4(self.conv_d4(hx)))
        hx = self.upscore2(d4)

        d3 = self.relu_d3(self.bn_d3(self.conv_d3(torch.cat((hx,hx3),1))))
        hx = self.upscore2(d3)

        d2 = self.relu_d2(self.bn_d2(self.conv_d2(torch.cat((hx,hx2),1))))
        hx = self.upscore2(d2)

        d1 = self.relu_d1(self.bn_d1(self.conv_d1(torch.cat((hx,hx1),1))))

        residual = self.conv_d0(d1)

        return x + residual