import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
import torchvision.models as models
from torch.autograd import Variable
class ShareSepConv(nn.Module):
    """Depthwise convolution whose single 2-D kernel is shared by every
    input channel.

    The class body was missing in the original file (a syntax error) even
    though ``ShareSepConv(3)`` is instantiated below; reconstructed here in
    the conventional form: one learnable ``1x1xkxk`` kernel, initialized to
    the identity (a single 1 at the center), expanded across the input
    channels and applied with ``groups == channels`` so each channel is
    filtered independently by the same kernel.

    Args:
        kernel_size (int): odd spatial size of the shared kernel.
    """

    def __init__(self, kernel_size):
        super(ShareSepConv, self).__init__()
        assert kernel_size % 2 == 1, 'kernel size should be odd'
        self.kernel_size = kernel_size
        self.padding = (kernel_size - 1) // 2
        # Identity initialization: output equals input until training
        # moves the weights.
        weight_tensor = torch.zeros(1, 1, kernel_size, kernel_size)
        weight_tensor[0, 0, self.padding, self.padding] = 1
        self.weight = nn.Parameter(weight_tensor)

    def forward(self, x):
        inc = x.size(1)
        # Broadcast the single kernel to (C, 1, k, k) and run a grouped
        # (depthwise) convolution, stride 1, "same" padding.
        expand_weight = self.weight.expand(
            inc, 1, self.kernel_size, self.kernel_size).contiguous()
        return F.conv2d(x, expand_weight, None, 1, self.padding, 1, inc)
class BottleneckBlockdls(nn.Module):
    """Dense-style bottleneck block: BN -> ReLU -> 1x1 conv -> shared
    separable 3x3 conv, with the result concatenated onto the input
    (DenseNet growth pattern), so the output has
    ``in_planes + out_planes`` channels.

    Args:
        in_planes (int): channels of the input feature map.
        out_planes (int): channels produced by the 1x1 conv (the growth).
        dropRate (float): accepted for signature compatibility with the
            sibling blocks; unused here.
    """

    def __init__(self, in_planes, out_planes, dropRate=0.0):
        super(BottleneckBlockdls, self).__init__()
        # NOTE(review): `inter_planes` was computed but never used in the
        # original; kept out of the layer definitions deliberately.
        inter_planes = out_planes * 4
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv_o = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
                                padding=0, bias=False)
        self.shareconv2 = ShareSepConv(3)

    def forward(self, x):
        # forward() was missing in the original. Reconstructed following
        # the DenseNet bottleneck convention; the concatenation is required
        # by the channel arithmetic in Segmentation (e.g.
        # dense_block4(128,64) feeding trans_block4(192,...), and
        # dense_block5(128,64) producing the 192 channels consumed by
        # conv_refin64).
        out = self.shareconv2(self.conv_o(self.relu(self.bn1(x))))
        return torch.cat([x, out], 1)


class Segmentation(nn.Module):
    """Dense-block encoder/decoder producing a segmentation residual plus a
    coarse intermediate estimate.

    ``forward(x, x_64)`` returns ``(residual, xhat64)``:

    * ``residual`` -- 4-channel map at the resolution of the final decoder
      stage, squashed through a Sigmoid.
    * ``xhat64`` -- 4-channel auxiliary prediction taken from the block-5
      features, also Sigmoid-squashed.

    Several attribute/name typos in the original made ``forward`` crash;
    each fix is noted inline. Attribute names that merely *look* wrong but
    are referenced consistently (e.g. ``self.tanh`` holding a Sigmoid) are
    kept to preserve the interface and any existing checkpoints.
    """

    def __init__(self):
        super(Segmentation, self).__init__()
        ############# downsampling path ##############
        self.dense_block1 = BottleneckBlockrs1(3, 61)
        self.trans_block1 = TransitionBlock1(64, 64)

        # NOTE(review): if BottleneckBlockdls concatenates input and output,
        # dense_block2 yields 67 + 64 = 131 channels while trans_block2
        # expects 128 -- confirm these sizes against the block definitions.
        self.dense_block2 = BottleneckBlockdls(67, 64)
        self.trans_block2 = TransitionBlock3(128, 64)

        # dense_block3/trans_block3 were referenced in forward() but never
        # created; added here following the pattern of the sibling stages.
        self.dense_block3 = BottleneckBlockdls(64, 64)
        self.trans_block3 = TransitionBlock3(128, 64)

        # was `self.dense_block_1 = Bottleneckdls(64,64)`: `Bottleneckdls`
        # is not defined anywhere in this file and forward() reads
        # `self.dense_block3_1`; fixed both the class and attribute names.
        self.dense_block3_1 = BottleneckBlockdls(64, 64)
        self.trans_block3_1 = TransitionBlock3(128, 64)

        self.dense_block3_2 = BottleneckBlockdls(64, 64)
        self.trans_block3_2 = TransitionBlock3(128, 64)

        self.dense_block4 = BottleneckBlockdls(128, 64)
        self.trans_block4 = TransitionBlock(192, 64)

        ############# Block5-up  16-16 ##############
        self.dense_block5 = BottleneckBlockdls(128, 64)
        self.trans_block5 = TransitionBlock(196, 64)

        self.dense_block6 = BottleneckBlockrs1(64, 64)
        self.trans_block6 = TransitionBlock3(128, 16)

        # 23 = 16 decoder channels + 7 single-channel side outputs (see
        # the concatenation at the end of forward()).
        self.conv_refin = nn.Conv2d(23, 16, 3, 1, 1)
        self.conv_refin64 = nn.Conv2d(192, 16, 3, 1, 1)
        # NOTE(review): named `tanh` but actually a Sigmoid; name kept so
        # forward() and any saved state_dicts keep working.
        self.tanh = nn.Sigmoid()

        self.conv_refine3 = nn.Conv2d(16, 4, kernel_size=3, stride=1, padding=1)
        self.conv_refine3_i = nn.Conv2d(16, 4, kernel_size=3, stride=1, padding=1)

        # F.upsample_nearest is a deprecated alias of
        # F.interpolate(mode='nearest'); kept for behavioral parity.
        self.upsample = F.upsample_nearest

        self.relu = nn.LeakyReLU(0.2, inplace=True)
        self.refineclean1 = nn.Conv2d(4, 8, kernel_size=7, stride=1, padding=3)
        self.refineclean2 = nn.Conv2d(8, 4, kernel_size=3, stride=1, padding=1)

        # Per-scale 1-channel side outputs. The original defined conv11 and
        # conv21 twice with identical arguments; the duplicates are removed.
        self.conv11 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)
        self.conv21 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)
        self.conv31 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)
        self.conv3_11 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)
        self.conv3_21 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)
        self.conv41 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)
        self.conv51 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)

        # NOTE(review): neither batchnorm is used in forward(); kept for
        # interface/checkpoint compatibility.
        self.batchnorm20 = nn.BatchNorm2d(20)
        self.batchnorm1 = nn.BatchNorm2d(1)

    def forward(self, x, x_64):
        x1 = self.dense_block1(x)
        x2 = self.trans_block1(x1)

        ## 32x32 -- NOTE(review): x2 from trans_block1 is immediately
        ## overwritten; the original concatenated x1 (not x2) with x_64,
        ## which matches dense_block2's 67 input channels. Confirm intent.
        x2 = self.dense_block2(torch.cat([x1, x_64], 1))
        x2 = self.trans_block2(x2)

        ## 16x16 -- was `self.dense_block3(x3)`: x3 was used before
        ## assignment; the only feature map available here is x2.
        x3 = self.dense_block3(x2)
        x3 = self.trans_block3(x3)

        x3_1 = self.dense_block3_1(x3)
        x3_1 = self.trans_block3_1(x3_1)

        x3_2 = self.dense_block3_2(x3_1)
        x3_2 = self.trans_block3_2(x3_2)

        # classifier
        x4_in = torch.cat([x3_2, x2], 1)
        x4 = self.dense_block4(x4_in)
        x4 = self.trans_block4(x4)
        x5_in = torch.cat([x4, x1], 1)
        x5_i = self.dense_block5(x5_in)
        xhat64 = self.relu(self.conv_refin64(x5_i))
        # was `self.refine3_i`: no such attribute; conv_refine3_i is the
        # layer defined in __init__ for exactly this 16 -> 4 projection.
        xhat64 = self.tanh(self.conv_refine3_i(xhat64))
        x5 = self.trans_block5(torch.cat([x5_i, xhat64], 1))

        x6 = self.dense_block6(x5)
        # was `self.trans_block`: undefined; trans_block6 is the stage-6
        # transition created in __init__.
        x6 = self.trans_block6(x6)
        shape_out = x6.data.size()[2:4]
        x11 = self.upsample(self.relu(self.conv11(x1)), size=shape_out)
        x21 = self.upsample(self.relu(self.conv21(x2)), size=shape_out)
        # was `x3l` (lowercase L): the concatenation below reads `x31`.
        x31 = self.upsample(self.relu(self.conv31(x3)), size=shape_out)
        x3_11 = self.upsample(self.relu(self.conv3_11(x3_1)), size=shape_out)
        x3_21 = self.upsample(self.relu(self.conv3_21(x3_2)), size=shape_out)
        # was `self,relu` (comma typo).
        x41 = self.upsample(self.relu(self.conv41(x4)), size=shape_out)
        x51 = self.upsample(self.relu(self.conv51(x5)), size=shape_out)
        # 16 + 7 * 1 = 23 channels, matching conv_refin.
        x6 = torch.cat([x6, x51, x41, x3_21, x3_11, x31, x21, x11], 1)
        x7 = self.relu(self.conv_refin(x6))
        residual = self.tanh(self.conv_refine3(x7))

        return residual, xhat64



