# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow import layers

'''
    UNet 3+
'''
class CNR():
    """Conv2D -> BatchNorm -> ReLU building block.

    Args:
        out_size: number of output channels.
        ks: convolution kernel size.
        s: convolution stride.
        p: padding mode (default 'same').
    """
    def __init__(self, out_size, ks, s, p='same'):
        self.conv = layers.Conv2D(out_size, ks, s, p)
        self.bn = layers.BatchNormalization()
        self.relu = tf.nn.relu

    def __call__(self, input, training):
        """Run the block; `training` toggles batch-norm moving statistics."""
        out = self.conv(input)
        out = self.bn(out, training=training)
        return self.relu(out)

class CR():
    """Conv2D -> ReLU building block (no batch norm).

    Args:
        out_size: number of output channels.
        ks: convolution kernel size.
        s: convolution stride.
        p: padding mode (default 'same').
    """
    def __init__(self, out_size, ks, s, p='same'):
        # TODO: kaiming (He) weight initialization.
        self.conv = layers.Conv2D(out_size, ks, s, p)
        self.relu = tf.nn.relu

    def __call__(self, input, training):
        # `training` is accepted (but unused) so CR and CNR share a call
        # signature and are interchangeable inside unetConv2.
        return self.relu(self.conv(input))

class unetConv2():
    """A stack of `n` identical conv blocks (CNR with batch norm, CR without).

    The blocks are stored as attributes `conv1` .. `convN` and applied in
    order by `__call__`.
    """
    def __init__(self, out_size, is_batchnorm, n=2, ks=3, stride=1, padding="same"):
        self.n = n
        self.ks = ks
        self.stride = stride
        self.padding = padding
        # Pick the block type once, then register conv1..convN as attributes.
        block_cls = CNR if is_batchnorm else CR
        for i in range(1, n + 1):
            setattr(self, 'conv%d' % i, block_cls(out_size, ks, stride, padding))

    def __call__(self, inputs, training):
        """Feed `inputs` through conv1..convN sequentially."""
        x = inputs
        for i in range(1, self.n + 1):
            block = getattr(self, 'conv%d' % i)
            x = block(x, training)
        return x

class E2D_down():
    """Encoder->decoder skip branch: optional max-pool, then Conv-BN-ReLU.

    Args:
        ks_pool: pooling window size (ignored when use_max_pool is False).
        s_pool: pooling stride (ignored when use_max_pool is False).
        o_ch: output channels of the 3x3 convolution.
        use_max_pool: when False the input skips pooling entirely (used for
            the same-resolution "concatenation" branches of UNet 3+).
    """
    def __init__(self, ks_pool, s_pool, o_ch, use_max_pool=True):
        self.use_max_pool = use_max_pool
        # Only build the pooling layer when it will actually be applied;
        # the concatenation branches pass dummy pool sizes that were
        # previously used to construct a layer that was never called.
        self.max_pool = layers.MaxPooling2D(ks_pool, s_pool) if use_max_pool else None
        self.conv = layers.Conv2D(o_ch, 3, 1, "same")
        self.bn = layers.BatchNormalization()
        self.relu = tf.nn.relu

    def __call__(self, input, training):
        """Apply (optional) pooling, conv, batch norm, ReLU."""
        x = self.max_pool(input) if self.use_max_pool else input
        x = self.conv(x)
        x = self.bn(x, training=training)
        return self.relu(x)


class E2D_up():
    """Decoder up-sampling branch: resize by `scale_factor`, then Conv-BN-ReLU.

    Args:
        scale_factor: integer factor by which height and width are enlarged.
        o_ch: output channels of the 3x3 convolution.
    """
    def __init__(self, scale_factor, o_ch):
        self.scale_factor = scale_factor
        # Keep the original misspelled attribute as an alias so any external
        # reader of `sacal_factor` keeps working.
        self.sacal_factor = scale_factor
        self.conv = layers.Conv2D(o_ch, 3, 1, "same")
        self.bn = layers.BatchNormalization()
        self.relu = tf.nn.relu

    def __call__(self, input, training):
        # NHWC layout: static shape is (batch, height, width, channels).
        # The original code unpacked these as `_, w, h, _` — the values were
        # passed to resize in the right order, but the names were swapped.
        _, h, w, _ = input.shape.as_list()
        if h is None or w is None:
            # Static spatial dims unknown (fully dynamic graph input):
            # fall back to the runtime shape, otherwise `None * int` raises.
            size = tf.shape(input)[1:3] * self.scale_factor
        else:
            size = [h * self.scale_factor, w * self.scale_factor]
        x = tf.image.resize_images(input, size)
        x = self.conv(x)
        x = self.bn(x, training=training)
        return self.relu(x)

class Fusion():
    """Fuse concatenated decoder features with a single Conv-BN-ReLU.

    Args:
        o_ch: output channels of the 3x3 convolution.
    """
    def __init__(self, o_ch):
        self.conv = layers.Conv2D(o_ch, 3, 1, "same")
        self.bn = layers.BatchNormalization()
        self.relu = tf.nn.relu

    def __call__(self, input, training):
        """Apply conv, batch norm (training-aware), then ReLU."""
        fused = self.conv(input)
        fused = self.bn(fused, training=training)
        return self.relu(fused)

class UNet_3Plus():
    """UNet 3+ with full-scale skip connections.

    Encoder: 5 stages of unetConv2 + 2x max-pooling.
    Decoder: each stage hd4..hd1 concatenates 5 same-resolution feature maps
    (pooled shallower encoder maps, the same-scale encoder map, and upsampled
    deeper decoder maps), each reduced to CatChannels first, then fused.
    Output: 3x3 conv to n_classes followed by sigmoid.

    NOTE(review): `feature_scale` and `is_deconv` are stored but never used
    anywhere in this implementation; `filters` is hard-coded.
    The spatial sizes in the comments below assume a 320x320 input.
    """
    # TODO: add kaiming (He) initialization
    def __init__(self, n_classes=1, feature_scale=4, is_deconv=True, is_batchnorm=True):
        self.is_deconv = is_deconv
        self.is_batchnorm = is_batchnorm
        self.feature_scale = feature_scale

        # Encoder channel widths for stages 1..5.
        filters = [64, 128, 256, 512, 1024]

        ## -------------Encoder--------------
        self.conv1 = unetConv2(filters[0], self.is_batchnorm)
        self.maxpool1 = layers.MaxPooling2D(pool_size=2,strides=2)


        self.conv2 = unetConv2(filters[1], self.is_batchnorm)
        self.maxpool2 = layers.MaxPooling2D(pool_size=2,strides=2)

        self.conv3 = unetConv2(filters[2], self.is_batchnorm)
        self.maxpool3 = layers.MaxPooling2D(pool_size=2,strides=2)

        self.conv4 = unetConv2(filters[3], self.is_batchnorm)
        self.maxpool4 = layers.MaxPooling2D(pool_size=2,strides=2)

        self.conv5 = unetConv2(filters[4], self.is_batchnorm)

        ## -------------Decoder--------------
        # Every skip branch is projected to CatChannels (=64); each decoder
        # stage concatenates CatBlocks (=5) branches, so fused maps have
        # UpChannels (=320) channels.
        self.CatChannels = filters[0]
        self.CatBlocks = 5
        self.UpChannels = self.CatChannels * self.CatBlocks

        '''stage 4d'''
        # h1->320*320, hd4->40*40, Pooling 8 times
        self.h1_PT_hd4_relu = E2D_down(8,8,self.CatChannels)

        # h2->160*160, hd4->40*40, Pooling 4 times
        self.h2_PT_hd4_relu = E2D_down(4,4,self.CatChannels)

        # h3->80*80, hd4->40*40, Pooling 2 times
        self.h3_PT_hd4_relu = E2D_down(2,2,self.CatChannels)

        # h4->40*40, hd4->40*40, Concatenation (pool args ignored: use_max_pool=False)
        self.h4_Cat_hd4_relu = E2D_down(2,2,self.CatChannels,False)

        # hd5->20*20, hd4->40*40, Upsample 2 times
        self.hd5_UT_hd4_relu = E2D_up(2,self.CatChannels)

        # fusion(h1_PT_hd4, h2_PT_hd4, h3_PT_hd4, h4_Cat_hd4, hd5_UT_hd4)
        self.relu4d_1 = Fusion(self.UpChannels)

        '''stage 3d'''
        # h1->320*320, hd3->80*80, Pooling 4 times
        self.h1_PT_hd3_relu = E2D_down(4,4,self.CatChannels)

        # h2->160*160, hd3->80*80, Pooling 2 times
        self.h2_PT_hd3_relu = E2D_down(2,2,self.CatChannels)

        # h3->80*80, hd3->80*80, Concatenation (pool args ignored: use_max_pool=False)
        self.h3_Cat_hd3_relu = E2D_down(4,4,self.CatChannels,False)

        # hd4->40*40, hd3->80*80, Upsample 2 times
        self.hd4_UT_hd3_relu = E2D_up(2,self.CatChannels)

        # hd5->20*20, hd3->80*80, Upsample 4 times
        self.hd5_UT_hd3_relu = E2D_up(4,self.CatChannels)

        # fusion(h1_PT_hd3, h2_PT_hd3, h3_Cat_hd3, hd4_UT_hd3, hd5_UT_hd3)
        self.relu3d_1 = Fusion(self.UpChannels)

        '''stage 2d '''
        # h1->320*320, hd2->160*160, Pooling 2 times
        self.h1_PT_hd2_relu = E2D_down(2,2,self.CatChannels)

        # h2->160*160, hd2->160*160, Concatenation (pool args ignored: use_max_pool=False)
        self.h2_Cat_hd2_relu = E2D_down(2,2,self.CatChannels,False)

        # hd3->80*80, hd2->160*160, Upsample 2 times
        self.hd3_UT_hd2_relu = E2D_up(2,self.CatChannels)

        # hd4->40*40, hd2->160*160, Upsample 4 times
        self.hd4_UT_hd2_relu = E2D_up(4,self.CatChannels)

        # hd5->20*20, hd2->160*160, Upsample 8 times
        self.hd5_UT_hd2_relu = E2D_up(8,self.CatChannels)

        # fusion(h1_PT_hd2, h2_Cat_hd2, hd3_UT_hd2, hd4_UT_hd2, hd5_UT_hd2)
        self.relu2d_1 = Fusion(self.UpChannels)

        '''stage 1d'''
        # h1->320*320, hd1->320*320, Concatenation (pool args ignored: use_max_pool=False)
        self.h1_Cat_hd1_relu = E2D_down(2,2,self.CatChannels,False)

        # hd2->160*160, hd1->320*320, Upsample 2 times
        self.hd2_UT_hd1_relu = E2D_up(2,self.CatChannels)

        # hd3->80*80, hd1->320*320, Upsample 4 times
        self.hd3_UT_hd1_relu = E2D_up(4,self.CatChannels)

        # hd4->40*40, hd1->320*320, Upsample 8 times
        self.hd4_UT_hd1_relu = E2D_up(8,self.CatChannels)

        # hd5->20*20, hd1->320*320, Upsample 16 times
        self.hd5_UT_hd1_relu = E2D_up(16,self.CatChannels)

        # fusion(h1_Cat_hd1, hd2_UT_hd1, hd3_UT_hd1, hd4_UT_hd1, hd5_UT_hd1)
        self.relu1d_1 = Fusion(self.UpChannels)

        # output: project fused full-resolution features to n_classes maps
        self.outconv1 = layers.Conv2D(n_classes,3,1,"same")

    def __call__(self, inputs,training):
        """Build the forward graph; returns a sigmoid map (batch, H, W, n_classes)."""
        ## -------------Encoder-------------
        h1 = self.conv1(inputs,training)  # h1->320*320*64

        h2 = self.maxpool1(h1)
        h2 = self.conv2(h2,training)  # h2->160*160*128

        h3 = self.maxpool2(h2)
        h3 = self.conv3(h3,training)  # h3->80*80*256

        h4 = self.maxpool3(h3)
        h4 = self.conv4(h4,training)  # h4->40*40*512

        h5 = self.maxpool4(h4)
        hd5 = self.conv5(h5,training)  # h5->20*20*1024

        ## -------------Decoder-------------
        # Each stage: bring all five scales to this stage's resolution and
        # 64 channels, concatenate along channels, then fuse.
        h1_PT_hd4 = self.h1_PT_hd4_relu(h1,training)
        h2_PT_hd4 = self.h2_PT_hd4_relu(h2,training)
        h3_PT_hd4 = self.h3_PT_hd4_relu(h3,training)
        h4_Cat_hd4 = self.h4_Cat_hd4_relu(h4,training)
        hd5_UT_hd4 = self.hd5_UT_hd4_relu(hd5,training)
        hd4 = self.relu4d_1(tf.concat([h1_PT_hd4, h2_PT_hd4, h3_PT_hd4, h4_Cat_hd4, hd5_UT_hd4], -1),training)  # hd4->40*40*UpChannels

        h1_PT_hd3 = self.h1_PT_hd3_relu(h1,training)
        h2_PT_hd3 = self.h2_PT_hd3_relu(h2,training)
        h3_Cat_hd3 = self.h3_Cat_hd3_relu(h3,training)
        hd4_UT_hd3 = self.hd4_UT_hd3_relu(hd4,training)
        hd5_UT_hd3 = self.hd5_UT_hd3_relu(hd5,training)
        hd3 = self.relu3d_1(tf.concat([h1_PT_hd3, h2_PT_hd3, h3_Cat_hd3, hd4_UT_hd3, hd5_UT_hd3], -1),training)  # hd3->80*80*UpChannels

        h1_PT_hd2 = self.h1_PT_hd2_relu(h1,training)
        h2_Cat_hd2 = self.h2_Cat_hd2_relu(h2,training)
        hd3_UT_hd2 = self.hd3_UT_hd2_relu(hd3,training)
        hd4_UT_hd2 = self.hd4_UT_hd2_relu(hd4,training)
        hd5_UT_hd2 = self.hd5_UT_hd2_relu(hd5,training)
        hd2 = self.relu2d_1(tf.concat([h1_PT_hd2, h2_Cat_hd2, hd3_UT_hd2, hd4_UT_hd2, hd5_UT_hd2], -1),training)  # hd2->160*160*UpChannels

        h1_Cat_hd1 = self.h1_Cat_hd1_relu(h1,training)
        hd2_UT_hd1 = self.hd2_UT_hd1_relu(hd2,training)
        hd3_UT_hd1 = self.hd3_UT_hd1_relu(hd3,training)
        hd4_UT_hd1 = self.hd4_UT_hd1_relu(hd4,training)
        hd5_UT_hd1 = self.hd5_UT_hd1_relu(hd5,training)
        hd1 = self.relu1d_1(tf.concat([h1_Cat_hd1, hd2_UT_hd1, hd3_UT_hd1, hd4_UT_hd1, hd5_UT_hd1], -1),training)  # hd1->320*320*UpChannels

        d1 = self.outconv1(hd1)  # d1->320*320*n_classes
        return tf.nn.sigmoid(d1)

if __name__ == '__main__':
    # Smoke test: build the graph for a batch of 320x320 RGB images.
    # (Renamed the placeholder variable so it no longer shadows builtin `input`.)
    images = tf.placeholder(tf.float32, shape=[None, 320, 320, 3])
    model = UNet_3Plus()
    x = model(images, True)
    print("a")