import tensorflow as tf

# Shared weight initialization (Xavier/Glorot) and L2 weight decay
# (scale 1e-5) applied by every trainable conv helper in this file
# (TF1-era tf.contrib API).
initializer = tf.contrib.layers.xavier_initializer()
regularizer = tf.contrib.layers.l2_regularizer(scale=1e-5)

def Conv(inp, feat, ksize, stride, keep, training, processes='cgr'):
    """Apply a configurable pipeline of 3-D conv / norm / activation / dropout ops.

    Each character of `processes` selects one op, applied left to right:
      'c' conv3d (feat filters, ksize/stride, "same" padding, xavier init, L2 reg)
      'b' batch normalization (uses `training`)
      'g' group norm (16 groups, no scale, reduced over the three spatial axes)
      'r' relu    'e' elu    'l' leaky_relu(alpha=0.3)
      's' spatial dropout with rate = 1 - keep
      'd' dropout with keep_prob = keep
      'i' instance norm
      'a' additive residual from the block input (1x1x1 conv projection when
          the channel counts differ)

    Args:
        inp: 5-D tensor; the channel axis is read at index 4
            (assumes NDHWC layout — TODO confirm with callers).
        feat: output channels for 'c' (and the 'a' projection).
        ksize, stride: conv3d kernel size and stride.
        keep: keep probability used by 'd' and 's'.
        training: bool/placeholder forwarded to batch normalization.
        processes: string of op codes, applied in order.

    Returns:
        The transformed tensor.
    """
    inpc = inp  # remember the block input for the 'a' residual branch
    for process in processes:
        if process == 'c':
            inp = tf.layers.conv3d(inp, feat, ksize, stride, padding="same",
                                   use_bias=True, activation=None,
                                   kernel_initializer=initializer,
                                   kernel_regularizer=regularizer)
        elif process == 'b':
            inp = tf.layers.batch_normalization(inp, training=training)
        elif process == 'g':
            inp = tf.contrib.layers.group_norm(inp, groups=16, scale=False,
                                               reduction_axes=(-4, -3, -2))
        elif process == 'r':
            inp = tf.nn.relu(inp)
        elif process == 'e':
            inp = tf.nn.elu(inp)
        elif process == 'l':
            inp = tf.nn.leaky_relu(inp, 0.3)
        elif process == 's':
            inp = tf.keras.layers.SpatialDropout3D(rate=1.0-keep)(inp)
        elif process == 'd':
            inp = tf.nn.dropout(inp, keep_prob=keep)
        elif process == 'i':
            inp = tf.contrib.layers.instance_norm(inp)
        elif process == 'a':
            if feat == inpc.get_shape().as_list()[4]:
                inp += inpc
            else:
                # BUG FIX: the recursive call was Conv(inpc, feat, 1, 1, 'c'),
                # which put 'c' into the `keep` slot (missing keep/training),
                # so the projection ran the default 'cgr' pipeline instead of
                # a bare 1x1x1 conv. Forward keep/training and request 'c'.
                inp += Conv(inpc, feat, 1, 1, keep, training, 'c')
    return inp


def DoubleConv(inp, feats, keep, training, shortcut=False):
    """Two stacked Conv stages, selected by a single `processes` string.

    shortcut=True : conv-groupnorm-relu-dropout-conv-groupnorm-residual-relu
    shortcut=False: conv-relu-batchnorm-dropout-conv-relu-batchnorm

    Args:
        inp: 5-D input tensor.
        feats: sequence of channel counts; only feats[0] is used here
            (NOTE(review): feats[1] is ignored — confirm this is intended).
        keep: keep probability for the dropout stage.
        training: flag forwarded to batch normalization.
        shortcut: selects the residual (group-norm) variant.

    Returns:
        The transformed tensor.
    """
    # The original carried a large unreachable triple-quoted block after the
    # return statements; that dead code has been removed.
    processes = 'cgrdcgar' if shortcut else 'crbdcrb'
    return Conv(inp, feats[0], 3, 1, keep, training, processes)

def preActive(inp, keep, feats=(64, 64), training=False):
    """Pre-activation residual block: two (groupnorm-relu-conv) stages + skip.

    Args:
        inp: 5-D input tensor; channel count read at axis 4.
        keep: keep probability, forwarded to Conv (unused by 'g'/'r'/'c' ops).
        feats: channel counts for the two conv stages. Changed from a mutable
            list default to a tuple; indexing behaviour is identical.
        training: flag forwarded to Conv (new keyword with a default, so
            existing callers are unaffected).

    Returns:
        The block output with an identity or 1x1x1-projected skip connection.
    """
    # BUG FIX: the original called Conv with six positional args, so the
    # processes string ('grc' / 'c') landed in the `training` parameter and
    # Conv silently ran its default 'cgr' pipeline instead of the intended
    # pre-activation order. All three calls now pass training explicitly.
    conv = Conv(inp, feats[0], 3, 1, keep, training, 'grc')
    conv = Conv(conv, feats[1], 3, 1, keep, training, 'grc')
    if feats[1] == inp.get_shape().as_list()[4]:
        conv += inp
    else:
        conv += Conv(inp, feats[1], 1, 1, keep, training, 'c')
    return conv


def MaxPool3d(input):
    """Halve each spatial dimension of a 5-D tensor via 2x2x2 max pooling."""
    # NOTE: `input` shadows the builtin but is kept for caller compatibility.
    return tf.layers.max_pooling3d(input, pool_size=2, strides=2,
                                   padding="valid")


def MaxPool3dConv(inp, feat=64):
    """Learned downsampling: stride-2, 3x3x3 conv3d to `feat` channels.

    NOTE(review): unlike Conv/Upsamping3d this omits the shared xavier
    initializer and L2 regularizer — confirm whether that is intentional.
    """
    downsampled = tf.layers.conv3d(inp, filters=feat, kernel_size=3,
                                   strides=2, padding="same",
                                   use_bias=True, activation=None)
    return downsampled


def Upsamping3d(input, feat=64):
    """Stride-2 transposed 3-D convolution: doubles each spatial dimension.

    Applies ReLU inside the layer and uses the module-level xavier
    initializer and L2 regularizer. (The name typo 'Upsamping' is kept so
    existing callers keep working.)
    """
    upsampled = tf.layers.conv3d_transpose(
        input, feat, 3, strides=2, padding="same",
        activation=tf.nn.relu,
        kernel_initializer=initializer,
        kernel_regularizer=regularizer)
    return upsampled


def NNUpsamping3d(input, scale=2, feats=64):
    """Upsample a 5-D tensor by integer factor `scale` along each spatial axis.

    NOTE(review): tf.keras.layers.UpSampling3D repeats values
    (nearest-neighbour); the previous "linear interpolate" description
    looked wrong — confirm against the Keras docs. `feats` is unused here;
    it belongs to the disabled follow-up Conv kept below for reference.
    """
    nn = tf.keras.layers.UpSampling3D(scale)(input)
    return nn
    #return Conv(nn, feats, 3, 1, keep, 'c')


#------------------------------------------------------------------------------------------------------------------------#
# NOTE(review): everything below is disabled experimental code (DACblock,
# resample, SPPBlock) parked inside a module-level string literal; it is
# never executed and is kept only for reference.
'''

def DACblock(input):
    """ https://arxiv.org/pdf/1903.02740.pdf """
    features = input.get_shape().as_list()[4]
    activation = tf.nn.relu

    x1 = tf.layers.conv3d(input, features, 3, 1, "same", activation=activation,
                          kernel_initializer=initializer, dilation_rate=1)

    x2 = tf.layers.conv3d(input, features, 3, 1, "same", activation=None,
                          kernel_initializer=initializer, dilation_rate=3)
    x2 = tf.layers.conv3d(x2, features, 1, 1, "same", activation=activation,
                          kernel_initializer=initializer, dilation_rate=1)

    x3 = tf.layers.conv3d(input, features, 3, 1, "same", activation=None,
                          kernel_initializer=initializer, dilation_rate=1)
    x3 = tf.layers.conv3d(x3, features, 3, 1, "same", activation=None,
                          kernel_initializer=initializer, dilation_rate=3)
    x3 = tf.layers.conv3d(x3, features, 1, 1, "same", activation=activation,
                          kernel_initializer=initializer, dilation_rate=1)

    x4 = tf.layers.conv3d(input, features, 3, 1, "same", activation=None,
                          kernel_initializer=initializer, dilation_rate=1)
    x4 = tf.layers.conv3d(x4, features, 3, 1, "same", activation=None,
                          kernel_initializer=initializer, dilation_rate=3)
    x4 = tf.layers.conv3d(x4, features, 3, 1, "same", activation=None,
                          kernel_initializer=initializer, dilation_rate=5)
    x4 = tf.layers.conv3d(x4, features, 1, 1, "same", activation=activation,
                          kernel_initializer=initializer, dilation_rate=1)

    out = input + x1 + x2 + x3 + x4
    return out


def resample(tensor, newSize):
    """ aribrity size resampling of 5D Tensor """
    bs, sizeX, sizeY, sizeZ, chns = tensor.get_shape().as_list()
    newSizeX, newSizeY, newSizeZ = newSize

    squeezeX = tf.reshape(tensor, [-1, sizeY, sizeZ, chns])
    resizeX = tf.image.resize_bilinear(squeezeX, [newSizeY, newSizeY])
    resumeX = tf.reshape(resizeX, [bs, sizeX, newSizeY, newSizeZ, chns])

    # resize x-y
    #   first reorient
    reoriented = tf.transpose(resumeX, [0, 3, 2, 1, 4])
    #   squeeze and 2d resize
    squeezeZ = tf.reshape(reoriented, [-1, newSizeY, sizeX, chns])
    resizeZ = tf.image.resize_bilinear(squeezeZ, [newSizeY, newSizeX])
    resumeZ = tf.reshape(resizeZ, [bs, newSizeZ, newSizeY, newSizeX, chns])

    return tf.transpose(resumeZ, [0, 3, 2, 1, 4])


def SPPBlock(input):
    _, x, y, z, features = input.get_shape().as_list()
    initializer = tf.contrib.layers.xavier_initializer()
    activation = tf.nn.relu

    poolSizes = [2, 3, 5, 6]
    layers = [input]
    for poolSize in poolSizes:
        pool = tf.layers.max_pooling3d(
            input, poolSize, poolSize, padding="valid")
        reuse = None if poolSize == 2 else True
        conv = tf.layers.conv3d(pool, 1, 1, 1, "same", activation=None,
                                kernel_initializer=initializer, reuse=reuse, name='SPPBlock')
        layers.append(resample(conv, [x, y, z]))

    return tf.concat(layers, -1)
'''
