﻿from .layers import *

import tensorflow as tf

# Xavier/Glorot weight initializer shared by the final projection conv
# below (TF1 `tf.contrib` API — removed in TF2; see tf.keras.initializers).
initializer = tf.contrib.layers.xavier_initializer()

def mynetwork(inputs, training, keep_prob, num_layers=3):
    """
    Builds a 3D convolutional U-Net for the given parametrization.

    Args:
        inputs: input tensor — assumed shape [?, nx, ny, nz, channels];
            TODO confirm against callers.
        training: training-mode flag forwarded to each DoubleConv block
            (presumably controls batch-norm mode — verify in .layers).
        keep_prob: dropout keep probability in (0, 1], used both inside
            DoubleConv and for the dropout applied after the first block.
        num_layers: number of downsampling (and matching upsampling) levels.

    Returns:
        Single-channel output tensor (1x1x1 conv, no activation — logits).
    """

    # Encoder widths double per level: 32, 64, 128, ...; the decoder mirrors
    # them in reverse. Each width is duplicated ([f, f]) because DoubleConv
    # takes a pair of per-conv feature counts.
    encoder_features = [32 * 2 ** i for i in range(num_layers + 1)]
    features = encoder_features + encoder_features[::-1]
    features = [[f, f] for f in features]

    skip_connections = {}
    tensor = DoubleConv(inputs, features[0], keep_prob, training)

    # Dropout after the first conv block. NOTE(review): no noise_shape is
    # given, so this is element-wise dropout, not spatial (channel-wise)
    # dropout as the original comment claimed.
    tensor = tf.nn.dropout(tensor, keep_prob=keep_prob)

    # Contracting path: conv block, stash the skip tensor, then downsample.
    for layer in range(num_layers):
        tensor = DoubleConv(tensor, features[1 + layer], keep_prob, training)
        skip_connections[layer] = tensor
        tensor = MaxPool3d(tensor)

    # Bottleneck block at the lowest resolution.
    tensor = DoubleConv(tensor, features[1 + num_layers], keep_prob, training)

    # Expanding path: nearest-neighbour upsample, concatenate the matching
    # skip tensor along the channel axis, then conv block.
    for layer in range(num_layers - 1, -1, -1):
        upsampled = NNUpsamping3d(tensor)
        concated = tf.concat([skip_connections[layer], upsampled], -1)
        tensor = DoubleConv(concated, features[1 + num_layers * 2 - layer],
                            keep_prob, training)

    # 1x1x1 projection down to one output channel; activation left to the
    # caller (loss is presumably applied to raw logits).
    last_conv = tf.layers.conv3d(tensor, 1, 1, padding="same",
                                 use_bias=True, activation=None,
                                 kernel_initializer=initializer)

    return last_conv
