import sys
import numpy as np
import tensorflow as tf
import SimpleITK as sitk
import nibabel as nib
from collections import OrderedDict

def slide(array, start, size):
    """Return the sub-volume of ``array`` with extent ``size`` anchored at ``start``."""
    x0, y0, z0 = start
    dx, dy, dz = size
    return array[x0:x0 + dx, y0:y0 + dy, z0:z0 + dz]

def connectedComponentFilter(array, isFirst=False, threshold=512):
    """Return a binary mask of the largest connected component of ``array``.

    The input is labeled with SimpleITK's face-connected (FullyConnected=False)
    component filter.  Components smaller than ``threshold`` voxels are
    discarded.  When ``isFirst`` is True and at least one component larger
    than ``threshold`` touches the bottom slice (z index 0), only those
    bottom-touching components are eligible — presumably to anchor the first
    vertebra search at the bottom of the scan (TODO confirm with caller).

    Parameters:
        array: 3-D array; non-zero voxels are foreground.
        isFirst: restrict selection to bottom-touching components when any exist.
        threshold: minimum component size in voxels.

    Returns:
        uint8 array of the same shape — 1 inside the selected component,
        all zeros when no component passes the size filter.
    """
    connectedFilter = sitk.ConnectedComponentImageFilter()
    connectedFilter.SetDebug(False)
    connectedFilter.SetFullyConnected(False)

    tempImage = sitk.GetImageFromArray(array.astype(np.uint8))
    labels = sitk.GetArrayFromImage(connectedFilter.Execute(tempImage))

    # Single pass per label collecting (size, touches-bottom); the original
    # re-scanned the whole volume (sum + argwhere) twice per label.
    stats = []
    for label in range(1, int(labels.max()) + 1):
        mask = labels == label
        size = int(mask.sum())
        touches_bottom = bool(np.argwhere(mask)[:, 2].min() == 0)
        stats.append((size, touches_bottom))

    # Bottom anchoring is only requested for the first patch, and only when a
    # sufficiently large bottom-touching component actually exists.
    require_bottom = isFirst and any(
        size > threshold and touches for size, touches in stats)

    best_label = -1  # renamed from ``max``/``maxLabel``: never shadow the builtin
    best_size = -1
    for label, (size, touches) in enumerate(stats, start=1):
        if size < threshold:
            continue
        if require_bottom and not touches:
            continue
        if size > best_size:
            best_size = size
            best_label = label

    # ``labels == -1`` is all-False, so "nothing selected" yields a zero mask,
    # matching the original behavior.
    return (labels == best_label).astype(np.uint8)


def centerOfSegment(array, ROICenter):
    """Integer (truncated) centroid of the non-zero voxels of ``array``,
    translated by the per-axis offsets in ``ROICenter``."""
    centroid = np.argwhere(array).mean(axis=0)
    return [int(centroid[axis]) + ROICenter[axis] for axis in range(3)]

def Conv3d(x, dr, is_training, features1=64, features2=64, last=False, dilate_rate=1):
    """Two 3x3x3 convolutions, each followed by batch normalization, with
    dropout (rate ``dr``) between them.

    The second convolution uses ReLU with ``dilate_rate`` normally; when
    ``last`` is True it uses sigmoid with no dilation instead.
    """
    def _conv(inputs, filters, activation, dilation):
        # Shared conv3d settings: same padding, Xavier init, L2(0.01) regularizer.
        return tf.layers.conv3d(
            inputs, filters, [3, 3, 3], padding="same", use_bias=True,
            activation=activation, dilation_rate=dilation,
            kernel_initializer=tf.contrib.layers.xavier_initializer(),
            kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.01))

    normalized1 = tf.layers.batch_normalization(
        _conv(x, features1, tf.nn.relu, dilate_rate), training=is_training)
    dropped = tf.layers.dropout(normalized1, rate=dr)

    if last:
        second = _conv(dropped, features2, tf.nn.sigmoid, 1)
    else:
        second = _conv(dropped, features2, tf.nn.relu, dilate_rate)

    return tf.layers.batch_normalization(second, training=is_training)

def MaxPool3d(x):
    """2x2x2 max pooling with stride 2 and valid padding (halves each axis)."""
    window = [2, 2, 2]
    return tf.layers.max_pooling3d(x, window, window, padding="valid")

def Upsamping3d(x, features=64):
    """Stride-2 transposed 3x3x3 convolution (ReLU) that doubles each spatial axis."""
    init = tf.contrib.layers.xavier_initializer()
    reg = tf.contrib.layers.l2_regularizer(scale=0.01)
    return tf.layers.conv3d_transpose(
        x, features, 3, strides=2, padding="same", activation=tf.nn.relu,
        kernel_initializer=init, kernel_regularizer=reg)

def create_conv_net(input_image, input_memory, training, keep, batch_size=1, shape=(96, 96, 80)):
    """Build a 3-level 3D U-Net over the 2-channel (image, memory) input.

    Returns a single-channel sigmoid tensor with the same spatial shape.
    """
    # Reshape both inputs to explicit 5-D volumes: [batch, x, y, z, 1].
    image = tf.reshape(input_image, tf.stack([batch_size, shape[0], shape[1], shape[2], 1]))
    memory = tf.reshape(input_memory, tf.stack([batch_size, shape[0], shape[1], shape[2], 1]))

    skips = OrderedDict()
    node = Conv3d(tf.concat([image, memory], -1), keep, is_training=training)

    # Encoder: three dilated-conv + pool stages; keep each pre-pool tensor
    # as a skip connection for the decoder.
    for level in range(3):
        skips[level] = Conv3d(node, keep, is_training=training, dilate_rate=2)
        node = MaxPool3d(skips[level])

    # Bottleneck.
    node = Conv3d(node, keep, is_training=training)

    # Decoder: upsample, concatenate the matching skip tensor, convolve.
    for level in reversed(range(3)):
        merged = tf.concat([skips[level], Upsamping3d(node)], -1)
        node = Conv3d(merged, keep, is_training=training)

    # Final 1x1x1 projection to a single sigmoid channel.
    return tf.layers.conv3d(
        node, 1, [1, 1, 1], padding="same", use_bias=True, activation=tf.nn.sigmoid,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.01))

######################################################################################################################################################

class Unet(object):
    """TF1 graph wrapper for the 3D U-Net: placeholders, loss, and a Dice-style metric."""

    def __init__(self, channels=1, n_class=1, cost_kwargs=None, **kwargs):
        # FIX: ``cost_kwargs`` was a mutable default argument ({}), shared
        # across all instances; use the None sentinel instead.
        tf.reset_default_graph()
        self.n_class = n_class
        # 5-D placeholders: [batch, x, y, z, channels].
        self.input = tf.placeholder("float", shape=[None, None, None, None, channels])
        self.memory = tf.placeholder("float", shape=[None, None, None, None, channels])
        self.label = tf.placeholder("float", shape=[None, None, None, None, channels])
        self.distance = tf.placeholder("float", shape=[None, None, None, None, channels])
        self.keep_prob = tf.placeholder(tf.float32)
        self.training = tf.placeholder(tf.bool)

        self.output = create_conv_net(self.input, self.memory, self.training, self.keep_prob)
        self.cost = self._get_cost(self.output, {} if cost_kwargs is None else cost_kwargs)

        # Binarize the sigmoid output at 0.5.
        self.predictor = tf.round(self.output)
        self.correct_pred = tf.equal(self.predictor, self.label)

        # Dice coefficient scaled to [0, 200]: 200*|P.L| / (|P| + |L| + eps).
        eps = 1e-5
        intersection = tf.reduce_sum(self.predictor * self.label)
        union = eps + tf.reduce_sum(self.predictor) + tf.reduce_sum(self.label)
        self.accuracy = 200 * intersection / union

    def _get_cost(self, output, cost_kwargs):
        """Distance-weighted asymmetric loss plus the graph's L2 regularization.

        ``cost_kwargs`` is accepted for interface compatibility but unused.
        """
        flat_logits = tf.reshape(output, [-1, 1])
        flat_labels = tf.reshape(self.label, [-1, 1])
        flat_distance = tf.reshape(self.distance, [-1, 1])
        # Voxels with small ``distance`` get up to 9x weight (Gaussian falloff).
        weight = 8 * tf.exp(-flat_distance * flat_distance / 36) + 1

        # False positives weighted 0.2, false negatives 1.0.
        cost = 0.2 * (1 - flat_labels) * flat_logits * weight + (1 - flat_logits) * flat_labels * weight
        return tf.reduce_mean(cost) + tf.losses.get_regularization_loss()

    def predict(self, model_path, x_test, y_test):
        """Restore weights from ``model_path`` and run the predictor on one batch."""
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            self.restore(sess, model_path)
            prediction = sess.run(self.predictor, feed_dict={self.input: x_test,
                                                             self.memory: y_test,
                                                             self.keep_prob: 1.,
                                                             self.training: False})
            return prediction

    def restore(self, sess, model_path):
        """Load trained variables from ``model_path`` into ``sess``.

        BUG FIX: the original created a Saver but never called restore(),
        so ``predict`` silently ran on randomly initialized weights.
        """
        saver = tf.train.Saver()
        saver.restore(sess, model_path)

class Prediction(object):
    """Iterative vertebra-by-vertebra segmentation driver for the trained U-Net."""

    def __init__(self, model_path):
        # model_path: directory containing checkpoint subfolders, e.g. "<path>/1/model.cpkt".
        super(Prediction, self).__init__()
        self.model_path = model_path

    def __predictPatch(self, sess, image_tf, memory_tf, isFirst=False):
        # One forward pass on a single patch; keep only the largest connected
        # component of the rounded network output.
        output = sess.run(self.net.predictor, feed_dict={self.net.input: image_tf,
                                                         self.net.memory: memory_tf,
                                                         self.net.keep_prob: 1.,
                                                         self.net.training: False})
        return connectedComponentFilter(output[0, ..., 0], isFirst)

    def predict(self, data):
        """Segment all vertebrae in the 3-D volume ``data``.

        Scans in 96x96x80 patches until a first vertebra is found, refines its
        position, then walks along axis 2 segmenting one vertebra per outer
        iteration, writing label i into a running "memory" volume that also
        conditions the network.  Returns an integer label map of data's shape.
        (Input is shifted by +1024 before windowing, so presumably CT data —
        TODO confirm with the data pipeline.)
        """
        tf_conf = tf.ConfigProto()
        tf_conf.gpu_options.allow_growth = True
        self.net = Unet()

        input_image = data
        dimensions = input_image.shape

        # define bounding box size
        box_size = [96, 96, 80]
        delta = [box_size[i] // 2 for i in range(3)]  # half-box scan/refine steps
        box_corner = [0, 0, 0]

        # normalization: window intensities (after +1024 shift) into [0, 1]
        nRange = [500, 3500]
        input_image = (input_image + 1024 - nRange[0]) / (nRange[1] - nRange[0])
        input_image = np.clip(input_image, 0.0, 1.0)
        input_image = input_image.astype(np.float32)

        # extend image: zero-pad so any corner inside the original volume can
        # hold a full patch without bounds checks
        extend_size = [dimensions[i] + box_size[i] + 10 for i in range(3)]
        extended_image = np.zeros(extend_size, dtype=np.float32)
        extended_image[0: dimensions[0], 0: dimensions[1], 0: dimensions[2]] = input_image
        extended_memory = np.zeros(extend_size)  # accumulates per-vertebra integer labels

        # array for network input: batch of 1, single channel
        image_tf = np.zeros([1, box_size[0], box_size[1], box_size[2], 1], dtype=np.float32)
        memory_tf = np.zeros([1, box_size[0], box_size[1], box_size[2], 1])

        # segment first visible vertebra
        with tf.Session(config=tf_conf) as sess:
            saver = tf.train.Saver()
            model_path1 = self.model_path + '/1/model.cpkt'
            print(model_path1)
            saver.restore(sess, model_path1)

            # Raster-scan in half-box steps (axis 1 fastest, then axis 0,
            # then axis 2) until some patch yields a non-empty prediction.
            is_found = False
            while not is_found:
                image_tf[0, ..., 0] = slide(extended_image, box_corner, box_size)
                prediction = self.__predictPatch(sess, image_tf, memory_tf, True)
                if prediction.sum() > 0:
                    is_found = True
                    # Jump the corner to the segment's centroid in volume coords.
                    box_corner = centerOfSegment(prediction, box_corner)
                else:
                    if box_corner[1] + box_size[1] > dimensions[1] and box_corner[0] + box_size[0] > dimensions[0]:
                        box_corner[0] = 0
                        box_corner[1] = 0
                        box_corner[2] += delta[2]
                    else:
                        if box_corner[1] + box_size[1] > dimensions[1]:
                            box_corner[1] = 0
                            box_corner[0] += delta[0]
                        else:
                            box_corner[1] += delta[1]

            # Refinement: re-center the patch on the segmented component, 3 passes.
            for iter in range(3):
                box_corner = [max(0, box_corner[idx] - delta[idx]) for idx in range(3)]
                image_tf[0, ..., 0] = slide(extended_image, box_corner, box_size)
                prediction = self.__predictPatch(sess, image_tf, memory_tf, True)

                if iter == 2:
                    # Also try a patch shifted +5 along axis 2; if its result
                    # differs from the current one (overlap >= 25% of it but
                    # Dice <= 0.9), prefer the shifted result.
                    image_tf[0, ..., 0] = extended_image[box_corner[0]: box_corner[0]+ box_size[0], box_corner[1]: box_corner[1]+ box_size[1], box_corner[2] + 5: box_corner[2] + 5 + box_size[2]]
                    prediction2 = self.__predictPatch(sess, image_tf, memory_tf, True)
                    dice_score = (prediction2[..., 0: 75] * prediction[..., 5: 80]).sum() * 2.0 / ((prediction2[:, :, 0: 75].sum() + prediction[:, :, 5: 80].sum()))
                    union = (prediction2[..., 0: 75] * prediction[..., 5: 80]).sum() * 1.0 / prediction2[..., 0: 75].sum()
                    if not (union < 0.25 or dice_score > 0.9):
                        prediction = prediction2[::]
                        box_corner[2] += 5

                last_corner = box_corner[:]
                box_corner = centerOfSegment(prediction, box_corner)
            # Write the first vertebra (label 1) into the memory volume.
            extended_memory[last_corner[0]: last_corner[0]+ box_size[0], last_corner[1]: last_corner[1]+ box_size[1], last_corner[2]: last_corner[2]+ box_size[2]] += prediction

            # Start the walk centered on what has been segmented so far.
            box_corner = centerOfSegment(extended_memory, [-delta[0], -delta[1], -delta[2]])

            # Segment subsequent vertebrae with labels 2..24.
            vSize, pSize = [], []  # per-vertebra voxel counts and axis-0 projection areas
            for i in range(2, 25):
                for iter in range(3):
                    box_corner = [max(0, box_corner[idx]) for idx in range(3)]
                    image_tf[0, ..., 0] = slide(extended_image, box_corner, box_size)
                    memory_tf[0, ..., 0] = slide(extended_memory, box_corner, box_size)
                    memory_tf = (memory_tf > 0) * 1  # binarize already-segmented voxels
                    prediction = self.__predictPatch(sess, image_tf, memory_tf)
                    # Remove voxels already claimed by previous vertebrae.
                    prediction = prediction.astype(int) - memory_tf[0, ..., 0]  * prediction

                    last_corner = box_corner[:]
                    box_corner = centerOfSegment(prediction, box_corner)
                    for j in range(3):
                        box_corner[j] -= delta[j]
                    box_corner[0] += 10  # fixed bias along axis 0 — purpose unclear, TODO confirm

                if prediction.sum() == 0:
                    break  # nothing new segmented: stop the walk

                # Topmost slice (axis 2) reached by this vertebra, in volume coords.
                upper = np.argwhere(prediction)[:, 2].max() + box_corner[2]
                extended_memory[last_corner[0]: last_corner[0]+ box_size[0], last_corner[1]: last_corner[1]+ box_size[1], last_corner[2]: last_corner[2]+ box_size[2]] += i * prediction
                vSize.append(prediction.sum())
                pSize.append(prediction.max(0).sum())
                if upper >= dimensions[2] - 1:
                    break  # reached the last slice of the original volume
                if i > 3:
                    # Heuristic stop: sudden growth in volume and projected area
                    # after a small vertebra suggests a bad/merged segmentation.
                    if vSize[-2] - vSize[-3] > 1000 and pSize[-2] - pSize[-3] > 100 and vSize[-3] < 15000:
                        break
            # Crop the zero-padded memory back to the input's shape.
            result = extended_memory[0: dimensions[0], 0: dimensions[1], 0: dimensions[2]]
        return result


if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: Segmentation.py volume.nii.gz")
        # FIX: exit non-zero on a usage error (bare sys.exit() returned 0).
        sys.exit(1)

    model = Prediction("./models")

    # Load the input volume and run the iterative vertebra segmentation.
    nib_image = nib.load(sys.argv[1])
    image = nib_image.get_fdata()
    new_data = model.predict(image)

    # Save the label map, preserving the input's affine and header.
    affine = nib_image.affine.copy()
    hdr = nib_image.header.copy()
    new_nii = nib.Nifti1Image(new_data, affine, hdr)
    nib.save(new_nii, "label.nii.gz")
