from model import Unet_model
from UNet import create_model
import os

import tensorflow as tf
import numpy as np

from tensorflow.keras.losses import binary_crossentropy
from tensorflow.keras import backend as K
from scipy.ndimage import distance_transform_edt as distance

# Pin the process to a single GPU (device index 6 on this host).
# NOTE(review): set after `import tensorflow`; this works because the CUDA
# context is created lazily, but conventionally it is set before the import.
os.environ["CUDA_VISIBLE_DEVICES"] = '6'


def dice_loss(y_true, y_pred):
    """Soft Dice loss: 1 - Dice coefficient over all flattened elements.

    A smoothing term of 1 keeps the ratio finite when both inputs are empty.
    """
    eps = 1.
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    overlap = K.sum(truth * pred)
    dice = (2. * overlap + eps) / (K.sum(truth) + K.sum(pred) + eps)
    return 1. - dice


def bce_dice_loss(y_true, y_pred):
    """Combined loss: binary cross-entropy plus soft Dice loss."""
    return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)


def dice_coef(y_true, y_pred, smooth=1):
    """Per-sample Dice coefficient averaged over the batch.

    Sums run over axes 1-3 (assumes NHWC batches — TODO confirm at call site).
    """
    reduce_axes = [1, 2, 3]
    overlap = K.sum(y_true * y_pred, axis=reduce_axes)
    total = K.sum(y_true, axis=reduce_axes) + K.sum(y_pred, axis=reduce_axes)
    return K.mean((2. * overlap + smooth) / (total + smooth), axis=0)


def calc_dist_map(seg):
    """Signed distance map of a binary segmentation mask (for boundary loss).

    Outside the foreground the value is the positive Euclidean distance to
    the foreground; inside it is the negative distance shifted by 1 (so
    boundary pixels map to 0).

    Args:
        seg: numpy array; non-zero entries mark the foreground.

    Returns:
        Array with the same shape (and dtype) as ``seg``; all zeros when
        the mask contains no foreground at all.
    """
    res = np.zeros_like(seg)
    # `np.bool` was removed in NumPy 1.24 — the builtin `bool` is the
    # documented replacement and behaves identically here.
    posmask = seg.astype(bool)

    if posmask.any():
        negmask = ~posmask
        # Positive distances outside the mask, negative (minus 1) inside.
        res = distance(negmask) * negmask - (distance(posmask) - 1) * posmask

    return res


def calc_dist_map_batch(y_true):
    """Signed distance maps for every mask in a batch, as float32."""
    maps = [calc_dist_map(mask) for mask in y_true]
    return np.array(maps).astype(np.float32)


def surface_loss_keras(y_true, y_pred):
    """Boundary (surface) loss: mean of predictions weighted by the signed
    distance map of the ground truth.

    ``tf.py_func`` was removed in TensorFlow 2.x; ``tf.numpy_function`` is
    the replacement that hands plain numpy arrays to the wrapped callable,
    which is exactly what ``calc_dist_map_batch`` expects.
    """
    y_true_dist_map = tf.numpy_function(func=calc_dist_map_batch,
                                        inp=[y_true],
                                        Tout=tf.float32)
    weighted = y_pred * y_true_dist_map
    return K.mean(weighted)


def augment(input_image, input_mask):
    """Apply the same random left-right flip to an image and its mask.

    The flip fires with probability 0.5 and is applied to both tensors
    together so they stay aligned.
    """
    do_flip = tf.random.uniform(()) > 0.5
    if do_flip:
        input_image = tf.image.flip_left_right(input_image)
        input_mask = tf.image.flip_left_right(input_mask)
    return input_image, input_mask


if __name__ == '__main__':
    # 128x128 RGB input slices.
    input_shape = [128, 128, 3]
    # NOTE(review): this Sequential augmentation pipeline is built but never
    # applied below — the `augment` map function is used on the dataset
    # instead. Presumably dead code; confirm before removing.
    data_augmentation = tf.keras.Sequential([
        tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal", seed=42,
                                                              input_shape=[128, 128, 3]),
    ])
    # Pre-extracted CT slices and ground-truth masks; hard-coded local paths.
    train_ct = np.load('/hdd9/ppp/ImageSegments/COVID/train_ct_lung_mask.npy')
    train_gt = np.load('/hdd9/ppp/ImageSegments/COVID/train_gt_lung_mask.npy')

    test_ct = np.load('/hdd9/ppp/ImageSegments/COVID/test_ct_lung_mask.npy')
    test_gt = np.load('/hdd9/ppp/ImageSegments/COVID/test_gt_lung_mask.npy')

    batch_size = 8
    # Normalize pixel values to [0, 1]; applied to images only, not masks.
    resize_and_rescale = tf.keras.Sequential([
        # tf.keras.layers.experimental.preprocessing.Resizing(IMG_SIZE, IMG_SIZE),
        tf.keras.layers.experimental.preprocessing.Rescaling(1. / 255)
    ])
    CT_train = tf.data.Dataset.from_tensor_slices((train_ct, train_gt))
    CT_test = tf.data.Dataset.from_tensor_slices((test_ct, test_gt))
    # Rescale images, then augment (random flips) and batch the training set.
    CT_train = CT_train.map(lambda x, y: (resize_and_rescale(x), y))
    CT_train = CT_train.map(augment).batch(batch_size)

    # Test set gets rescaling only — no augmentation.
    CT_test = CT_test.map(lambda x, y: (resize_and_rescale(x), y))
    CT_test = CT_test.batch(batch_size)
    unet = create_model(input_shape)
    # unet = Unet_model(input_shape)
    # Train with pure Dice loss; bce_dice_loss / surface_loss_keras are
    # defined above as alternatives but unused here.
    unet.compile(loss=dice_loss,
                 optimizer=tf.keras.optimizers.Adam(1e-4),
                 metrics=[dice_coef, 'acc'])
    # unet.summary()
    # NOTE(review): the test split is used as validation data during training.
    history = unet.fit(CT_train,
                       validation_data=CT_test,
                       epochs=50)
