import tensorflow as tf

from functools import reduce
from operator import mul

def dice_similarity_cofficient(labels, logits):
    """Return the soft Dice loss (1 - mean Dice score) for binary segmentation.

    Fork from https://cmiclab.cs.ucl.ac.uk/CMIC/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py
    https://arxiv.org/pdf/1707.01992.pdf

    ``logits`` are raw scores; they are passed through a sigmoid here.
    Foreground and background channels are stacked on the last axis so
    both classes contribute to the score.
    """
    probs = tf.nn.sigmoid(logits)

    ones = tf.ones_like(probs)
    probs = tf.concat([probs, ones - probs], -1)
    labels = tf.concat([labels, ones - labels], -1)

    # Reduce over every axis except the (last) channel axis.
    axes = list(range(len(probs.shape) - 1))
    intersection = 2.0 * tf.reduce_sum(probs * labels, axis=axes)
    denom = (tf.reduce_sum(tf.square(probs), axis=axes) +
             tf.reduce_sum(tf.square(labels), axis=axes))
    # 1e-5 guards against division by zero.
    per_class_dice = intersection / (denom + 1e-5)
    return 1.0 - tf.reduce_mean(per_class_dice)


def weighted_cost(label, logits, distance):
    """Loss from paper: boundary-weighted asymmetric binary cost.

    Pixels close to an object boundary (small ``distance``) receive a
    Gaussian-shaped extra weight (up to 9x); false positives are
    down-weighted by a fixed factor relative to false negatives.

    ``logits`` are raw scores and are passed through a sigmoid here.
    """
    probs = tf.nn.sigmoid(logits)

    # Flatten each sample to a single vector of elements.
    n_elems = reduce(mul, probs.shape.as_list()[1:])
    probs_flat = tf.reshape(probs, [-1, n_elems])
    labels_flat = tf.reshape(label, [-1, n_elems])
    dist_flat = tf.reshape(distance, [-1, n_elems])

    # Gaussian bump around the boundary; floor of 1 everywhere else.
    weight = 8 * tf.exp(-dist_flat * dist_flat / 36) + 1

    ilambda = 0.1  # down-weight factor for false-positive responses
    false_pos_term = (1 - labels_flat) * probs_flat * weight * ilambda
    false_neg_term = (1 - probs_flat) * labels_flat * weight
    return tf.reduce_sum(false_pos_term + false_neg_term)


def BCE_loss(labels, logits, distance):
    """Binary cross entropy loss, averaged over all elements.

    ``distance`` is accepted only so this function shares the signature of
    the other losses in this module; it is not used.
    """
    per_element = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                          logits=logits)
    return tf.reduce_mean(per_element)

def Focal_loss(labels, logits, distance, alpha=0.25, gamma=2.0):
    """Class-balanced focal loss (Lin et al., https://arxiv.org/abs/1708.02002).

    ``distance`` is accepted only for signature compatibility with the other
    losses in this module; it is not used.

    Fix: the ``alpha`` parameter was previously ignored — the balancing
    weight hard-coded 0.25/0.75. It now uses ``alpha`` / ``(1 - alpha)``,
    which reproduces the old behavior exactly at the default alpha=0.25.
    """
    bce_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
    # pt is the model's probability for the true class (exp(-BCE)).
    pt = tf.exp(-bce_loss)
    # Class balancing: alpha on positives, (1 - alpha) on negatives.
    weight = labels * alpha + (1 - labels) * (1 - alpha)
    # (1 - pt)^gamma focuses the loss on hard, misclassified examples.
    loss = tf.pow(1 - pt, gamma) * bce_loss * weight
    # Final *2 scaling kept from the original implementation.
    return tf.reduce_mean(loss) * 2
