import numpy as np
import tensorflow as tf
from keras import backend as k
from keras.layers import Conv2D, BatchNormalization, Activation, Lambda, add
import keras.backend as K
from keras.regularizers import l2


def focal(factor, gamma=2.0):
    """Build a binary focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    :param factor: relative weight of positive vs. negative pixels — positives
        end up with weight 1, negatives with weight 1/factor.
    :param gamma: focusing exponent; larger gamma down-weights easy examples more.
    :return: a Keras-compatible loss function (y_true, y_pred) -> scalar.
    """
    def _focal(y_true, y_pred):
        positive_mask = k.equal(y_true, 1)
        # where(...) yields `factor` for positives and 1 for negatives; the
        # trailing division normalizes positives to 1 and negatives to 1/factor.
        alpha = tf.where(positive_mask, k.ones_like(y_true) * factor, k.ones_like(y_true)) / factor
        # p_t complement: (1 - p) for positives, p for negatives; raised to gamma
        # so confidently-correct pixels contribute little to the loss.
        modulating = tf.where(positive_mask, 1 - y_pred, y_pred) ** gamma
        loss = alpha * modulating * k.binary_crossentropy(y_true, y_pred)
        # Clip to keep the per-pixel loss numerically bounded.
        return k.mean(k.clip(loss, 1e-5, 1e5))
    return _focal


def dynamic_focal_keras(p_threshold=0.7, decay=0.1, class_num=1):
    """Build a binary loss whose class weights adapt to each batch.

    Two per-pixel weights multiply the binary cross-entropy:
      * alpha weight — positives get the batch fraction of negatives and
        vice versa, so the rarer class is weighted up;
      * decay weight — pixels already predicted above ``p_threshold`` get
        ``1 - f`` and the rest get ``f``, where ``f`` is the fraction of
        confident pixels in the batch.

    :param p_threshold: probability above which a pixel counts as "easy".
    :param decay: currently unused — kept for interface compatibility.
    :param class_num: number of channels to reshape predictions into.
    :return: a Keras-compatible loss function (y_true, y_pred) -> scalar.
    """
    def _focal(y_true, y_pred):
        labels = k.reshape(y_true, (-1, class_num))
        probs = k.reshape(y_pred, (-1, class_num))
        # Batch statistics: counts of negative / positive pixels.
        n_negative = k.cast(k.sum(1 - labels), tf.float32)
        n_positive = k.cast(k.sum(labels), tf.float32)
        n_total = n_negative + n_positive
        negative_fraction = n_negative / n_total
        # Rare-class up-weighting: positives get the negative fraction, and
        # negatives get its complement.
        alpha = k.ones_like(labels) * negative_fraction
        alpha = tf.where(k.equal(labels, 1), alpha, 1 - alpha)
        # Fraction of pixels already predicted confidently in this batch.
        confident_fraction = K.sum(k.cast(tf.greater_equal(probs, p_threshold), tf.float32)) / n_total
        decay_weight = k.ones_like(labels) * confident_fraction
        decay_weight = tf.where(k.greater_equal(probs, p_threshold), 1 - decay_weight, decay_weight)
        # NOTE(review): the original focal modulating term produced NaN losses
        # and was removed; only the two adaptive weights remain.
        return k.mean(decay_weight * alpha * k.binary_crossentropy(labels, probs))
    return _focal


def dynamic_focal_tf(p_threshold=0.5, class_num=1, gamma=1.0):
    """Build a multi-class dynamic focal loss over raw logits.

    Three per-pixel weights multiply the sparse softmax cross-entropy:
      * class weight — ``1 - class frequency`` in the batch, down-weighting
        frequent classes;
      * decay weight — pixels whose true-class probability is already
        >= ``p_threshold`` get ``1 - f``, the remaining hard pixels get
        ``3 * f``, where ``f`` is the batch fraction of confident pixels;
      * focal weight — ``(1 - p_t) ** gamma``, clipped away from zero.

    :param p_threshold: probability above which a pixel counts as "easy".
    :param class_num: number of classes (one-hot depth / softmax width).
    :param gamma: focal exponent.
    :return: a loss function (y_true, y_logit) -> scalar.
    """
    def _focal(y_true, y_logit):
        # tf.one_hot and sparse_softmax_cross_entropy_with_logits require
        # integer labels; Keras typically feeds targets as float32, so cast.
        labels = tf.cast(tf.reshape(y_true, (-1,)), tf.int32)
        labels_sparse = tf.one_hot(labels, depth=class_num)
        # Class imbalance weight: 1 - (batch frequency of the pixel's class).
        class_count = tf.reduce_sum(labels_sparse, axis=0)
        weights = 1 - class_count / tf.reduce_sum(class_count)
        weights = tf.reshape(weights, (-1, 1))
        class_weight = tf.squeeze(tf.keras.backend.dot(labels_sparse, weights))
        # Probability the model assigns to the true class of each pixel.
        flat_logits = tf.reshape(y_logit, (-1, class_num))
        y_pred = tf.nn.softmax(flat_logits)
        pred = tf.where(tf.equal(labels_sparse, 1), y_pred, K.zeros_like(y_pred))
        pred = tf.reduce_max(pred, axis=1)
        # Fraction of pixels already classified confidently in this batch.
        sum_pixel = tf.reduce_sum(labels_sparse)
        greater_factor = K.sum(K.cast(tf.greater_equal(pred, p_threshold), tf.float32)) / sum_pixel
        decay_weight = K.ones_like(pred) * greater_factor
        # Easy pixels: 1 - f; hard pixels boosted by 3 * f.
        decay_weight = tf.where(K.greater_equal(pred, p_threshold), 1 - decay_weight, 3 * decay_weight)
        # Focal modulating term, clipped away from 0 to avoid a vanishing loss.
        focal_weight = K.clip((1 - pred) ** gamma, 1e-5, 1)
        # Logits must be reshaped to match the flattened labels — passing the
        # raw y_logit mismatches shapes for any non-flat logit tensor.
        cls_loss = decay_weight * class_weight * focal_weight * \
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=flat_logits)
        return K.mean(cls_loss)
    return _focal


def true_mean_logit(y_true, y_pred):
    """Keras metric: mean predicted score over positive (label == 1) pixels."""
    flat_labels = k.reshape(y_true, (-1, 1))
    flat_scores = k.reshape(y_pred, (-1, 1))
    # Multiplying by the labels zeros out negative pixels, so the sum divided
    # by the positive count is the mean positive score.
    positive_scores = flat_labels * flat_scores
    return k.sum(positive_scores) / k.sum(flat_labels)


def false_mean_logit(y_true, y_pred):
    """Keras metric: mean predicted score over negative (label == 0) pixels."""
    flat_labels = k.reshape(y_true, (-1, 1))
    flat_scores = k.reshape(y_pred, (-1, 1))
    # (1 - labels) masks out positive pixels; dividing by the negative count
    # gives the mean negative score.
    negative_scores = (1 - flat_labels) * flat_scores
    return k.sum(negative_scores) / k.sum(1 - flat_labels)


def top_k_false_mean_logit(y_true, y_pred, k_top=100*100):
    """Keras metric: mean of the ``k_top`` highest scores among negative pixels.

    Highlights the worst false-positive responses instead of averaging over
    all negatives.
    """
    flat_labels = k.reshape(y_true, (1, -1))
    flat_scores = k.reshape(y_pred, (1, -1))
    # Positive pixels are zeroed so top_k only ever picks negative scores.
    negative_scores = (1 - flat_labels) * flat_scores
    top_values, _ = tf.nn.top_k(negative_scores, k=k_top)
    return k.mean(top_values)


def group_norm(input_tensor, G, channel_per_group=1, gamma=1.0, beta=0.0, eps=1e-5):
    """Group normalization (Wu & He, "Group Normalization", 2018).

    Splits the channel axis into G groups and normalizes each group by its
    own mean/variance over (channels-in-group, H, W).

    :param input_tensor: 4-D tensor, NHWC or NCHW depending on the Keras
        image data format.
    :param G: number of channel groups; must divide the channel count.
    :param channel_per_group: unused — kept for interface compatibility.
    :param gamma: scale applied after normalization (scalar here, not per-channel).
    :param beta: shift applied after normalization.
    :param eps: small constant added to the variance for numerical stability.
    :return: normalized tensor with the same shape and layout as the input.
    """
    # Use -1 for the batch dim in reshapes: input_tensor.shape[0] is often
    # None (dynamic batch) and would break tf.reshape.
    if K.image_data_format() == 'channels_last':
        n, h, w, c = input_tensor.shape
        # Group along the LAST axis; reshaping NHWC data as [n, G, c//G, h, w]
        # would scramble channel and spatial values.
        x = tf.reshape(input_tensor, [-1, h, w, G, c // G])
        mean, var = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
        x = (x - mean) / tf.sqrt(var + eps)
        # Restore the original NHWC shape (the old code returned NCHW here).
        x = tf.reshape(x, [-1, h, w, c])
    else:
        n, c, h, w = input_tensor.shape
        x = tf.reshape(input_tensor, [-1, G, c // G, h, w])
        mean, var = tf.nn.moments(x, [2, 3, 4], keep_dims=True)
        x = (x - mean) / tf.sqrt(var + eps)
        x = tf.reshape(x, [-1, c, h, w])
    return x * gamma + beta