import theano.tensor as T
from sklearn.metrics.ranking import roc_auc_score
from keras import backend as K

def auc_obj(y_true, y_pred):
    """Objective defined as 1 - ROC AUC (lower is better).

    NOTE(review): K.eval forces the tensors to concrete numeric values,
    so this only works when y_true / y_pred are already-bound backend
    variables, not symbolic placeholders inside a compiled training
    graph -- confirm how this is actually invoked before using it as a
    Keras training loss.
    """
    true_vals, pred_vals = K.eval(y_true), K.eval(y_pred)
    auc = roc_auc_score(true_vals, pred_vals)
    return K.variable(1. - auc)

def weighted_mean_squared_error(y_true, y_pred):
    """Class-weighted MSE over the last axis.

    The squared error is averaged separately over the elements whose
    target is 0 and those whose target is 1, and the two partial means
    are combined with fixed weights.

    NOTE(review): the hard-coded constants look like class counts
    (135150 zeros, 1598 ones, normalised by 15000), which as written
    upweights the *majority* (zero) class -- confirm the ratio is not
    meant to be inverted.
    """
    zero_mask = y_true == 0
    one_mask = y_true == 1
    # Per-class mean squared error, each scaled by its fixed weight.
    zero_part = 135150. / 15000. * T.mean(T.square(y_pred[zero_mask] - y_true[zero_mask]), axis=-1)
    one_part = 1598. / 15000. * T.mean(T.square(y_pred[one_mask] - y_true[one_mask]), axis=-1)
    # Fix: dropped the leftover debug `print` of the symbolic expressions.
    return zero_part + one_part

def mean_squared_error(y_true, y_pred):
    """Plain mean squared error over the last axis.

    Same contract as the Keras built-in `mean_squared_error`; kept here
    presumably for side-by-side comparison with the weighted variant.
    """
    # Fix: removed the stray debug `print` of the symbolic tensor.
    return K.mean(K.square(y_pred - y_true), axis=-1)

def binary_crossentropy_with_ranking(y_true, y_pred):
    """Binary cross-entropy combined with a ranking penalty.

    The extra term penalises positive examples whose raw (pre-sigmoid)
    score falls below the highest score assigned to any negative
    example, encouraging every positive to rank above every negative.

    NOTE(review): `K.binary_crossentropy(y_pred, y_true)` uses the old
    Keras argument order (output, target); newer Keras expects
    (target, output) -- confirm against the installed version.
    """
    # first get the log loss like normal
    logloss = K.mean(K.binary_crossentropy(y_pred, y_true), axis=-1)

    # next, build a rank loss

    # clip the probabilities to keep stability
    y_pred_clipped = K.clip(y_pred, K.epsilon(), 1-K.epsilon())

    # translate into the raw scores before the logit (inverse sigmoid)
    y_pred_score = K.log(y_pred_clipped / (1 - y_pred_clipped))

    # determine what the maximum score for a zero outcome is.
    # (y_true < 1) is a 0/1 mask selecting negative examples.
    # NOTE(review): masking by multiplication zeroes out the positives'
    # scores rather than excluding them, so if every negative score is
    # negative the max comes out as 0 from a positive slot -- verify
    # this is acceptable.
    y_pred_score_zerooutcome_max = K.max(y_pred_score * (y_true <1))

    # determine how much each score is above or below it
    rankloss = y_pred_score - y_pred_score_zerooutcome_max

    # only keep losses for positive outcomes (y_true acts as a 0/1 mask)
    rankloss = rankloss * y_true

    # only keep losses where the score is below the max; positives already
    # ranked above every negative contribute 0, and the -100 floor bounds
    # the squared penalty
    rankloss = K.square(K.clip(rankloss, -100, 0))

    # average the loss over just the positive outcomes; the +1 in the
    # denominator guards against division by zero in an all-negative batch
    rankloss = K.sum(rankloss, axis=-1) / (K.sum(y_true > 0) + 1)

    # return (rankloss + 1) * logloss - an alternative to try
    return rankloss + logloss