import sklearn.metrics.ranking as sci_ranking
import numpy as np

def AUCobjective(preds, dtrain):
    """Squared-error custom objective for XGBoost.

    Returns the per-row gradient and hessian of 0.5 * (pred - label)^2.

    Parameters
    ----------
    preds : ndarray
        Current raw predictions.
    dtrain : xgboost.DMatrix
        Training matrix; only ``get_label()`` is used.

    Returns
    -------
    (grad, hess) : tuple of ndarray
        First and second derivatives, one entry per row.
    """
    grad = preds - dtrain.get_label()
    # The hessian of the squared-error objective is the constant 1.
    # (Returning `grad` in both slots, as before, feeds XGBoost negative
    # "hessians" and breaks its Newton step.)
    hess = np.ones_like(grad)
    return grad, hess

def logregobj(preds, dtrain):
    """Binary-logistic custom objective for XGBoost.

    ``preds`` are raw margin scores; they are squashed through a sigmoid
    before differentiating, per XGBoost's custom-objective contract.

    Returns
    -------
    (grad, hess) : tuple of ndarray
        First and second derivatives of the log loss w.r.t. the margins.
    """
    y = dtrain.get_label()
    # Sigmoid transform of the raw margins into probabilities.
    prob = 1.0 / (1.0 + np.exp(-preds))
    # d(logloss)/d(margin) = p - y; d2(logloss)/d(margin)2 = p * (1 - p).
    return prob - y, prob * (1.0 - prob)

def evalerror(preds, dtrain):
    """Custom eval metric for XGBoost: AUC of the raw margin predictions.

    ``preds`` are margins (before the logistic transform); ROC-AUC only
    depends on the ranking of scores, so no sigmoid is needed.

    Returns
    -------
    (name, value) : tuple of (str, float)
        Metric name and its value. Named ``'auc'`` (was ``'error'``):
        the value is a score to maximize, not an error to minimize.
    """
    # `sklearn.metrics.ranking` is a private module removed in
    # scikit-learn 0.24; import the stable public API instead.
    from sklearn.metrics import roc_auc_score
    return 'auc', roc_auc_score(y_true=dtrain.get_label(), y_score=preds)
