"""
ref:
    github: https://github.com/jrzaurin/LightGBM-with-Focal-Loss/
    blog: https://towardsdatascience.com/lightgbm-with-the-focal-loss-for-imbalanced-datasets-9836a9ae00ca
"""

import numpy as np
from scipy.misc import derivative
from sklearn.metrics import f1_score


def lgb_binary_focal_loss(y_pred, dtrain, alpha, gamma):
    """
    Focal Loss objective for lightgbm (binary classification only).
    Ref: original paper https://arxiv.org/pdf/1708.02002.pdf

    :param y_pred: numpy.ndarray, raw scores (logits) predicted by the model
    :param dtrain: lightgbm.Dataset, provides the binary labels via ``.label``
    :param alpha: float, weight for the positive class (negatives get 1 - alpha)
    :param gamma: float, focusing exponent of the focal loss
    :return: (numpy.ndarray, numpy.ndarray), (first derivative, second derivative)
    """
    # target labels (0/1)
    t = dtrain.label

    def focal_loss(x):
        # sigmoid turns the raw score into a pseudo probability
        p = 1 / (1 + np.exp(-x))
        # class-balancing weight: alpha for positives, (1 - alpha) for negatives
        # (fixed: original had `(1 - t)(1 - alpha)` — a missing `*` that tried
        # to *call* the array and raised TypeError)
        a = t * alpha + (1 - t) * (1 - alpha)
        # modulating factor (1 - p_t)^gamma down-weights easy examples
        g_coef = (1 - (t * p + (1 - t) * (1 - p))) ** gamma
        bce = t * np.log(p) + (1 - t) * np.log(1 - p)
        return - a * g_coef * bce

    # Central differences with the same spacing/order scipy.misc.derivative
    # used by default (order=3). Computed inline because scipy.misc.derivative
    # was removed in SciPy >= 1.12.
    dx = 1e-6
    f_plus = focal_loss(y_pred + dx)
    f_minus = focal_loss(y_pred - dx)
    grad = (f_plus - f_minus) / (2 * dx)
    hess = (f_plus - 2 * focal_loss(y_pred) + f_minus) / dx ** 2
    return grad, hess


def lgb_focal_loss(y_pred, dtrain, alpha, gamma):
    """
    Focal Loss objective for lightgbm (multi-class task).
    Ref: original paper https://arxiv.org/pdf/1708.02002.pdf

    :param y_pred: numpy.ndarray, raw scores of shape (n_samples, n_classes).
        NOTE(review): lightgbm passes multi-class predictions flattened 1-D;
        callers may need to reshape before/after — confirm against the trainer.
    :param dtrain: lightgbm.Dataset, provides integer class labels via ``.label``
    :param alpha: float, weight for the target class (others get 1 - alpha)
    :param gamma: float, focusing exponent of the focal loss
    :return: (numpy.ndarray, numpy.ndarray), (first derivative, second derivative),
        each with the same shape as ``y_pred``
    """
    # lightgbm stores labels as floats; np.eye indexing requires ints
    targets = dtrain.label.reshape(-1).astype(int)
    # NOTE(review): assumes every class occurs in this dataset — verify
    n_classes = len(np.unique(targets))
    # one-hot encode the targets
    t = np.eye(n_classes)[targets]

    def focal_loss(x):
        # sigmoid => pseudo probability per class
        p = 1 / (1 + np.exp(-x))
        a = t * alpha + (1 - t) * (1 - alpha)
        g_coef = (1 - (t * p + (1 - t) * (1 - p))) ** gamma
        bce = t * np.log(p) + (1 - t) * np.log(1 - p)
        # Per-element loss (no .sum(axis=1)): each element depends only on its
        # own logit, so elementwise central differences give the true partial
        # derivatives. Summing first collapsed grad/hess to shape (n_samples,),
        # which does not match y_pred and is unusable as a lightgbm objective.
        return - a * g_coef * bce

    # Central differences matching scipy.misc.derivative defaults (order=3);
    # scipy.misc.derivative itself was removed in SciPy >= 1.12.
    dx = 1e-6
    f_plus = focal_loss(y_pred + dx)
    f_minus = focal_loss(y_pred - dx)
    grad = (f_plus - f_minus) / (2 * dx)
    hess = (f_plus - 2 * focal_loss(y_pred) + f_minus) / dx ** 2
    return grad, hess


def lgb_f1_score(preds, lgb_dataset, average='binary'):
    """
    F1-score evaluation metric for lightgbm.

    :param preds: numpy.ndarray, array with the predictions
    :param lgb_dataset: lightgbm.Dataset
    :param average: str, averaging method, see sklearn metrics.f1_score
    :return: name, f1 score, max is better?
    """
    labels = lgb_dataset.get_label()
    # 2-D predictions carry one score per class; 1-D are binary probabilities
    if preds.ndim > 1:
        hard_preds = preds.argmax(axis=1)
    else:
        hard_preds = (preds > 0.5).astype(int)
    return 'f1', f1_score(labels, hard_preds, average=average), True


def lgb_focal_f1_score(preds, lgb_dataset):
    """
    F1-score evaluation metric for lightgbm models trained with a custom
    (focal) loss. With a custom objective the raw predictions are logits, so
    they must be passed through a sigmoid before thresholding.

    :param preds: numpy.ndarray, array with the raw predictions
    :param lgb_dataset: lightgbm.Dataset
    :return: name, f1 score, max is better?
    """
    probs = 1. / (1. + np.exp(-preds))
    # 2-D: pick the highest-scoring class; 1-D: threshold at 0.5
    if probs.ndim > 1:
        hard_preds = probs.argmax(axis=1)
    else:
        hard_preds = (probs > 0.5).astype(int)
    return 'f1', f1_score(lgb_dataset.get_label(), hard_preds), True


def py_focal_loss(x, t, alpha=0.25, gamma=2):
    # encode targets to one hot
    n_classes = len(np.unique(t))
    t = np.eye(n_classes)[t]
    # sigmoid => pseudo probability
    p = 1 / (1 + np.exp(-x))
    a = t * alpha + (1 - t) * (1 - alpha)
    g_coef = (1 - (t * p + (1 - t) * (1 - p))) ** gamma
    bce = t * np.log(p) + (1 - t) * np.log(1 - p)
    focal_loss = - (a * g_coef * bce).sum(axis=1)

    return focal_loss


if __name__ == '__main__':
    """
    To use the losses above with lightgbm, follow this pattern:

    ```python
    focal_loss = lambda x, y: lgb_focal_loss(x, y, 0.25, 1.)
    eval_error = lambda x, y: lgb_f1_score(x, y)

    lgbtrain = lgb.Dataset(X_tr, y_tr, free_raw_data=True)
    lgbeval = lgb.Dataset(X_val, y_val)
    params  = {'learning_rate':0.1, 'num_boost_round':10}
    model = lgb.train(params, lgbtrain, valid_sets=[lgbeval], fobj=focal_loss, feval=eval_error)
    ```
    """

    np.random.seed(2020)
    output = np.random.randn(5, 3) * 2
    target = np.array([2, 0, 0, 1, 2])
    print('output:\n', output)
    print('target:', target)

    # test focal loss
    fl = py_focal_loss(output, target)
    print("focal loss:", fl)


    def fl_fun(x): return py_focal_loss(x, target)


    # test first and second derivatives via central differences; these
    # reproduce scipy.misc.derivative(..., n=1/2, dx=1e-6), which was
    # removed in SciPy >= 1.12
    dx = 1e-6
    f_plus = fl_fun(output + dx)
    f_minus = fl_fun(output - dx)
    grad = (f_plus - f_minus) / (2 * dx)
    hess = (f_plus - 2 * fl_fun(output) + f_minus) / dx ** 2
    print('grad:', grad)
    print('hess:', hess)
