import numpy as np
from scipy.misc import derivative


def py_ghmc_loss(x, t, bins=10):
    """
    Gradient harmony classification loss (GHM-C), pure-numpy reference.
    This function can run standalone, used as a demo / for testing.

    :param x: numpy.ndarray, shape (N, C), raw prediction logits
    :param t: numpy.ndarray, shape (N,), integer class labels
    :param bins: int, histogram bins for the loss-gradient norm
    :return: float, summed GHM-weighted binary cross-entropy loss
    """
    # encode targets to one hot
    # NOTE(review): assumes every class id in [0, C) appears in t, otherwise
    # np.unique under-counts the classes -- confirm against callers
    n_classes = len(np.unique(t))
    t = np.eye(n_classes)[t]

    # sigmoid => pseudo probability; clip away exact 0/1 so the log terms
    # below stay finite when the logits saturate the sigmoid
    p = 1 / (1 + np.exp(-x))
    eps = 1e-12
    p = np.clip(p, eps, 1 - eps)
    # gradient norm of BCE w.r.t. the logit
    g = np.abs(p - t)

    # Statistics the histogram of gradient norm
    g_hist, _ = np.histogram(g, bins, range=(0, 1))
    # np.int/np.float aliases were removed in NumPy 1.24 -- use builtins.
    # Clip the index so g == 1.0 maps into the last bin instead of
    # indexing one past the end of g_weight.
    g_index = np.minimum((g * bins).astype(int), bins - 1)
    valid_mask = g_hist > 0

    # calculate the weight by above histogram: inverse gradient density
    g_weight = np.zeros_like(g_hist, dtype=float)
    g_weight[valid_mask] = np.reciprocal(g_hist[valid_mask].astype(float))

    # map g_weight to examples, then normalize by the number of non-empty bins
    g_weight = g_weight[g_index] / valid_mask.sum()
    ghm_loss = -(t * np.log(p) + (1 - t) * np.log(1 - p)) * g_weight
    return ghm_loss.sum()


def py_ghmr_loss(x, t, bins=10, mu=0.02):
    """
    Gradient harmony regression loss (GHM-R), pure-numpy reference.
    This function can run standalone, used as a demo / for testing.

    :param x: numpy.ndarray, shape (N, ), prediction values
    :param t: numpy.ndarray, shape (N, ), target values
    :param bins: int, histogram bins for the loss-gradient norm
    :param mu: float, ASL1 smoothing parameter (see the GHM paper)
    :return: float, summed GHM-weighted authentic-smooth-L1 loss
    """
    # authentic smooth L1 loss: sqrt(d^2 + mu^2) - mu
    d = x - t
    loss = np.sqrt(d * d + mu * mu) - mu

    # gradient norm of the loss function: |d| / sqrt(d^2 + mu^2), in [0, 1]
    g = np.abs(d / np.sqrt(mu * mu + d * d))

    # Statistics the histogram of gradient norm
    g_hist, _ = np.histogram(g, bins, range=(0, 1))
    # np.int/np.float aliases were removed in NumPy 1.24 -- use builtins.
    # Clip the index: for |d| >> mu, g rounds to exactly 1.0 in float64 and
    # (g * bins) would index one past the end of g_weight.
    g_index = np.minimum((g * bins).astype(int), bins - 1)
    valid_mask = g_hist > 0

    # calculate the weight by above histogram: inverse gradient density
    g_weight = np.zeros_like(g_hist, dtype=float)
    g_weight[valid_mask] = np.reciprocal(g_hist[valid_mask].astype(float))

    # map g_weight to examples, then normalize by the number of non-empty bins
    g_weight = g_weight[g_index] / valid_mask.sum()
    ghm_loss = loss * g_weight
    return ghm_loss.sum()


def lgb_ghmc_loss(y_pred, dtrain, bins=10):
    """
    LightGBM objective wrapper around py_ghmc_loss.

    :param y_pred: numpy.ndarray, raw predictions from the booster
    :param dtrain: lgb.Dataset-like object exposing a `label` attribute
    :param bins: int, histogram bins forwarded to py_ghmc_loss
    :return: (grad, hess), numerical 1st/2nd derivatives of the loss
    """
    targets = dtrain.label.reshape(-1)

    def ghmc_loss(x):
        return py_ghmc_loss(x, targets, bins)

    # Central differences replacing scipy.misc.derivative (deprecated in
    # SciPy 1.10, removed in 1.12); identical to its default 3-point stencil.
    dx = 1e-6
    f_plus = ghmc_loss(y_pred + dx)
    f_minus = ghmc_loss(y_pred - dx)
    grad = (f_plus - f_minus) / (2 * dx)
    hess = (f_plus - 2 * ghmc_loss(y_pred) + f_minus) / (dx * dx)
    # NOTE(review): the loss is reduced to a scalar before differentiation,
    # so grad/hess are scalars, while LightGBM's fobj expects per-example
    # arrays -- confirm intended usage
    return grad, hess


def lgb_ghmr_loss(y_pred, dtrain, bins=10, mu=0.02):
    """
    LightGBM objective wrapper around py_ghmr_loss.

    :param y_pred: numpy.ndarray, raw predictions from the booster
    :param dtrain: lgb.Dataset-like object exposing a `label` attribute
    :param bins: int, histogram bins forwarded to py_ghmr_loss
    :param mu: float, ASL1 smoothing parameter forwarded to py_ghmr_loss
    :return: (grad, hess), numerical 1st/2nd derivatives of the loss
    """
    targets = dtrain.label

    def ghmr_loss(x):
        return py_ghmr_loss(x, targets, bins, mu)

    # Central differences replacing scipy.misc.derivative (deprecated in
    # SciPy 1.10, removed in 1.12); identical to its default 3-point stencil.
    dx = 1e-6
    f_plus = ghmr_loss(y_pred + dx)
    f_minus = ghmr_loss(y_pred - dx)
    grad = (f_plus - f_minus) / (2 * dx)
    hess = (f_plus - 2 * ghmr_loss(y_pred) + f_minus) / (dx * dx)
    # NOTE(review): the loss is reduced to a scalar before differentiation,
    # so grad/hess are scalars, while LightGBM's fobj expects per-example
    # arrays -- confirm intended usage
    return grad, hess


if __name__ == '__main__':
    # The bare string below (in Chinese) documents how to plug these losses
    # into LightGBM: pass them to lgb.train via fobj / feval.
    """
    在lightgbm中使用上面的loss时，参照如下方式使用:

    ```python
    focal_loss = lambda x, y: lgb_ghmr_loss(x, y, 0.25, 1.)
    eval_error = lambda x, y: your_eval_fun(x, y)

    lgbtrain = lgb.Dataset(X_tr, y_tr, free_raw_data=True)
    lgbeval = lgb.Dataset(X_val, y_val)
    params  = {'learning_rate':0.1, 'num_boost_round':10}
    model = lgb.train(params, lgbtrain, valid_sets=[lgbeval], fobj=focal_loss, feval=eval_error)
    ```
    """

    # fixed seed so the demo output is reproducible
    np.random.seed(2020)

    # classification demo: 8 samples, logits for 3 classes
    output = np.random.randn(8, 3)
    target = np.random.randint(0, 3, (8,))
    print(output)
    print(target)

    gh = py_ghmc_loss(output, target)
    print(gh)

    # regression demo: 8 scalar predictions and targets
    output = np.random.randn(8, 1)
    target = np.random.randn(8, 1)
    print(output)
    print(target)

    gh = py_ghmr_loss(output, target)
    print(gh)
