# coding=utf-8
import pandas as pd
import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt
import sklearn
from sklearn.linear_model import LinearRegression

# Auto-tighten layout on every figure so titles/legends are not clipped.
plt.rcParams.update({'figure.autolayout': True})

# Tiny hand-made training set: `x` carries the trend, `x2` is mostly noise.
df = pd.DataFrame({'x': [-2.1, -0.9, 0, 1, 2, 2.5, 3, 4],
                   'x2': [-1.1, 1, 0, 1, 0, 1, 0, 1],
                   'y': [-10, 0, -5, 10, 20, 10, 30, 40]})
X_train = df.drop('y', axis=1)
Y_train = df['y']

# Prediction grid: every x paired with a constant x2 = 1, built directly as
# a DataFrame instead of the map(lambda ...) list-of-rows detour.
x_values = [-4, -3, -2, -1, 0, 0.4, 0.6, 1, 1.4, 1.6, 2, 3, 4, 5, 6, 7, 8]
X_pred = pd.DataFrame({'x': x_values, 'x2': [1] * len(x_values)})
print(X_pred)


def process_list(list_in):
    """Format every value of *list_in* as an 8-wide string rounded to 2 decimals."""
    return ["%8.2f" % round(float(value), 2) for value in list_in]


def customObj3(real, predict):
    """Squared-error objective: derivatives of (predict - real)**2.

    Parameters
    ----------
    real : array-like
        Ground-truth labels.
    predict : array-like
        Current model predictions.

    Returns
    -------
    (grad, hess) : tuple of np.ndarray
        grad = 2*(predict - real); hess is the constant 2 per element.
    """
    # Coerce instead of asserting: `assert isinstance(...)` is stripped
    # under `python -O` and was only input validation here.
    residual = np.asarray(predict) - np.asarray(real)
    grad = 2.0 * residual
    # The second derivative of x**2 is the constant 2; build it vectorized
    # rather than with a per-element Python list.
    hess = np.full_like(residual, 2.0, dtype=float)
    return grad, hess


def customObj1(real, predict):
    """Experimental objective: raw residual as grad, sqrt(|residual|) as hess."""
    residual = predict - real
    return residual, np.sqrt(np.abs(residual))


def log_cosh_obj(real, predict):
    """Log-cosh loss derivatives: grad = tanh(x), hess = 1 - tanh(x)**2."""
    t = np.tanh(predict - real)
    # sech^2 written via the tanh identity; 1/cosh(x)**2 overflows for
    # large |x| where tanh has already saturated.
    return t, 1.0 - t * t


def fair_obj(real, predict):
    """Fair loss derivatives; loss is y = c*abs(x) - c**2*np.log(abs(x)/c + 1)."""
    c = 1
    residual = predict - real
    denom = abs(residual) + c
    grad = c * residual / denom
    hess = c * c / denom ** 2
    return grad, hess


def huber_approx_obj(real, predict):
    """Pseudo-Huber loss derivatives with delta fixed at 1."""
    delta = 1  # the Huber transition width
    residual = predict - real
    scale = 1 + (residual / delta) ** 2
    root = np.sqrt(scale)
    # grad = d / sqrt(1 + (d/h)^2); hess = 1 / (1 + (d/h)^2)^(3/2)
    return residual / root, 1 / scale / root


def logregobj(real, predict):
    """Logistic-loss derivatives: sigmoid(margin) - label, and its variance."""
    prob = 1.0 / (1.0 + np.exp(-predict))
    grad = prob - real
    hess = prob * (1.0 - prob)
    return grad, hess


def custom_loss(real, predict):
    """Weighted binary cross-entropy derivatives w.r.t. a predicted probability.

    Treats `predict` as a probability in [0, 1] and weights the
    false-positive term `penalty` times more than plain log-loss.

    Returns (grad, hess); `predict` is clipped into (eps, 1-eps) so the
    1/p and 1/(1-p) terms stay finite at p == 0 or p == 1 (the original
    divided by zero there).
    """
    penalty = 2.0
    eps = 1e-12
    p = np.clip(predict, eps, 1.0 - eps)
    grad = -real / p + penalty * (1 - real) / (1 - p)  # first derivative
    hess = real / (p ** 2) + penalty * (1 - real) / (1 - p) ** 2  # second derivative
    return grad, hess


def mte(real, predict):
    """Cubic-error objective: first/second derivatives of (predict - real)**3."""
    diff = predict - real
    # 3*(p - r)^2 is the factored form of 3p^2 - 6pr + 3r^2
    grad = 3.0 * diff * diff
    hess = 6.0 * diff
    return grad, hess


def m4e(real, predict):
    """Quartic-error objective: derivatives of (predict - real)**4.

    grad = 4*(p - r)**3, hess = 12*(p - r)**2.

    Fixes a typo in the original expanded-polynomial gradient, whose last
    term was `4*real*real` (missing one factor of `real`, should be
    `4*real**3`) — the gradient was wrong whenever real was not 0 or 1.
    """
    diff = predict - real
    grad = 4.0 * diff ** 3
    hess = 12.0 * diff ** 2
    return grad, hess


def mae(real, predict):
    """MAE objective: grad = sign(predict - real), hess = constant 0.1.

    True absolute error has a zero second derivative; a small positive
    constant keeps XGBoost's Newton step finite.

    The original looped over a Python list to compute the sign and built
    the hessian from a range-based comprehension; both are replaced with
    vectorized numpy equivalents (same values: +1/-1/0 and 0.1).
    """
    residual = np.asarray(predict) - np.asarray(real)
    grad = np.sign(residual).astype(float)
    hess = np.full_like(grad, 0.1, dtype=float)
    return grad, hess


# https://www.cnblogs.com/fujian-code/p/9804129.html
def custom_asymmetric_train(y_true, predict):
    """Asymmetric squared-error derivatives: under-prediction weighted 10x."""
    residual = (y_true - predict).astype("float")
    over = residual < 0  # model predicted above the target
    grad = np.where(over, -20.0 * residual, -2.0 * residual)
    hess = np.where(over, 20.0, 2.0)
    return grad, hess


def custom_asymmetric_valid(y_true, predict):
    """Eval metric matching custom_asymmetric_train: 10x penalty below zero."""
    residual = (y_true - predict).astype("float")
    squared = residual ** 2
    loss = np.where(residual < 0, squared * 10.0, squared)
    return "custom_asymmetric_eval", np.mean(loss)
# =======================================================


# Compare the custom objectives against the built-in squared error and a
# plain linear regression, for an increasing number of boosting rounds.
for n_estimators in range(5, 100, 5):
    booster_str = "gblinear"
    model = xgb.XGBRegressor(objective=huber_approx_obj,
                             booster=booster_str,
                             n_estimators=n_estimators)
    # "reg:linear" was deprecated and renamed; "reg:squarederror" is the
    # current name for the same objective.
    model2 = xgb.XGBRegressor(objective="reg:squarederror",
                              booster=booster_str,
                              n_estimators=n_estimators)
    model3 = xgb.XGBRegressor(objective=fair_obj,
                              booster=booster_str,
                              n_estimators=n_estimators)
    model4 = xgb.XGBRegressor(objective=log_cosh_obj,
                              booster=booster_str,
                              n_estimators=n_estimators)
    model5 = xgb.XGBRegressor(objective=m4e,
                              booster=booster_str,
                              n_estimators=n_estimators)
    # `normalize=` was removed from LinearRegression in scikit-learn 1.2;
    # OLS predictions are unaffected by feature scaling, so plain
    # LinearRegression() gives the same fit.
    model6 = LinearRegression()

    for estimator in (model, model2, model3, model4, model5, model6):
        estimator.fit(X_train, Y_train)

    # Pass the feature frame positionally: the keyword was `data` in old
    # xgboost sklearn wrappers but is `X` in current releases.
    y_pred = model.predict(X_pred)
    y_pred2 = model2.predict(X_pred)
    y_pred3 = model3.predict(X_pred)
    y_pred4 = model4.predict(X_pred)
    y_pred5 = model5.predict(X_pred)
    y_pred6 = model6.predict(X_pred)

    plt.figure(figsize=(6, 5))
    plt.title('n_estimators=' + str(n_estimators))

    plt.plot(df['x'], df['y'], marker='o', linestyle=":", label="Real Y")
    plt.plot(X_pred['x'], y_pred, label="xgb->huber_approx_obj")
    plt.plot(X_pred['x'], y_pred3, label="xgb->fair_obj")
    plt.plot(X_pred['x'], y_pred4, label="xgb->Log-cosh")
    plt.plot(X_pred['x'], y_pred5, label="xgb->m4e")
    plt.plot(X_pred['x'], y_pred2, label="xgb->reg:squarederror")
    plt.plot(X_pred['x'], y_pred6, label="LinearRegression")

    plt.xlim(-4.5, 8.5)
    plt.ylim(-25, 55)

    plt.legend()
    plt.show()
    # plt.savefig("output/n_estimators_" + str(n_estimators) + ".jpg")
    plt.close()
    print(n_estimators)
