# coding=utf-8
from functools import partial

import pandas as pd
import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error

# Tiny toy dataset: one integer feature ``x`` and a roughly linear target ``y``.
df = pd.DataFrame({'x': list(range(5)),
                   'y': [-5, 10, 20, 30, 40]})
X_train = df.drop('y', axis=1)
Y_train = df['y']
# x values to predict on later; extends one step past the training range.
X_pred = list(range(1, 9))

print(X_train)
assert isinstance(X_train, pd.DataFrame)
print('=======================================================')
print(Y_train)

def customObj2(real, predict):
    """Experimental custom objective for ``xgb.XGBRegressor``.

    The sklearn wrapper calls this as ``objective(y_true, y_pred)`` and
    expects the per-sample gradient and hessian of the loss.

    ``grad = predict - real`` is the squared-error gradient (up to a
    constant factor); ``hess = |grad| ** 0.1`` is an ad-hoc experiment,
    not the true second derivative.

    NOTE(review): hess is exactly 0 wherever predict == real, which can
    stall gblinear/gbtree updates — confirm this is intended.

    :param real: array-like of true target values.
    :param predict: array-like of current model predictions.
    :return: tuple ``(grad, hess)`` of numpy arrays, one entry per sample.
    """
    grad = predict - real
    hess = np.power(np.abs(grad), 0.1)

    # Debug trace of each boosting iteration's inputs and outputs.
    # (Labels fixed: the original printed `predict` under the name
    # 'dtrain' and `real` under the name 'preds'.)
    print('==============================================')
    print('predict', predict.tolist(), type(predict))
    print('real', real.tolist(), type(real))
    print('grad', grad.tolist(), type(grad))
    print('hess', hess.tolist(), type(hess), '\n')
    return grad, hess


def feval_func(y_pre, D_label):  # custom loss function borrowed from elsewhere
    """Penalized binary log-loss: gradient and hessian w.r.t. predictions.

    Negative-class errors are weighted by ``penalty``.  Returns a pair of
    Nx1 arrays ``(grad, hess)``.
    """
    print(y_pre, type(y_pre))
    print(D_label, type(D_label))
    # For a DMatrix this would be D_label.get_label(); here labels are passed raw.
    label = D_label
    penalty = 2.0
    one_minus_label = 1 - label
    residual = 1 - y_pre
    # First derivative of the weighted log-loss.
    grad = -label / y_pre + penalty * one_minus_label / residual
    # Second derivative of the weighted log-loss.
    hess = label / (y_pre ** 2) + penalty * one_minus_label / residual ** 2
    return grad, hess  # Nx1 matrix

def huber_approx_obj(preds, dtrain):
    """Pseudo-Huber objective: a smooth approximation of the Huber loss.

    Returns ``(grad, hess)`` arrays for xgboost.  ``dtrain`` is the raw
    label array here (use ``dtrain.get_label()`` with the native API).
    """
    delta = 1  # the Huber transition point ("h"/delta in the usual graphic)
    residual = preds - dtrain
    curvature = 1 + (residual / delta) ** 2
    root = np.sqrt(curvature)
    grad = residual / root
    hess = 1 / curvature / root
    return grad, hess


# Per-output MAE (no averaging across outputs).
mean_absolute_error_raw_values = partial(mean_absolute_error, multioutput='raw_values')


def mean_absolute_error_raw_values_hess(pred, dtrain):
    """Experimental objective: per-output MAE as 'grad', minus twice it as 'hess'.

    NOTE(review): ``mean_absolute_error`` returns a loss *value*, not a
    per-sample gradient, so this is not a valid xgboost objective —
    presumably left over from experimentation.
    """
    grad = mean_absolute_error_raw_values(pred, dtrain)
    hess = -2 * grad
    return grad, hess


# Train via the sklearn wrapper using the custom objective defined above.
# gblinear fits a linear booster, so predictions extrapolate linearly
# (gbtree would plateau outside the training range).
model = xgb.XGBRegressor(
                         # objective="reg:linear",
                         objective=customObj2,
                         booster="gblinear",  # "gbtree"
                         # n_estimators=10,
                         # max_depth=10, learning_rate=0.001, gamma=0.1,

                         )
model.fit(X=X_train, y=Y_train)
# NOTE(review): the keyword `data=` is version-sensitive — newer xgboost
# sklearn wrappers name this parameter `X`; confirm against the pinned version.
y_pred = model.predict(data=pd.DataFrame({'x': X_pred}))
# Plot the 5 true y values against predictions over the shifted x range.
plt.plot(df['y'])
plt.plot(y_pred)
plt.show()
plt.close()

# Repeat the fit with the native (Booster) API: wrap features+labels in a DMatrix.
T_train_xgb = xgb.DMatrix(X_train, Y_train)



params = {  # "objective": "reg:linear",
            "booster": "gblinear"
         }
# num_boost_round is left at its default (10 rounds).
gbm = xgb.train(dtrain=T_train_xgb,
                params=params,
                # feval=feval_func
                )

Y_pred = gbm.predict(xgb.DMatrix(pd.DataFrame({'x': X_pred})))

print('======================= predict =======================')
print(Y_pred)

plt.plot(df['y'])
plt.plot(Y_pred)
plt.show()
plt.close()
# NOTE: everything below this exit() is unreachable as written.
exit()
# date_list = []
# start_date_str = '2018-1-1'
# for i in range(28):
#     d = datetime.datetime.strptime(start_date_str, '%Y-%m-%d') + datetime.timedelta(days=i * 7)
#     date_list.append(i)
# print(date_list)
#
# data = pd.DataFrame({'date': date_list,
#                      'y': [6, 2, 4, 4, 5, 6, 7, 6, 2, 3, 4, 5, 6, 7, 6, 4, 3, 4, 5, 6, 7, 6, 4, 3, 4, 5, 6, 7]})
# print(data)
# print(data.dtypes)
# # exit(0)
#
# gbm = xgb.XGBRegressor(max_depth=3,
#                        learning_rate=0.1,
#                        n_estimators=100,
#                        silent=True,
#                        objective='reg:linear',  # "booster":"gblinear"  gbtree
#                        booster='gblinear').fit(data.ix[:, 25:], data['y'])
# predictions = gbm.predict(data.ix[:, 25:])
# actuals = data['y']
# print(mean_squared_error(actuals, predictions))
# print(predictions)

# https://datascience.stackexchange.com/questions/9483/xgboost-linear-regression-output-incorrect
# On xgboost's inability to extrapolate a straight line, and on prediction inaccuracy
# Airline-passengers example from the linked Q&A: gblinear over a time index.
df = pd.read_csv('data/international-airline-passengers.csv')
df.columns = ['ds', 'y']
# print(df[0:100], len(df))
# exit(0)
X_train = df.drop('y', axis=1)
Y_train = df['y']

print(X_train)
assert isinstance(X_train, pd.DataFrame)
print(X_train.dtypes)
print(type(X_train))
print('=======================================================')
print(Y_train)
print(Y_train.dtypes)
print(type(Y_train))

# Replace the date column with the integer row index as the single feature.
X_ = df.index.values.tolist()
X_train = pd.DataFrame({'x': X_})

print("最终输入xgb的数据")
print(X_train)
print("---------------------")
print(Y_train)

T_train_xgb = xgb.DMatrix(X_train, Y_train)

params = {"objective": "reg:linear",
          "booster": "gblinear"}
# params = {}
gbm = xgb.train(dtrain=T_train_xgb, params=params)

# NOTE(review): 143 hard-codes the dataset length (presumably 144 rows,
# indices 0..143) — if so, index 143 is appended a second time here;
# range(len(X_), 250) would avoid the duplicate. Confirm against the CSV.
for i in range(143, 250):  # append extra x points to forecast forward
    X_.append(i)
X_pred = X_

Y_pred = gbm.predict(xgb.DMatrix(pd.DataFrame({'x': X_pred})))  #
print('==============最终预测数据 predict =======================')
print(Y_pred)

plt.plot(df['y'])
plt.plot(Y_pred)
plt.show()