#coding:utf-8
import math
import datetime

from pandas.core.frame import DataFrame
from xgboost import XGBRegressor
from sklearn.metrics import mean_absolute_percentage_error, adjusted_rand_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error

from alg_model.util.DataUtil import *

def run(x=None, y=None, dateList=None, dataList=None, predictNum=0, batch=10):
    """Train an XGBoost regressor on (x, y), print error metrics, and roll
    the forecast forward ``predictNum`` days.

    Parameters:
        x, y: feature rows and targets, handed to ``splitData`` for the
            train/test split.
        dateList: 'YYYY/MM/DD' date strings; extended in place with one new
            day per prediction step.
        dataList: historical values; the trailing ``batch`` entries form the
            feature window for each step, and each prediction is appended in
            place so later steps consume it.
        predictNum: number of future days to forecast (0 = metrics only).
        batch: size of the sliding feature window.

    Returns:
        (dateList, dataList) — the same (mutated) list objects passed in.
    """
    # Guard against the mutable-default-argument pitfall: the original
    # defaults (x=[], dateList=[], ...) were shared across calls, so repeated
    # default invocations would accumulate appended dates/predictions.
    x = [] if x is None else x
    y = [] if y is None else y
    dateList = [] if dateList is None else dateList
    dataList = [] if dataList is None else dataList

    # Train/test split (project helper from alg_model.util.DataUtil)
    x_train, x_test, y_train, y_test = splitData(x, y)

    # XGBoost regression model
    model = XGBRegressor(max_depth=10           # maximum tree depth
                         , learning_rate=0.1    # learning rate (shrinkage)
                         , n_estimators=200     # number of weak learners
                         , min_child_weight=3   # minimum leaf sample weight; curbs fitting
                                                # to local outliers (too high -> underfit)
                         #, silent = 0          # (legacy flag) verbose output
                         , objective='reg:gamma'  # gamma regression objective
                         , n_jobs=4             # CPU cores to use
                         , seed=6               # random seed for reproducibility
                         )
    # Fit and evaluate on the held-out split
    print("------------------------------------------- XGBoost回归预测模型：-------------------------------------------")
    model.fit(x_train, y_train)
    y_pre = model.predict(x_test)
    # Evaluation scores
    mae = mean_absolute_error(y_pred=y_pre, y_true=y_test)
    print("平均绝对误差MAE:", mae)
    mse = mean_squared_error(y_pred=y_pre, y_true=y_test)
    print("均方根误差RMSE:", math.sqrt(mse))
    print("均方误差MSE:", mse)
    mape = mean_absolute_percentage_error(y_pred=y_pre, y_true=y_test)
    print("平均绝对百分比误差MAPE:", mape)

    # Iterative forecast: each step predicts one day ahead from the most
    # recent `batch` values (which include earlier predictions).
    one_day = datetime.timedelta(days=1)  # loop-invariant, hoisted
    for _ in range(predictNum):
        # Next calendar day after the latest known date
        cur_day = datetime.datetime.strptime(dateList[-1], '%Y/%m/%d')
        dateList.append((cur_day + one_day).strftime('%Y/%m/%d'))
        # Single-row feature frame built from the trailing window
        window = DataFrame([dataList[-batch:]])
        pre = model.predict(window).tolist()
        dataList.append(pre[0])
    return (dateList, dataList)
if __name__ == "__main__":
    # Script entry point: invokes run() with empty feature/target lists.
    # NOTE(review): splitData([], []) and model.fit on empty data will likely
    # raise — presumably this is a placeholder smoke call; confirm intent.
    run([],[])
