# Importing the libraries
# -*- coding: utf-8 -*-
# @Author : 罗天天
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
import joblib  # imported directly: joblib has been split out of sklearn
import numpy as np
import pandas as pd


def main():
    """Load the feature spreadsheet, build train/test splits, and report the
    maximum absolute prediction error of the saved GBDT model on the test set.

    Reads '../data/date.xlsx', keeps the odd-numbered columns of the rows
    from index 5 on, transposes them so each column series becomes one
    sample, and pairs them with the hard-coded targets in `y`.
    """
    dataset = pd.read_excel('../data/date.xlsx')
    X = dataset.iloc[5:, :].values
    print("======原始数据的形状========")
    print(np.array(X).shape)
    # Keep only the odd-numbered columns 1, 3, ..., 65 (the feature columns).
    X = dataset.iloc[5:, list(range(1, 66, 2))].values
    print(np.array(X).shape)

    # Transpose: each original column becomes one sample row.
    X = [list(column) for column in zip(*X)]
    print("======数据的长度========")
    # Hard-coded ground-truth targets, one per sample (33 values).
    y = [345, 345, 345, 345, 345, 347, 338, 338, 341, 344, 345, 345, 345, 345, 345, 345, 347, 351, 347, 347, 347, 347,
         345, 345, 345, 345, 349, 347, 345, 345, 347, 347, 345]
    print(np.array(y).shape)
    print("=======X_train Y_train--X_test-Y_test形状======")
    # First 20 samples train, the remainder test.
    X_train = X[0:20]
    Y_train = y[0:20]
    # BUG FIX: the original sliced X[20:-1]/y[20:-1], which silently dropped
    # the last sample from the test set; use an open-ended slice instead.
    # (Also removed a discarded no-op `np.array(X)` call.)
    X_test = X[20:]
    Y_test = y[20:]
    print(np.array(X_train).shape)
    print(np.array(X_test).shape)
    print(np.array(Y_train).shape)
    print(np.array(Y_test).shape)

    maxBias = 0
    print("============模型预测=============")
    maxBias = bias(X_test, Y_test, maxBias)
    print("============测试最大误差=============")
    print("最大误差:%s" % (maxBias))


def bias(X, Y, maxBias):
    """Predict every sample in X and return the largest absolute error seen.

    Parameters
    ----------
    X : sequence of feature rows, one per sample
    Y : sequence of true target values (NOT modified)
    maxBias : running maximum absolute error to start from

    Returns
    -------
    float
        max(maxBias, max absolute prediction error over all samples).

    BUG FIXES vs. the original:
    - `Bi = Y` aliased the caller's list, so writing `Bi[i] = ...` silently
      overwrote the caller's Y with residuals; no such mutation happens now.
    - Comparisons (`bias < 0`, `bias > maxBias`) were performed on 1x1 numpy
      arrays; we work with plain floats instead.
    """
    for features, truth in zip(X, Y):
        # GBDT_Predict returns a (1, 1) array; extract the scalar.
        predicted = float(GBDT_Predict(features)[0][0])
        print("预测值:%s,真实值：%s" % (predicted, truth))
        error = truth - predicted  # signed residual, printed as-is
        print("偏差:%s" % (error))
        maxBias = max(maxBias, abs(error))
    return maxBias


def GBDT_train(X, Y):
    """Train `num_of_index` GBDT regressors and persist each one to disk.

    For each output index i, (X, Y) is randomly split (default 75% train /
    25% validation), a GradientBoostingRegressor is fitted, dumped to
    `<name>train_model_<i>_result.m`, and its R^2 score on both splits is
    printed.

    Uses module-level globals `name` (model path prefix) and `num_of_index`
    (number of models / output values).
    """
    for i in range(num_of_index):  # one model per output value
        # train_test_split default: 75% train, 25% validation.
        x_train, x_test, y_train, y_test = train_test_split(X, Y)

        # GradientBoostingRegressor parameters:
        #   n_estimators      : number of boosting stages (default 100)
        #   max_depth         : maximum depth of each tree (default 3)
        #   min_samples_split : minimum samples required to split a node
        #   learning_rate     : shrinkage applied per stage
        gbr = GradientBoostingRegressor(n_estimators=100, max_depth=3, min_samples_split=9, learning_rate=0.01)
        gbr.fit(x_train, y_train)

        # Build the model path once instead of four times.
        model_path = name + "train_model_" + str(i) + "_result.m"
        joblib.dump(gbr, model_path)  # persist the fitted model

        # NOTE: the original also computed gbr.predict(x_train/x_test) into
        # unused locals; those dead calls were removed. `score` is R^2.
        acc_train = gbr.score(x_train, y_train)
        acc_test = gbr.score(x_test, y_test)
        print(model_path + '训练准确率', acc_train)
        print(model_path + '验证准确率', acc_test)


# Load a saved model and predict on one sample.
def GBDT_Predict(X):
    """Predict one sample with the first saved GBDT model.

    Parameters
    ----------
    X : 1-D sequence of feature values for a single sample.

    Returns
    -------
    numpy.ndarray of shape (1, n_outputs), or None when `num_of_index < 1`
    (matching the original's implicit-None behavior for an empty loop).

    NOTE: the original iterated `range(num_of_index)` but returned inside
    the loop, so only model 0 was ever consulted regardless of
    `num_of_index`; this version makes that explicit.
    """
    if num_of_index < 1:
        return None
    X_Pred = np.reshape(X, (1, -1))  # sklearn expects a 2-D (1, n_features) input
    gbr = joblib.load(name + "train_model_" + str(0) + "_result.m")  # load model 0
    test_y = gbr.predict(X_Pred)
    return np.reshape(test_y, (1, -1))


# Path prefix for the saved model files (directory + file-name stem).
name = "../data/GBDT"
# Number of output values, i.e. how many models to train/load.
num_of_index = 1
if __name__ == "__main__":
    main()
