import pandas as pd
import xgboost as xgb
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor as XGBR

# --- Dataset ------------------------------------------------------------
# Alternative input files kept for quick switching between experiments:
# dataset = pd.read_csv(r"D:\GitCode\ml\data\load_rate\ai_c66.csv", engine='python')
# dataset = pd.read_csv(r"D:\GitCode\ml\data\load_rate\ai_d9e8.csv", engine='python')
dataset = pd.read_csv(r"D:\GitCode\ml\data\load_rate\ai_ai.csv", engine='python')

# All columns except the last are features; the last column is the target.
# 80/20 train/test split with a fixed seed for reproducibility.
features = dataset.iloc[:, :-1]
target = dataset.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(
    features, target, test_size=0.2, random_state=42)

# Wrap both splits in XGBoost's optimized DMatrix containers.
dTrain = xgb.DMatrix(X_train, y_train)
dTest = xgb.DMatrix(X_test, y_test)

# NOTE(review): the three values below are defined but never used —
# params1/params2 hard-code their own eta/max_depth/gamma. Kept to
# preserve module-level state; consider wiring them into the dicts.
eta = 0.1
max_depth = 2
gamma = 0.01

# Hyper-parameters for the first booster. Regularization (lambda/alpha)
# and every column-sampling ratio sit at their XGBoost defaults; the fit
# is constrained mainly by shallow trees and a small learning rate.
params1 = {
    "objective": "reg:squarederror",  # plain squared-error regression
    "max_depth": 2,          # shallow trees limit over-fitting
    "eta": 0.05,             # conservative learning rate
    "gamma": 0.0005,         # min loss reduction required for a split
    "subsample": 1,          # use every row for each tree
    "lambda": 1,             # L2 regularization weight (default)
    "alpha": 0,              # L1 regularization weight (off)
    "colsample_bytree": 1,
    "colsample_bylevel": 1,
    "colsample_bynode": 1,
}
# Number of boosting rounds (trees grown).
num_round = 400

# Train booster #1 and score the held-out test set.
bst_train1 = xgb.train(params1, dTrain, num_round)

# bst_train1.save_model("xgboost_v1.0.json")
# bst_train1.dump_model("xgb_model.txt")
train_pre1 = bst_train1.predict(dTest)

# Round every prediction to two decimal places for display/comparison.
train_pre1_retain2 = [round(pred, 2) for pred in train_pre1]

length = len(train_pre1_retain2)
# print("train last Score: ", r2_score(y_test, train_pre1_retain2))

# Convert the ground-truth pandas Series into a plain Python list.
y_test_list = y_test.values.tolist()

print("实际值：", y_test_list)
print("预测值：", train_pre1_retain2)

# Ground-truth labels as stored in the test DMatrix (float32 array).
labels = dTest.get_label()

# Print the per-sample absolute error |prediction - label| and accumulate
# the total. Rewritten from an index loop (`for i in range(length)`) to
# the idiomatic zip over the two parallel sequences; output is unchanged.
abs_error = 0
print("误差：  ", end=" ")
for pred, label in zip(train_pre1_retain2, labels):
    err = abs(round(pred - label, 2))
    print(err, end=", ")
    abs_error += err
# NOTE(review): printing mean absolute error * 100 as "%" assumes the
# target values are rates in [0, 1] — confirm against the dataset.
print("\n平均误差：", abs_error/float(length)*100, "%")




# Hyper-parameters for the (currently disabled) second booster run.
# Identical to params1 except for a far more aggressive learning rate.
params2 = {
    "objective": "reg:squarederror",
    "max_depth": 2,
    "eta": 0.7,              # high learning rate, for comparison with params1
    "gamma": 0.0005,
    "subsample": 1,
    "lambda": 1,
    "alpha": 0,
    "colsample_bytree": 1,
    "colsample_bylevel": 1,
    "colsample_bynode": 1,
}

# bst_train2 = xgb.train(params2, dTrain, num_round)
# # bst_train2.save_model(open(r"D:\GitCode\ml\data\load_rate\xgb_model.model", "wb"))
#
# train_pre2 = bst_train2.predict(dTest)
# print("train this Score: ", r2_score(y_test, train_pre2))

#
# bst_fit = XGBR(n_estimators=num_round
#                , eta=0.05
#                , max_depth=2
#                , reg_lambda=1
#                , reg_alpha=0).fit(X_train, y_train)
#
# fit_pre = bst_fit.predict(X_test)
# print("fit Score: ", r2_score(y_test, fit_pre))

# bst_fit.save_model("bst_fit.model")
