import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.datasets import make_friedman1
from sklearn.ensemble import GradientBoostingRegressor
import csv

# Base directory that holds the GPS track data files.
# NOTE(review): with root == "" the paths below resolve to ABSOLUTE paths
# such as "/go_track_tracks.csv" — confirm this is intended, or set root
# to the actual data folder.
root = ""

# Build both paths up front, then open the files and wrap each in a CSV reader.
# NOTE(review): these handles are consumed (and should be closed) by the
# split loops further down in this script.
tracks_path = root + "/go_track_tracks.csv"
points_path = root + "/go_track_trackspoints.csv"
csv_go_track_tracks = open(tracks_path, "r")
csv_go_track_tracks_points = open(points_path, "r")
reader_tracks = csv.reader(csv_go_track_tracks)
reader_tracks_points = csv.reader(csv_go_track_tracks_points)

# Read go_track_tracks.csv and split its rows:
# first 100 rows -> training set, remaining rows -> prediction set.
# BUG FIX: the original initialized line_count = 0 but never incremented it,
# so EVERY row was appended to the training set and the prediction set was
# always empty.  enumerate() supplies the row index correctly.
train_set_go_track_tracks = []
prediction_set_go_track_tracks = []
for line_count, line in enumerate(reader_tracks):
    if line_count < 100:
        train_set_go_track_tracks.append(line)
    else:
        prediction_set_go_track_tracks.append(line)
# Close the handle once fully consumed — it was never closed before.
csv_go_track_tracks.close()

# Read go_track_trackspoints.csv and split its rows the same way:
# first 100 rows -> training set, remaining rows -> prediction set.
# BUG FIX: same defect as the tracks split above — line_count was never
# incremented, so all rows went to the training set.  enumerate() fixes it.
train_set_go_track_tracks_points = []
prediction_set_go_track_tracks_points = []
for line_count, line in enumerate(reader_tracks_points):
    if line_count < 100:
        train_set_go_track_tracks_points.append(line)
    else:
        prediction_set_go_track_tracks_points.append(line)
# Close the handle once fully consumed — it was never closed before.
csv_go_track_tracks_points.close()

# Gradient-boosting regression demo, adapted from:
# https://blog.csdn.net/wuxiaosi808/article/details/78036633
X, y = make_friedman1(n_samples=1200, random_state=0, noise=1.0)
# First 200 samples train the model; the remaining 1000 evaluate it.
X_train, X_test = X[:200], X[200:]
y_train, y_test = y[:200], y[200:]
est = GradientBoostingRegressor(
    # BUG FIX: 'ls' was renamed to 'squared_error' in scikit-learn 1.0 and
    # removed in 1.2; 'lad' is least absolute deviation, 'huber' combines both.
    loss='squared_error',
    n_estimators=100,      # number of boosting stages (weak learners); default 100
    learning_rate=0.1,     # shrinkage (0.0-1.0) applied to each tree's contribution
    max_depth=3,           # depth of each regression tree; alternatively bound by max_leaf_nodes
    subsample=1.0,         # fraction of samples per base learner; <1.0 lowers variance, raises bias
    min_samples_split=2,   # min samples to split an internal node (float = fraction)
    min_samples_leaf=1,    # min samples required at a leaf node (float = fraction)
    max_features=None,     # features considered per split: None=all, 'sqrt', 'log2', int, float
    max_leaf_nodes=None,   # None = unlimited leaf nodes
    # BUG FIX: min_impurity_split was removed in scikit-learn 1.0; the 1e-7
    # value used here was its default anyway, so dropping it preserves behavior.
    verbose=0,             # >0 prints per-tree progress and performance
    warm_start=False,      # False = retrain from scratch; True = add trees incrementally
    random_state=0,        # seed for reproducibility
).fit(X_train, y_train)
# BUG FIX: the MSE was computed but its value was silently discarded; report it.
mse = mean_squared_error(y_test, est.predict(X_test))
print("Test MSE: %.4f" % mse)
