import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split

# ---- Data -----------------------------------------------------------------
# Synthetic regression problem; fixed seeds so the run is reproducible.
X, y = make_regression(n_samples=1000, n_features=5, noise=0.1, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# ---- Hyperparameters ------------------------------------------------------
n_trees = 50          # number of boosting rounds (weak learners)
learning_rate = 0.1   # shrinkage applied to each tree's contribution

# ---- Initialization -------------------------------------------------------
# Gradient boosting with squared loss starts from the constant model
# F0(x) = mean(y_train) — the minimizer of the MSE — rather than 0,
# so the first trees model structure instead of the global offset.
# dtype=float guards against an integer target dtype breaking `+=` below.
base_prediction = y_train.mean()
y_pred_train = np.full_like(y_train, base_prediction, dtype=float)
y_pred_test = np.full_like(y_test, base_prediction, dtype=float)

# ---- Boosting loop --------------------------------------------------------
trees = []
for i in range(n_trees):
    # For squared loss the negative gradient is simply the residual.
    residuals = y_train - y_pred_train

    # Shallow tree = weak learner. A fixed random_state makes split
    # tie-breaking deterministic, so repeated runs give identical output.
    tree = DecisionTreeRegressor(max_depth=3, random_state=42)
    tree.fit(X_train, residuals)  # fit this round's tree to the residuals
    trees.append(tree)

    # Additive update: F_m(x) = F_{m-1}(x) + learning_rate * h_m(x)
    y_pred_train += learning_rate * tree.predict(X_train)
    y_pred_test += learning_rate * tree.predict(X_test)

    # Track training loss per round.
    mse = mean_squared_error(y_train, y_pred_train)
    print(f"Iteration {i + 1}: MSE = {mse:.4f}")

# ---- Final evaluation -----------------------------------------------------
final_mse = mean_squared_error(y_test, y_pred_test)
print(f"\nFinal Test MSE: {final_mse:.4f}")
