import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
from skopt import BayesSearchCV
from skopt.space import Real, Integer


# Load the preprocessed dataset and separate the target column from the features.
TARGET_COLUMN = 'rrr'
data = pd.read_csv("E:/GraduateDesign/LinearUse_processed.csv")
y = data[TARGET_COLUMN]
X = data.drop(columns=[TARGET_COLUMN])

# Hold out 20% of the rows for testing; fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, shuffle=True
)

# Hyperparameter search space explored by the Bayesian optimizer.
search_spaces = {
    # Number of boosting stages (trees).
    'n_estimators': Integer(50, 500),
    # Shrinkage per tree; log-uniform prior samples small rates more densely.
    'learning_rate': Real(0.01, 0.3, prior='log-uniform'),
    # Maximum depth of each individual tree.
    'max_depth': Integer(3, 10),
    # Minimum number of samples required to split an internal node.
    'min_samples_split': Integer(2, 10),
    # Minimum number of samples required at a leaf node.
    'min_samples_leaf': Integer(1, 10),
    # Fraction of rows used to fit each tree (stochastic gradient boosting).
    'subsample': Real(0.5, 1.0),
    # Fraction of features considered when looking for the best split.
    'max_features': Real(0.5, 1.0),
}

# Bayesian hyperparameter search with 5-fold cross-validation.
# scikit-optimize maximizes the score, so MSE is negated to be minimized.
opt = BayesSearchCV(
    estimator=GradientBoostingRegressor(random_state=42),
    search_spaces=search_spaces,
    n_iter=30,                         # number of optimization iterations
    cv=5,                              # cross-validation folds
    n_jobs=-1,                         # use all available CPU cores
    random_state=42,
    scoring='neg_mean_squared_error',  # optimization target
)

# Run the search on the training split (refits the best model afterwards).
opt.fit(X_train, y_train)

# Report the best hyperparameters found by the search.
print("Best parameters found:")
print(opt.best_params_)

# Predict on the held-out test set with the refitted best estimator.
best_model = opt.best_estimator_
y_pred = best_model.predict(X_test)

# RMSE via an explicit sqrt: the `squared=False` argument of
# mean_squared_error was deprecated in scikit-learn 1.4 and removed in 1.6,
# so np.sqrt(mse) is the portable form.
print(f"\nRMSE: {np.sqrt(mean_squared_error(y_test, y_pred))}")
print(f"MAE: {mean_absolute_error(y_test, y_pred)}")

# MAPE, skipping zero targets to avoid division by zero.
mask = y_test != 0
mape = np.mean(np.abs((y_test[mask] - y_pred[mask]) / y_test[mask])) * 100
print(f"MAPE: {mape:.2f}%")

# Coefficient of determination on the test set.
r2 = r2_score(y_test, y_pred)
print(f"R²: {r2}")

# Feature importance visualization (optional). The matplotlib import lives
# at the top of the file with the other dependencies, per PEP 8.
plt.barh(X.columns, best_model.feature_importances_)
plt.title("Feature Importances")
plt.xlabel("Importance")
plt.tight_layout()
plt.show()

# Persist the tuned model for later reuse (uncomment to enable).
# joblib.dump(best_model, 'model.pkl')
