from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import pandas as pd
import optuna

# Load the dataset: 'LST' (land surface temperature) is the target variable,
# 'CVI', 'GVI', 'NDBI_150', 'NDVI_150' are the predictor features.
# FIX: use a raw string so the backslashes in the Windows path can never be
# interpreted as escape sequences (the literal value is unchanged).
df = pd.read_excel(r'F:\machinelearningforpython\随机森林/150.xlsx')
features = df[['CVI', 'GVI', 'NDBI_150', 'NDVI_150']]
target = df['LST']
def objective(trial):
    """Optuna objective: minimise the test-set MSE of a random forest.

    Hyperparameters searched:
      - test_size: fraction of samples held out for testing (0.25-0.35)
      - n_estimators: number of trees in the forest (50-200)

    Returns the mean squared error on the held-out split (Optuna minimises it).
    """
    # Suggest hyperparameter values for this trial.
    test_size = trial.suggest_float('test_size', 0.25, 0.35)
    n_estimators = trial.suggest_int('n_estimators', 50, 200)

    # Split the data.
    # BUG FIX: test_size is keyword-only in train_test_split; passing it
    # positionally makes sklearn treat the float as an extra array and fail.
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=test_size, random_state=100)

    # Fit the random forest. random_state is fixed so each trial's score is
    # deterministic; otherwise forest randomness adds noise to the search.
    rf_model = RandomForestRegressor(n_estimators=n_estimators, random_state=100)
    rf_model.fit(X_train, y_train)
    y_pred = rf_model.predict(X_test)

    # Score: mean squared error on the held-out split.
    return mean_squared_error(y_test, y_pred)
# Build a minimising study and search the hyperparameter space.
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=100)

# Report the winning configuration and its score.
print('Best hyperparameters: ', study.best_params)
print('Best score: ', study.best_value)

# Best hyperparameter set, reused below to fit the final model.
Hy = study.best_params

# Refit the model using the best hyperparameters found by Optuna.
# BUG FIX: test_size is keyword-only in train_test_split; the original
# positional call made sklearn treat the float as an extra array and fail.
X_train, X_test, y_train, y_test = train_test_split(
    features, target, test_size=Hy['test_size'], random_state=100)

# random_state is fixed so the final model is reproducible run to run.
rf_model = RandomForestRegressor(n_estimators=Hy['n_estimators'], random_state=100)
rf_model.fit(X_train, y_train)

# Predict on the held-out set and pull the fitted feature importances.
y_pred = rf_model.predict(X_test)
feature_importances = rf_model.feature_importances_

# Evaluate model performance on the test split.
mse = mean_squared_error(y_test, y_pred)  # mean squared error
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f'Mean Squared Error: {mse}')
print('Mean Absolute Error (MAE):', mae)
print('R-squared (R2):', r2)
print(f'feature_importances:{feature_importances}')

# Scatter plot of true vs. predicted values.
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 14})  # default font size 14
import numpy as np

# Observed-vs-predicted scatter plus a first-degree linear fit.
plt.scatter(y_test, y_pred, color='blueviolet', alpha=0.5, label='True vs Predicted Values')
coeffs = np.polyfit(y_test, y_pred, 1)
trend = np.poly1d(coeffs)
plt.plot(y_test, trend(y_test), color='crimson', linewidth=1)

plt.xlabel('True Values')
plt.ylabel('Predictions')
plt.title('True vs Predicted Values')

# Reference y = x line: points on it are perfectly predicted.
lo, hi = min(y_test), max(y_test)
plt.plot([lo, hi], [lo, hi], color='gray', linestyle='--', linewidth=1, label='y=x')

# Annotate with the fitted equation and the R^2 score.
top = max(y_pred)
plt.text(lo, top, f' y = {coeffs[0]:.2f}x + {coeffs[1]:.2f}', fontsize=10, ha='left')
plt.text(hi, top, f'R^2 = {r2:.2f}', fontsize=10, ha='left')
plt.show()


# Bar chart of feature importances, sorted from most to least important.
feature_importances = rf_model.feature_importances_
feature_names = X_train.columns
sorted_idx = feature_importances.argsort()[::-1]  # indices, descending importance
sorted_feature_names = [feature_names[i] for i in sorted_idx]
sorted_feature_importances = feature_importances[sorted_idx]

# BUG FIX: subplots_adjust must be called after plt.figure(); the original
# called it first, so it acted on the previous figure instead of this one.
plt.figure(figsize=(12, 6))
plt.subplots_adjust(right=0.8)  # widen right margin so bar labels stay inside the frame
plt.barh(sorted_feature_names, sorted_feature_importances, color='lightgray')
# Write each importance value just to the right of its bar.
for i, v in enumerate(sorted_feature_importances):
    plt.text(v, i, f' {v:.3f}', color='black', va='center', fontsize=12)
plt.xlabel('Feature Importance')
plt.ylabel('Feature')
plt.title('Feature Importance in Random Forest Model (Sorted)')
plt.gca().invert_yaxis()  # largest importance at the top
plt.xlim(0, max(sorted_feature_importances)+0.05)
plt.show()


# Partial dependence plots (PDP) for selected features.
# BUG FIX: sklearn.inspection.plot_partial_dependence was deprecated in
# scikit-learn 1.0 and removed in 1.2; PartialDependenceDisplay.from_estimator
# is the supported replacement.
from sklearn.inspection import PartialDependenceDisplay
import matplotlib.pyplot as plt

# One figure per feature, matching the original two separate plots.
# A local loop variable is used so the module-level `features` DataFrame
# is no longer clobbered by a throwaway list of column names.
for pdp_feature in ('NDBI_150', 'NDVI_150'):
    PartialDependenceDisplay.from_estimator(rf_model, X_train, [pdp_feature])
    plt.show()



