# Comparative modelling: LightGBM vs. a tuned random forest (with hyper-parameter search)

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import RFECV
from sklearn.preprocessing import LabelEncoder
# from sklearn.metrics import mean_squared_error, r2_score
import lightgbm as lgb
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error  # 添加 MAE

import matplotlib
# Seaborn theme for all figures.
sns.set(style='whitegrid')
# Use the SimHei font so Chinese axis labels/titles render correctly,
# and keep the minus sign displayable under a CJK font.
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

# ========== 1. Load data ==========
# GBK encoding: the CSV was exported from a Chinese-locale tool.
df = pd.read_csv('../data/cleaned_data.csv', encoding='gbk')

# ========== 2. Feature engineering ==========
# Fuse the separate date and time columns into one timestamp; unparseable
# rows become NaT (their derived features turn NaN and are dropped later).
timestamps = pd.to_datetime(
    df['date'].astype(str) + ' ' + df['time'].astype(str), errors='coerce'
)
df['shijian'] = timestamps
df['hour'] = timestamps.dt.hour
df['day'] = timestamps.dt.dayofweek
# Ratio feature; the tiny epsilon guards against division by zero.
df['KH_IM_ratio'] = df['shuliaoKH'] / (df['shuliaoIM'] + 1e-6)

# Candidate feature columns. NOTE: the selection below keeps only those that
# actually exist in the CSV, so any name here may silently be absent later.
features = [
    'weiliaoc', 'yaotouc', 'bilengjydS1', 'bilengjedS1',
    'bilengjsdI1', 'shuliaoKH', 'shuliaoIM', 'fengjizs', 'rehao',
    'hour', 'day', 'KH_IM_ratio', 'shuliaol', 'chumoslKH', 'chumoslSM'
]
target = 'shuliaoSM'
# Keep only columns present in the data, then drop incomplete rows.
df = df[[col for col in features + [target] if col in df.columns]].dropna()

# Label-encode categorical fields.
# BUGFIX: guard on column presence — the filter above drops columns missing
# from the CSV, and indexing a dropped column would raise KeyError here.
for col in ['weiliaoc', 'yaotouc']:
    if col in df.columns and df[col].dtype == 'object':
        df[col] = LabelEncoder().fit_transform(df[col])

X = df.drop(columns=[target])
y = df[target]

# ========== 3. Train/test split ==========
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=62
)

# ========== 4. Feature selection: random forest + RFECV ==========
# Recursive feature elimination, removing one feature per round, with
# 5-fold cross-validation scored by R² to pick the best subset size.
base_rf = RandomForestRegressor(n_estimators=100, random_state=42)
rfe = RFECV(estimator=base_rf, step=1, cv=5, scoring='r2')
rfe.fit(X_train, y_train)
kept_cols = X_train.columns[rfe.support_].tolist()
X_train_sel = X_train[kept_cols]
X_test_sel = X_test[kept_cols]

# ========== 5. LightGBM hyper-parameter tuning ==========
print("\n🔍 正在调参 LightGBM...")
# 3 * 3 * 2 * 2 * 2 = 72 candidate configurations.
search_space = {
    'n_estimators': [100, 200, 300],
    'max_depth': [4, 6, 8],
    'learning_rate': [0.05, 0.1],
    'subsample': [0.8, 1.0],
    'colsample_bytree': [0.7, 0.9],
}

# Exhaustive grid search with 5-fold CV, optimising RMSE (negated so that
# higher is better, as GridSearchCV maximises), parallelised on all cores.
search = GridSearchCV(
    estimator=lgb.LGBMRegressor(random_state=42),
    param_grid=search_space,
    scoring='neg_root_mean_squared_error',
    cv=5,
    verbose=1,
    n_jobs=-1,
)
search.fit(X_train_sel, y_train)

# Refit winner on the full training split (GridSearchCV refits by default).
lgb_model = search.best_estimator_
print("✅ LightGBM 最佳参数:", search.best_params_)

# ========== 6. Evaluate LightGBM on the hold-out set ==========
y_pred_lgb = lgb_model.predict(X_test_sel)
rmse_lgb = np.sqrt(mean_squared_error(y_test, y_pred_lgb))
mae = mean_absolute_error(y_test, y_pred_lgb)
r2_lgb = r2_score(y_test, y_pred_lgb)

# Report all three metrics with identical 4-decimal formatting.
print("\n📊 LightGBM 模型评估：")
for metric_name, metric_value in (("RMSE", rmse_lgb), ("MAE", mae), ("R²", r2_lgb)):
    print(f"{metric_name}: {metric_value:.4f}")

# ========== 7. LightGBM feature importance ==========
# 'gain' importance: total loss reduction contributed by splits on each feature.
lgb.plot_importance(lgb_model, max_num_features=15, importance_type='gain')
plt.title("LightGBM 特征重要性")
plt.tight_layout()
plt.show()

# ========== 8. Predicted vs. actual scatter ==========
fig = plt.figure(figsize=(6, 6))
ax = fig.gca()
ax.scatter(y_test, y_pred_lgb, alpha=0.6, edgecolors='k')
lo, hi = y.min(), y.max()
ax.plot([lo, hi], [lo, hi], '--', color='gray')  # ideal-fit diagonal
ax.set_xlabel("True SM")
ax.set_ylabel("Predicted SM")
ax.set_title("LightGBM 预测 vs 实际")
ax.grid(True)
fig.tight_layout()
plt.show()
