# Import the required libraries
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import OneHotEncoder, StandardScaler

# --- Load the raw dataset ---
data = pd.read_csv(r"D:\作业\hd\期末\出租车价格回归\taxi_trip_pricing.csv")

# Column groups used throughout the pipeline.
numerical_columns = ['Trip_Distance_km', 'Passenger_Count', 'Base_Fare',
                     'Per_Km_Rate', 'Per_Minute_Rate', 'Trip_Duration_Minutes']
categorical_columns = ['Time_of_Day', 'Day_of_Week', 'Traffic_Conditions', 'Weather']

# Impute missing values: medians for numeric columns, modal value for
# categoricals. NOTE(review): imputation statistics are computed on the full
# dataset before the train/test split — confirm this leakage is acceptable.
for column in numerical_columns:
    data[column] = data[column].fillna(data[column].median())
for column in categorical_columns:
    data[column] = data[column].fillna(data[column].mode()[0])

# Rows without a target value cannot be used for supervised learning.
data = data.dropna(subset=['Trip_Price'])

# --- Feature engineering ---
# One-hot encode the categoricals, dropping the first level of each to avoid
# perfect collinearity. sparse_output=False yields a dense array directly.
encoder = OneHotEncoder(sparse_output=False, drop='first')
one_hot = encoder.fit_transform(data[categorical_columns])
encoded_df = pd.DataFrame(
    one_hot,
    columns=encoder.get_feature_names_out(categorical_columns),
    index=data.index,
)

# Standardize numeric features to zero mean / unit variance.
# NOTE(review): the scaler is fit on the full dataset before the split, which
# leaks test-set statistics into training — confirm this is acceptable.
scaler = StandardScaler()
scaled_df = pd.DataFrame(
    scaler.fit_transform(data[numerical_columns]),
    columns=numerical_columns,
    index=data.index,
)

# Assemble the model-ready frame: scaled numerics, encoded categoricals, target.
processed_data = pd.concat([scaled_df, encoded_df, data[['Trip_Price']]], axis=1)

# --- Train/test split (80/20, fixed seed for reproducibility) ---
y = processed_data['Trip_Price']
X = processed_data.drop(columns=['Trip_Price'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# --- Coarse hyper-parameter search for the random forest ---
simple_param_grid = {
    'n_estimators': [100, 150, 200],
    'max_depth': [10, 12, 15],
}

# 3-fold CV, minimizing MSE, parallelized across all cores.
simple_grid_search = GridSearchCV(
    RandomForestRegressor(random_state=42),
    param_grid=simple_param_grid,
    scoring='neg_mean_squared_error',
    cv=3,
    verbose=2,
    n_jobs=-1,
)
simple_grid_search.fit(X_train, y_train)

# Best estimator/parameters found by the search.
best_rf_model_simple = simple_grid_search.best_estimator_
best_params_simple = simple_grid_search.best_params_

# Held-out evaluation of the tuned model.
y_pred_simple = best_rf_model_simple.predict(X_test)
mse_simple = mean_squared_error(y_test, y_pred_simple)
r2_simple = r2_score(y_test, y_pred_simple)

print("最佳参数:", best_params_simple)
print("优化后的均方误差 (MSE):", mse_simple)
print("优化后的决定系数 (R²):", r2_simple)
# --- Wider hyper-parameter search over the random forest ---
param_grid_extended = {
    'n_estimators': [100, 200, 300, 500],
    'max_depth': [12, 15, 20, 25],
    'min_samples_split': [2, 5, 10],
    'min_samples_leaf': [1, 2, 4],
}

# Same CV protocol as the coarse search, over the larger grid.
grid_search_extended = GridSearchCV(
    RandomForestRegressor(random_state=42),
    param_grid=param_grid_extended,
    scoring='neg_mean_squared_error',
    cv=3,
    verbose=2,
    n_jobs=-1,
)
grid_search_extended.fit(X_train, y_train)

best_rf_model_extended = grid_search_extended.best_estimator_
best_params_extended = grid_search_extended.best_params_

# Held-out evaluation of the refined model.
y_pred_extended = best_rf_model_extended.predict(X_test)
mse_extended = mean_squared_error(y_test, y_pred_extended)
r2_extended = r2_score(y_test, y_pred_extended)

print("最佳参数:", best_params_extended)
print("扩展优化后的均方误差 (MSE):", mse_extended)
print("扩展优化后的决定系数 (R²):", r2_extended)

from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import GridSearchCV

# --- XGBoost baseline (default hyper-parameters, fixed seed) ---
xgb_model = XGBRegressor(random_state=42)
xgb_model.fit(X_train, y_train)

# Baseline held-out performance.
y_pred_xgb = xgb_model.predict(X_test)
mse_xgb = mean_squared_error(y_test, y_pred_xgb)
r2_xgb = r2_score(y_test, y_pred_xgb)
print("XGBoost 基础模型 MSE:", mse_xgb)
print("XGBoost 基础模型 R²:", r2_xgb)

# --- XGBoost hyper-parameter search ---
param_grid_xgb = {
    'n_estimators': [100, 200, 300],
    'max_depth': [3, 5, 7],
    'learning_rate': [0.01, 0.1, 0.2],
    'subsample': [0.8, 1.0],
    'colsample_bytree': [0.8, 1.0],
}

grid_search_xgb = GridSearchCV(
    XGBRegressor(random_state=42),
    param_grid=param_grid_xgb,
    scoring='neg_mean_squared_error',
    cv=3,
    verbose=2,
    n_jobs=-1,
)
grid_search_xgb.fit(X_train, y_train)

# Best estimator/parameters from the search.
best_xgb_model = grid_search_xgb.best_estimator_
best_params_xgb = grid_search_xgb.best_params_

# Held-out evaluation of the tuned XGBoost model.
y_pred_xgb_optimized = best_xgb_model.predict(X_test)
mse_xgb_optimized = mean_squared_error(y_test, y_pred_xgb_optimized)
r2_xgb_optimized = r2_score(y_test, y_pred_xgb_optimized)

print("XGBoost 最佳参数:", best_params_xgb)
print("优化后的 MSE:", mse_xgb_optimized)
print("优化后的 R²:", r2_xgb_optimized)
# --- Second-pass feature engineering (run after the code above) ---
# Derived feature: Distance_Per_Minute, a speed proxy (km per minute).
#
# BUG FIX: the original code also created
#     Price_Per_Km = Trip_Price / Trip_Distance_km
# and fed it into X as a model input. Since Trip_Price is the prediction
# target, that feature leaks the answer into the feature matrix and makes
# every downstream metric spuriously optimistic. It has been removed.
data['Distance_Per_Minute'] = data['Trip_Distance_km'] / data['Trip_Duration_Minutes']
# Guard against zero/invalid durations: map inf from division by zero to 0,
# and any 0/0 NaN to 0 as well.
data.replace([float('inf'), -float('inf')], 0, inplace=True)
data['Distance_Per_Minute'] = data['Distance_Per_Minute'].fillna(0)

extended_numerical = numerical_columns + ['Distance_Per_Minute']

# Re-encode categoricals (same scheme as the first pass).
encoder = OneHotEncoder(sparse_output=False, drop='first')
encoded_features = encoder.fit_transform(data[categorical_columns])
encoded_columns = encoder.get_feature_names_out(categorical_columns)
encoded_df = pd.DataFrame(encoded_features, columns=encoded_columns, index=data.index)

# Re-scale numerics including the new derived feature.
scaler = StandardScaler()
scaled_features = scaler.fit_transform(data[extended_numerical])
scaled_df = pd.DataFrame(scaled_features, columns=extended_numerical, index=data.index)

# Rebuild the model-ready frame and refresh the train/test split.
processed_data = pd.concat([scaled_df, encoded_df, data[['Trip_Price']]], axis=1)
X = processed_data.drop(columns=['Trip_Price'])
y = processed_data['Trip_Price']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# --- Model ensembling: stacking and weighted averaging ---
from sklearn.ensemble import StackingRegressor
from sklearn.linear_model import LinearRegression
from xgboost import XGBRegressor
from sklearn.ensemble import RandomForestRegressor

# Base learners, configured with the parameters found in the earlier searches.
rf_model = RandomForestRegressor(n_estimators=100, max_depth=12, random_state=42)
xgb_model = XGBRegressor(
    n_estimators=200,
    max_depth=3,
    learning_rate=0.1,
    subsample=0.8,
    colsample_bytree=0.8,
    random_state=42,
)

# Two-level stack: out-of-fold base predictions feed a linear meta-model.
stacking_model = StackingRegressor(
    estimators=[('random_forest', rf_model), ('xgboost', xgb_model)],
    final_estimator=LinearRegression(),
    cv=3,
)
stacking_model.fit(X_train, y_train)

# Held-out evaluation of the stacked ensemble.
y_pred_stacking = stacking_model.predict(X_test)
mse_stacking = mean_squared_error(y_test, y_pred_stacking)
r2_stacking = r2_score(y_test, y_pred_stacking)
print("Stacking 模型 MSE:", mse_stacking)
print("Stacking 模型 R²:", r2_stacking)


# Fixed-weight blend of the two base learners (0.6 RF / 0.4 XGB).
rf_model.fit(X_train, y_train)
xgb_model.fit(X_train, y_train)
y_pred_weighted = 0.6 * rf_model.predict(X_test) + 0.4 * xgb_model.predict(X_test)
print("加权平均融合模型 MSE:", mean_squared_error(y_test, y_pred_weighted))
print("加权平均融合模型 R²:", r2_score(y_test, y_pred_weighted))
# SHAP-based interpretation: global feature importance plus a local
# explanation for one sample, for both a base model and the stack.
import shap

# Explain the Random Forest base model on the test set.
explainer_rf = shap.Explainer(rf_model, X_train)  # SHAP explainer for Random Forest
shap_values_rf = explainer_rf(X_test)

# Global feature importance for the Random Forest (bar + beeswarm views).
shap.summary_plot(shap_values_rf, X_test, plot_type="bar")
shap.summary_plot(shap_values_rf, X_test)

# Explain the Stacking model. Since it is a composite estimator,
# shap.Explainer falls back to a model-agnostic explainer.
# NOTE(review): that fallback can be very slow on a large X_test —
# consider subsampling; confirm runtime is acceptable.
explainer_stacking = shap.Explainer(stacking_model, X_train)
shap_values_stacking = explainer_stacking(X_test)

# Global feature importance for the stacked model.
shap.summary_plot(shap_values_stacking, X_test, plot_type="bar")
shap.summary_plot(shap_values_stacking, X_test)

# Local explanation: force plot for a single test-set sample.
# NOTE(review): shap_values_* are Explanation objects under the modern
# Explainer API, while passing expected_value + values to shap.force_plot is
# the legacy calling convention — verify against the installed shap version.
sample_index = 0  # pick one sample from the test set
shap.force_plot(explainer_rf.expected_value, shap_values_rf[sample_index], X_test.iloc[sample_index], matplotlib=True)
shap.force_plot(explainer_stacking.expected_value, shap_values_stacking[sample_index], X_test.iloc[sample_index], matplotlib=True)

