import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from skopt import BayesSearchCV
import shap
import matplotlib.pyplot as plt
from skopt.space import Integer, Real

# ========== 1. Load data ==========
df = pd.read_csv(r"D:\resources\data\jumbo\2014-2020_1deg_st_merged_st_chl_ssh_mld.csv")
target_col = "CPUE"

# ========== 2. Build the two candidate feature schemes ==========
# Spatio-temporal base columns shared by both schemes.
_base_cols = ['Year', 'Month', 'Lon', 'Lat']
# Sampling depths (presumably metres) of the vertical sea-temperature profile.
_st_depths = ['0.5', '47.4', '92.3', '155.9', '222.5', '318.1', '453.9']

# Scheme 1: mean vertical sea temperatures; Scheme 2: multi-layer label features.
feature_set_1 = _base_cols + [f'ST_{d}' for d in _st_depths]
feature_set_2 = _base_cols + [f'ST_{d}_Label' for d in _st_depths]

feature_sets = {
    "ST_mean": feature_set_1,
    "ST_feature": feature_set_2,
}

# ========== 3. Evaluation metrics ==========
def evaluate(y_true, y_pred):
    """Compute regression metrics between observed and predicted values.

    Parameters
    ----------
    y_true, y_pred : array-like
        Observed / predicted values. Both are flattened to 1-D first, so a
        (n, 1) column vector and a (n,) vector compare element-wise instead
        of silently broadcasting to an (n, n) matrix.

    Returns
    -------
    dict
        MSE, RMSE, MAE, MRE (mean relative error) and R².
    """
    # Coerce to float arrays (accepts plain lists) and flatten to 1-D so
    # mixed (n, 1) / (n,) inputs cannot broadcast pairwise.
    y_true = np.asarray(y_true, dtype=float).ravel()
    y_pred = np.asarray(y_pred, dtype=float).ravel()

    err = y_true - y_pred
    mse = float(np.mean(err ** 2))
    rmse = float(np.sqrt(mse))
    mae = float(np.mean(np.abs(err)))
    mre = float(np.mean(np.abs(err / (y_true + 1e-8))))  # epsilon avoids /0

    # R² = 1 - SS_res / SS_tot (coefficient of determination).
    ss_res = float(np.sum(err ** 2))
    ss_tot = float(np.sum((y_true - y_true.mean()) ** 2))
    if ss_tot > 0.0:
        r2 = 1.0 - ss_res / ss_tot
    else:
        # Constant y_true: R² is 1 for a perfect fit, else 0
        # (sklearn.metrics.r2_score's convention).
        r2 = 1.0 if ss_res == 0.0 else 0.0

    return {"MSE": mse, "RMSE": rmse, "MAE": mae, "MRE": mre, "R²": r2}

# ========== 4. Bayesian-optimisation search space ==========
# Hyperparameter ranges explored by BayesSearchCV for the RandomForestRegressor.
param_space = {
    'n_estimators': Integer(100, 800),    # number of trees in the forest
    'max_depth': Integer(3, 30),          # maximum depth of each tree
    'min_samples_split': Integer(2, 10),  # min samples required to split an internal node
    'min_samples_leaf': Integer(1, 5),    # min samples required at a leaf node
    'max_features': Real(0.3, 1.0, prior='uniform')  # fraction of features considered per split
}

# ========== 5. Compare the two feature schemes ==========
# For each scheme: normalise the target, split, tune a random forest with
# Bayesian search, score on the test set, and draw SHAP plots.
results = {}

for name, features in feature_sets.items():
    print(f"\n🧩 当前方案：{name}")
    X = df[features].copy()
    y = df[target_col].values.reshape(-1, 1)

    # --- Normalise the target to [0, 1] ---
    # NOTE(review): the scaler is fitted on the FULL data set before the
    # train/test split, so test-set targets leak into the scaling range —
    # confirm this is acceptable for the study design.
    y_scaler = MinMaxScaler()
    y_scaled = y_scaler.fit_transform(y)

    # --- Train / test split ---
    X_train, X_test, y_train, y_test = train_test_split(X, y_scaled, test_size=0.2, random_state=42)

    # --- Force numeric dtypes; non-parseable entries become 0 ---
    X_train = X_train.apply(pd.to_numeric, errors='coerce').fillna(0).astype(float)
    X_test = X_test.apply(pd.to_numeric, errors='coerce').fillna(0).astype(float)

    # --- Bayesian hyperparameter search (25 iterations, 5-fold CV) ---
    opt = BayesSearchCV(
        RandomForestRegressor(random_state=42),
        search_spaces=param_space,
        n_iter=25,
        cv=5,
        n_jobs=-1,
        scoring='neg_mean_squared_error',
        random_state=42
    )

    opt.fit(X_train, y_train.ravel())
    best_model = opt.best_estimator_

    print("✅ 最优参数：", opt.best_params_)

    # --- Predict on the held-out test set ---
    y_pred = best_model.predict(X_test)

    # --- Metrics computed directly in the normalised [0, 1] space ---
    # y_test is (n, 1) while y_pred is (n,): ravel() so the element-wise
    # numpy ops inside evaluate (notably MRE) do not broadcast the pair
    # into an (n, n) matrix.
    metrics = evaluate(y_test.ravel(), y_pred)
    results[name] = metrics

    print(f"📊 {name} 模型在测试集（归一化空间）上的性能：")
    for k, v in metrics.items():
        print(f"   {k} = {v:.6f}")

    # ========== 6. SHAP interpretability ==========
    print("🔍 绘制 SHAP 可解释性图...")
    # NOTE(review): SHAP values are computed on the FULL feature matrix
    # (train + test); switch to X_train/X_test if a leakage-free
    # explanation of held-out data is required.
    explainer = shap.Explainer(best_model, X)
    shap_values = explainer(X)

    plt.title(f"SHAP Summary - {name}")
    shap.summary_plot(shap_values, X, show=False)
    plt.tight_layout()
    plt.show()

    print("\n 绘制 SHAP 部分依赖图...")

    # One SHAP dependence (partial-dependence-style) plot per feature.
    # Categorical features must stay numerically/label encoded here.
    for feature in X.columns:
        print(f"绘制特征 {feature} 的部分依赖图...")
        plt.figure()
        shap.dependence_plot(
            feature,
            shap_values.values,
            X,
            interaction_index=None,  # suppress second-order interaction colouring
            show=False
        )
        plt.title(f"SHAP Partial Dependence for {feature}")
        plt.tight_layout()
        plt.show()

# ========== 7. Performance comparison across schemes ==========
print("\n🌊 模型性能对比结果：")
# One row per feature scheme, one column per metric.
df_result = pd.DataFrame.from_dict(results, orient="index")
print(df_result)
