import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from skopt import BayesSearchCV
import shap
import matplotlib.pyplot as plt
from skopt.space import Integer, Real


# ========== 1. Load data ==========
# Alternative dataset (1-degree grid, 2014-2020), kept for quick switching:
# df = pd.read_csv(r"D:\resources\data\jumbo\2014-2020_1deg_st_merged_st_chl_ssh_mld.csv")  # contains CPUE and sea-temperature features
df = pd.read_csv(r"D:\resources\data\jumbo_2020-2021M_0.25deg_ST_SSS_DO_CHL_SSH_MLD.csv")  # contains CPUE and environmental features
target_col = "CPUE"  # regression target: catch per unit effort

# ========== 2. Build two candidate feature schemes ==========
# Scheme 1: spatio-temporal coordinates + sea temperature at 7 depth levels
# (column suffixes presumably are depths in meters — TODO confirm)
feature_set_1 = ['Year', 'Month', 'Lon', 'Lat',
                 'ST_0.5', 'ST_47.4', 'ST_92.3', 'ST_155.9', 'ST_222.5', 'ST_318.1', 'ST_453.9']  # vertical sea-temperature values
feature_set_2 = ['Year', 'Month', 'Lon', 'Lat',
                'ST_0.5_Label', 'ST_47.4_Label', 'ST_92.3_Label',
                'ST_155.9_Label', 'ST_222.5_Label', 'ST_318.1_Label', 'ST_453.9_Label']   # discretized (labelled) multi-layer features

# Columns excluded from the feature matrix (target and effort-related columns)
exclude_cols = ['Catch', 'Effort', 'CPUE', 'Label']

# Feature columns = every CSV column except the excluded ones
feature_cols = [col for col in df.columns if col not in exclude_cols]

# Feature schemes to evaluate.
# NOTE(review): feature_set_1 / feature_set_2 are defined above but unused —
# the active scheme uses ALL non-excluded columns; verify this is intended.
feature_sets = {
    "海温平均值方案": feature_cols,
    # "多层特征方案": feature_set_2
}

# ========== 3. Metric helper ==========
def evaluate(y_true, y_pred):
    """Compute regression metrics for one model run.

    Parameters
    ----------
    y_true, y_pred : array-like of float
        Ground-truth and predicted target values (here: log1p-transformed CPUE).

    Returns
    -------
    dict
        Keys "MSE", "RMSE", "MAE", "MRE", "R²" mapping to float scores.
    """
    # Coerce to float ndarrays so Series/array mixes behave consistently.
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    # Mean relative error.  Use abs(y_true) in the denominator: the previous
    # form (y_true + 1e-8) collapses to ~0 (or goes negative) for targets
    # near/below zero, which flips the sign or blows up the metric.
    # For non-negative targets (log1p CPUE) the result is unchanged.
    mre = np.mean(np.abs((y_true - y_pred) / (np.abs(y_true) + 1e-8)))
    r2 = r2_score(y_true, y_pred)
    return {"MSE": mse, "RMSE": rmse, "MAE": mae, "MRE": mre, "R²": r2}

# ========== 4. Bayesian-optimization search space ==========
# Hyperparameter space for RandomForestRegressor, consumed by BayesSearchCV.
# 'max_features' is a continuous fraction of the feature count (0.3-1.0)
# instead of the categorical 'auto'/'sqrt'/'log2' strings: 'auto' is
# deprecated/removed in recent scikit-learn, and a Real dimension gives the
# Bayesian optimizer a smooth axis to explore.
param_space = {
    'n_estimators': Integer(100, 800),                # number of trees
    'max_depth': Integer(3, 30),                      # maximum tree depth
    'min_samples_split': Integer(2, 10),              # min samples to split an internal node
    'min_samples_leaf': Integer(1, 5),                # min samples required at a leaf
    'max_features': Real(0.3, 1.0, prior='uniform')   # fraction of features tried per split
}

# ========== 5. Compare the feature schemes ==========
# Per-scheme metric dicts, keyed by scheme name; summarized at the end.
results = {}

for name, features in feature_sets.items():
    print(f"\n🧩 当前方案：{name}")
    X = df[features]
    y = df[target_col]

    # --- Transform the target (optional) ---
    # log1p compresses the heavy right tail typical of CPUE data.
    # NOTE(review): all metrics below are therefore reported in log space.
    y = np.log1p(y)  # log(CPUE + 1), smooths the distribution

    # --- Train/test split (fixed seed for reproducibility) ---
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # --- Bayesian hyperparameter search over the random forest ---
    opt = BayesSearchCV(
        RandomForestRegressor(random_state=42),
        search_spaces=param_space,
        n_iter=25,        # number of parameter settings sampled by the optimizer
        cv=5,             # 5-fold cross-validation per setting
        n_jobs=-1,        # use all CPU cores
        scoring='neg_mean_squared_error',
        random_state=42
    )

    opt.fit(X_train, y_train)
    best_model = opt.best_estimator_

    print("✅ 最优参数：", opt.best_params_)

    # --- Predict on the held-out test set and score ---
    y_pred = best_model.predict(X_test)
    metrics = evaluate(y_test, y_pred)
    results[name] = metrics

    print(f"📊 {name} 模型在测试集上的性能：")
    for k, v in metrics.items():
        print(f"   {k} = {v:.6f}")

    # ========== 6. SHAP model interpretability ==========
    print("🔍 绘制 SHAP 可解释性图...")
    # Explainer is fit on the training features; SHAP values computed on test.
    explainer = shap.Explainer(best_model, X_train)
    shap_values = explainer(X_test)

    # NOTE(review): plt.title is called BEFORE summary_plot; if summary_plot
    # opens a new figure the title may be lost — verify the rendered output.
    plt.title(f"SHAP Summary - {name}")
    shap.summary_plot(shap_values, X_test, show=False)
    plt.tight_layout()
    plt.show()

# ========== 7. Summarize performance across feature schemes ==========
print("\n🌊 模型性能对比结果：")
# One row per scheme, one column per metric.
df_result = pd.DataFrame.from_dict(results, orient="index")
print(df_result)

