import pandas as pd
import numpy as np
from interpret.glassbox import ExplainableBoostingRegressor
from interpret import show
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split, GridSearchCV

# ========== 1. Load data ==========
# CSV with CPUE (catch-per-unit-effort target) plus sea-temperature features.
df = pd.read_csv(r"D:\resources\data\jumbo\2014-2020_1deg_st_merged_st_chl_ssh_mld.csv")  # contains CPUE and sea-temperature features
target_col = "CPUE"

# ========== 2. Build the two candidate feature schemes ==========
feature_set_1 = ['Year', 'Month', 'Lon', 'Lat',
                 'ST_0.5', 'ST_47.4', 'ST_92.3', 'ST_155.9', 'ST_222.5', 'ST_318.1', 'ST_453.9']  # vertical sea-temperature mean values
feature_set_2 = ['Year', 'Month', 'Lon', 'Lat',
                'ST_0.5_Label', 'ST_47.4_Label', 'ST_92.3_Label',
                'ST_155.9_Label', 'ST_222.5_Label', 'ST_318.1_Label', 'ST_453.9_Label']   # multi-layer (labelled) features
categorical_cols = [
    'ST_0.5_Label', 'ST_47.4_Label', 'ST_92.3_Label',
    'ST_155.9_Label', 'ST_222.5_Label', 'ST_318.1_Label', 'ST_453.9_Label'
]
# Convert the label columns to pandas categorical dtype so downstream models
# treat them as discrete categories rather than numbers.
for col in categorical_cols:
    df[col] = df[col].astype('category')
# Map scheme name -> feature list.
# NOTE(review): `feature_sets` (and therefore `feature_set_1`) is never used below —
# only `feature_set_2` is trained on. Presumably a loop over both schemes was
# intended; confirm or remove the dead definitions.
feature_sets = {
    "海温平均值方案": feature_set_1,
    "多层特征方案": feature_set_2
}

X = df[feature_set_2]
y = df[target_col]
y = np.log1p(y)  # log(CPUE + 1) to smooth the heavily skewed target distribution

# 80/20 train/test split; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# ========== 4. Define the EBM model and hyper-parameter grid ==========
# Base estimator; fixed random_state keeps the boosting rounds reproducible.
ebm = ExplainableBoostingRegressor(random_state=42)

# Candidate values for each tuned hyper-parameter.
interaction_counts = [1, 10, 15]   # number of pairwise interaction terms
leaf_counts = [2, 5, 10]           # max leaves per feature shape function
rates = [0.05, 0.1, 0.2]           # boosting learning rate
min_leaf_sizes = [2, 5, 10]        # minimum samples per leaf

param_grid = dict(
    interactions=interaction_counts,
    max_leaves=leaf_counts,
    learning_rate=rates,
    min_samples_leaf=min_leaf_sizes,
)

# ========== 5. Grid search ==========
# Exhaustive 3-fold CV over the grid, minimizing MSE, with all cores in use.
grid_search = GridSearchCV(
    ebm,
    param_grid,
    scoring='neg_mean_squared_error',
    cv=3,
    n_jobs=-1,
    verbose=2,
)

print("🔍 开始 EBM 网格搜索超参数...")
grid_search.fit(X_train, y_train)

# ========== 6. Report the best hyper-parameters ==========
print("\n✅ 最优参数组合：")
print(grid_search.best_params_)

# ========== 7. Predict with the best model ==========
best_ebm = grid_search.best_estimator_
y_pred = best_ebm.predict(X_test)

# ========== 8. Evaluate on the test set ==========
# NOTE: all metrics are computed on the log1p-transformed target, not raw CPUE.
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_test, y_pred)
# Mean relative error, restricted to samples with a nonzero target.
# With y = log1p(CPUE), zero-catch samples have y_test == 0; the old
# `/(y_test + 1e-8)` inflated those ratios by ~1e8 and made MRE meaningless.
_y_true = np.asarray(y_test, dtype=float)
_y_hat = np.asarray(y_pred, dtype=float)
_nonzero = np.abs(_y_true) > 1e-8
if _nonzero.any():
    mre = np.mean(np.abs((_y_true[_nonzero] - _y_hat[_nonzero]) / _y_true[_nonzero]))
else:
    mre = np.nan  # no nonzero targets -> relative error undefined
r2 = r2_score(y_test, y_pred)

print("\n🎯 最优EBM模型评估结果 (测试集)")
print(f"MSE  = {mse:.6f}")
print(f"RMSE = {rmse:.6f}")
print(f"MAE  = {mae:.6f}")
print(f"MRE  = {mre:.6f}")
print(f"R²   = {r2:.6f}")

# ========== 9. Interpretability analysis ==========
# print("\n📊 生成全局可解释性报告...")
# ebm_global = best_ebm.explain_global()
#
# # Open the interactive global-explanation visualization (HTML), viewable from PyCharm
# show(ebm_global)

# --- Local explanation (inspect a single sample's feature contributions) ---
# sample_index = 0  # change to any sample index
# ebm_local = best_ebm.explain_local(X_test.iloc[[sample_index]], y_test.iloc[[sample_index]])
# show(ebm_local)
