import shap
import numpy as np

"""
# 医疗数据集
explain_model_with_shap_generic(
    model, X_test, y_test, 
    feature_names=["年龄", "血压", "血糖", "胆固醇"],
    class_names=["健康", "患病"],
    threshold=0.3  # 医疗场景可能需要更低的阈值
)

# 金融风控数据集
explain_model_with_shap_generic(
    model, X_test, y_test,
    feature_names=["收入", "负债", "信用分", "历史逾期"],
    class_names=["低风险", "高风险"]
)
"""


def explain_model_with_shap_generic(
    model,
    X_test,
    y_test=None,
    feature_names=None,
    class_names=None,
    sample_idx=0,
    threshold=0.5,
):
    """Explain a trained binary classifier with SHAP (generic version).

    Works with any binary-classification dataset. Prints a console report
    covering the SHAP base value, global feature importance (mean |SHAP|),
    and a per-sample feature-contribution breakdown, then returns the
    explainer and the computed SHAP values.

    :param model: trained tree model (e.g. XGBoost) accepted by TreeExplainer
    :param X_test: test-set features (array/DataFrame sliceable with [:10])
    :param y_test: test-set labels (optional)
    :param feature_names: list of feature names (optional; auto-generated)
    :param class_names: list of class names, e.g. ["负类", "正类"] (optional)
    :param sample_idx: index of the sample to analyse; must fall inside the
        explained slice (only the first 10 samples are explained, for speed)
    :param threshold: classification threshold, default 0.5
    :return: (explainer, shap_values)
    :raises IndexError: when sample_idx is outside the explained slice
    """
    print("\n=== SHAP模型解释 (通用版) ===")

    # Build the explainer for tree-based models.
    explainer = shap.TreeExplainer(model)

    # Compute SHAP values on a small slice only, to keep this fast.
    X_test_sample = X_test[:10]
    shap_values = explainer.shap_values(X_test_sample)

    # Some shap versions return one array per class for binary models;
    # keep the positive-class contributions in that case.
    if isinstance(shap_values, list):
        shap_values = shap_values[-1]

    # Bug fix: sample_idx >= len(slice) used to crash with a cryptic
    # IndexError halfway through the report — fail fast instead.
    n_explained = shap_values.shape[0]
    if not 0 <= sample_idx < n_explained:
        raise IndexError(
            f"sample_idx 必须在 [0, {n_explained - 1}] 内，实际为 {sample_idx}"
        )

    # Default names when the caller supplies none.
    if feature_names is None:
        feature_names = [f"特征_{i+1}" for i in range(X_test.shape[1])]

    if class_names is None:
        class_names = ["负类", "正类"]

    # 1. Basic concepts: the base value is the model's average raw
    # (log-odds) output; convert it to a probability for display.
    print("\n📊 SHAP基础概念:")
    # expected_value may be a 0-d / 1-element array depending on the shap
    # version; normalise to a plain float so the :.4f formatting works.
    base_value_log_odds = float(np.atleast_1d(explainer.expected_value)[-1])
    base_value_prob = log_odds_to_probability(base_value_log_odds)

    print(f"• 基准值 (Base Value): {base_value_log_odds:.4f}")
    print(f"• 基准概率: {base_value_prob:.4f} ({base_value_prob*100:.1f}%)")
    print("   - 含义: 模型对所有样本的平均预测概率")
    print("   - 解释: 如果没有任何特征信息，模型会预测这个概率")
    print("=" * 50)

    # 2. Global feature importance: mean |SHAP| over the explained slice.
    print("\n🔍 全局特征重要性:")
    feature_importance = np.abs(shap_values).mean(0)

    # zip stops at the shorter sequence, so a name/column count mismatch
    # can no longer raise IndexError mid-report.
    for name, importance in zip(feature_names, feature_importance):
        print(f"  {name}: {importance:.4f}")

    print("\n💡 解释: 数值越大，该特征对预测的影响越大")
    print("=" * 50)

    # 3. Per-sample explanation. The base value computed above is reused
    # (the original recomputed it here redundantly).
    print(f"\n🎯 样本 {sample_idx} 的详细解释:")
    sample_shap = shap_values[sample_idx]
    final_prediction_log_odds = base_value_log_odds + np.sum(sample_shap)
    final_prediction_prob = log_odds_to_probability(final_prediction_log_odds)

    # Show the true label when labels were provided.
    if y_test is not None:
        # int() so float-typed label arrays can still index class_names.
        true_label = int(y_test[sample_idx])
        print(f"• 真实标签: {true_label} ({class_names[true_label]})")
    else:
        print("• 真实标签: 未提供")

    print(f"• 基准值: {base_value_log_odds:.4f} ({base_value_prob*100:.1f}%概率)")
    print(
        f"• 最终预测: {final_prediction_log_odds:.4f} ({final_prediction_prob*100:.1f}%概率)"
    )

    print("\n📈 特征贡献分析:")
    for name, contribution in zip(feature_names, sample_shap):
        print(f"  {name}: {contribution:+.4f}")

    print("\n💡 特征贡献解释:")
    print(f"• 正值: 增加预测为{class_names[1]}的概率")
    print(f"• 负值: 减少预测为{class_names[1]}的概率")
    # Bug fix: the old text claimed the sum equals the final *probability*;
    # it equals the final log-odds, which is then sigmoid-transformed.
    print("• 基准值 + 所有特征贡献之和 = 最终预测值(对数几率), 经Sigmoid转换为概率")

    print(f"\n✅ 验证计算:")
    print(
        f"基准值 ({base_value_log_odds:.4f}) + 特征贡献总和 ({np.sum(sample_shap):.4f})"
    )
    print(f"= 最终预测值 ({final_prediction_log_odds:.4f})")
    print(f"= 最终概率 ({final_prediction_prob*100:.1f}%)")

    # 4. Probability-space summary and the final decision at `threshold`.
    print(f"\n🎲 概率转换解释:")
    print(f"• 基准概率: {base_value_prob*100:.1f}%")
    print(f"• 最终概率: {final_prediction_prob*100:.1f}%")
    print(f"• 概率变化: {final_prediction_prob - base_value_prob:+.3f}")

    if final_prediction_prob > threshold:
        print(
            f"📊 模型预测: {class_names[1]} ({final_prediction_prob*100:.1f}% 置信度)"
        )
    else:
        print(
            f"📊 模型预测: {class_names[0]} ({(1-final_prediction_prob)*100:.1f}% 置信度)"
        )

    return explainer, shap_values


def log_odds_to_probability(log_odds):
    """Convert log-odds to a probability via the logistic sigmoid.

    Uses the log-sum-exp form ``exp(-logaddexp(0, -x))``, which avoids the
    overflow RuntimeWarning that ``1 / (1 + exp(-x))`` triggers for large
    negative inputs, while returning identical values everywhere else.

    :param log_odds: log-odds value (scalar or numpy array)
    :return: probability in [0, 1] (same shape as the input)
    """
    return np.exp(-np.logaddexp(0.0, -np.asarray(log_odds, dtype=float)))
