import numpy as np
import xgboost as xgb
import shap
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

"""
XGBoost 二分类 预测是否为“virginica”
特征数：4个（花萼长度、花萼宽度、花瓣长度、花瓣宽度）
样本数：150个
"""


# Load the iris dataset (150 samples, 4 numeric features).
iris = load_iris()
X, y = iris.data, iris.target

# Reduce to a binary problem: label is 1 iff the sample is virginica (class 2).
y_binary = (y == 2).astype(int)

# Show basic information about the data (types, shapes, class balance).
print("X的数据类型:", type(X))
print("y的数据类型:", type(y))
print("X的形状:", X.shape)
print("y的形状:", y.shape)
print(f"X的前5行:\n{X[:5]}")
print("非virginica (0):", np.sum(y_binary == 0))
print("virginica (1):", np.sum(y_binary == 1))


# Train/test split; stratify keeps the class ratio identical in both splits.
X_train, X_test, y_train, y_test = train_test_split(
    X, y_binary, test_size=0.2, random_state=42, stratify=y_binary
)


def train_model(X_train, y_train, max_depth=3, n_estimators=100):
    """
    Train a plain XGBoost binary classifier (no early stopping).

    :param X_train: training feature matrix
    :param y_train: binary training labels
    :param max_depth: maximum depth of each tree
    :param n_estimators: number of boosting rounds (trees)
    :return: the fitted classifier
    """
    params = {
        "objective": "binary:logistic",
        "max_depth": max_depth,
        "n_estimators": n_estimators,
    }
    model = xgb.XGBClassifier(**params)
    model.fit(X_train, y_train)
    return model


def train_model_early_stop(
    X_train,
    y_train,
    max_depth=3,
    n_estimators=100,
    early_stopping_rounds=10,
    validation_size=0.2,
    random_state=42,
):
    """
    Train an XGBoost binary classifier with early stopping.

    A stratified validation split is carved out of the training data; boosting
    stops once the validation logloss has not improved for
    ``early_stopping_rounds`` consecutive rounds.

    :param X_train: training feature matrix
    :param y_train: binary training labels
    :param max_depth: maximum depth of each tree
    :param n_estimators: upper bound on the number of boosting rounds
    :param early_stopping_rounds: stop after this many rounds without
        validation improvement
    :param validation_size: fraction of the training data held out for validation
    :param random_state: seed for the validation split
    :return: the fitted classifier
    """
    # Carve a stratified validation set out of the training data.
    X_train_final, X_val, y_train_final, y_val = train_test_split(
        X_train,
        y_train,
        test_size=validation_size,
        random_state=random_state,
        stratify=y_train,
    )

    # early_stopping_rounds is passed at construction time (xgboost >= 1.6 API).
    clf = xgb.XGBClassifier(
        objective="binary:logistic",
        max_depth=max_depth,
        n_estimators=n_estimators,
        eval_metric=["logloss"],
        early_stopping_rounds=early_stopping_rounds,
        verbosity=1,
    )

    # Track both the training and the validation curve during fitting.
    clf.fit(
        X_train_final,
        y_train_final,
        eval_set=[(X_train_final, y_train_final), (X_val, y_val)],
    )

    # Report where early stopping landed.
    if hasattr(clf, "best_iteration"):
        print(f"最佳迭代次数: {clf.best_iteration}")
        print(f"最佳验证损失: {clf.best_score:.4f}")

    # Bug fix: the sklearn wrapper exposes the training history via the
    # evals_result() method, not an ``evals_result_`` attribute, so the
    # original hasattr() guard was never true and this report was skipped.
    evals_result = clf.evals_result()
    if evals_result:
        print("\n训练历史:")
        for eval_name, eval_metrics in evals_result.items():
            print(f"{eval_name}:")
            for metric_name, values in eval_metrics.items():
                print(f"  {metric_name}: {values[-1]:.4f} (最后)")

    return clf


def evaluate_model(model, X_test, y_test):
    """
    Evaluate a fitted binary classifier on the test split.

    :param model: fitted classifier exposing predict/predict_proba
    :param X_test: test feature matrix
    :param y_test: binary test labels
    :return: tuple of (accuracy, positive-class probabilities)
    """
    predictions = model.predict(X_test)
    # Column 1 holds the probability of the positive class (virginica).
    positive_proba = model.predict_proba(X_test)[:, 1]

    acc = accuracy_score(y_test, predictions)
    print(f"准确率: {acc:.4f}")

    return acc, positive_proba


def save_model(model, model_path):
    """
    Persist a fitted model to disk.

    Appends a ".model" suffix when the given path lacks one.

    :param model: fitted model exposing a save_model() method
    :param model_path: destination file path
    """
    target = model_path if model_path.endswith(".model") else model_path + ".model"
    model.save_model(target)
    print(f"模型已保存至 {target}")


def log_odds_to_probability(log_odds):
    """
    Apply the logistic (sigmoid) function to a log-odds value.

    :param log_odds: log-odds (logit) value, scalar or numpy array
    :return: probability in (0, 1)
    """
    neg_odds = np.exp(-log_odds)
    return 1 / (1 + neg_odds)


def explain_model_with_shap(model, X_test, feature_names=None, y_true=None):
    """
    Simplified SHAP explanation of a binary XGBoost model.

    :param model: fitted XGBoost model
    :param X_test: test feature matrix
    :param feature_names: list of feature names; defaults to the iris names
    :param y_true: true binary labels aligned with X_test; when None, falls
        back to the module-level ``y_test`` (the original implicit-global
        behavior), so existing callers are unaffected
    :return: (explainer, shap_values) computed on the first 10 test samples
    """
    print("\n=== SHAP模型解释 (简化版) ===")

    if y_true is None:
        # Bug fix (coupling): the original read the module-level global
        # ``y_test`` directly; accept it as a parameter, keeping the global
        # as a backward-compatible fallback.
        y_true = y_test

    # Build the tree explainer and compute SHAP values on a small sample
    # (first 10 rows) to keep the demo fast.
    explainer = shap.TreeExplainer(model)
    X_test_sample = X_test[:10]
    shap_values = explainer.shap_values(X_test_sample)

    # 1. Base value: the model's average raw output (log-odds) with no
    # feature information.
    print("\n📊 SHAP基础概念:")
    base_value_log_odds = explainer.expected_value
    base_value_prob = log_odds_to_probability(base_value_log_odds)

    print(f"• 基准值 (Base Value): {base_value_log_odds:.4f}")
    print(f"• 基准概率: {base_value_prob:.4f} ({base_value_prob*100:.1f}%)")
    print("   - 含义: 模型对所有样本的平均预测概率")
    print("   - 解释: 如果没有任何特征信息，模型会预测这个概率")
    print("=" * 50)

    # 2. Global importance: mean |SHAP value| per feature over the sample.
    print("\n🔍 全局特征重要性:")
    feature_importance = np.abs(shap_values).mean(0)

    if feature_names is None:
        feature_names = ["花萼长度", "花萼宽度", "花瓣长度", "花瓣宽度"]

    for i, name in enumerate(feature_names):
        importance = feature_importance[i]
        print(f"  {name}: {importance:.4f}")

    print("\n💡 解释: 数值越大，该特征对预测的影响越大")
    print("=" * 50)

    # 3. Local explanation of the first test sample.
    print(f"\n🎯 第一个样本的详细解释:")
    sample_idx = 0
    sample_shap = shap_values[sample_idx]
    # Base value + sum of contributions is the raw model output in log-odds.
    final_prediction_log_odds = base_value_log_odds + np.sum(sample_shap)
    final_prediction_prob = log_odds_to_probability(final_prediction_log_odds)

    print(
        f"• 真实标签: {y_true[sample_idx]} ({'非virginica' if y_true[sample_idx] == 0 else 'virginica'})"
    )
    print(f"• 基准值: {base_value_log_odds:.4f} ({base_value_prob*100:.1f}%概率)")
    print(
        f"• 最终预测: {final_prediction_log_odds:.4f} ({final_prediction_prob*100:.1f}%概率)"
    )

    print("\n📈 特征贡献分析:")
    for i, name in enumerate(feature_names):
        contribution = sample_shap[i]
        print(f"  {name}: {contribution:+.4f}")

    print("\n💡 特征贡献解释:")
    print("• 正值: 增加预测为virginica的概率")
    print("• 负值: 减少预测为virginica的概率")
    # Bug fix (wording): the sum lives in log-odds space, not probability
    # space — the "验证计算" section below demonstrates exactly this.
    print("• 所有特征贡献之和 + 基准值 = 最终预测值（对数几率）")

    print(f"\n✅ 验证计算:")
    print(
        f"基准值 ({base_value_log_odds:.4f}) + 特征贡献总和 ({np.sum(sample_shap):.4f})"
    )
    print(f"= 最终预测值 ({final_prediction_log_odds:.4f})")
    print(f"= 最终概率 ({final_prediction_prob*100:.1f}%)")

    # 4. Probability-space summary of the shift from base to final.
    print(f"\n🎲 概率转换解释:")
    print(f"• 基准概率: {base_value_prob*100:.1f}%")
    print(f"• 最终概率: {final_prediction_prob*100:.1f}%")
    print(f"• 概率变化: {final_prediction_prob - base_value_prob:+.3f}")

    if final_prediction_prob > 0.5:
        print(f"📊 模型预测: VIRGINICA ({final_prediction_prob*100:.1f}% 置信度)")
    else:
        print(f"📊 模型预测: 非VIRGINICA ({(1-final_prediction_prob)*100:.1f}% 置信度)")

    return explainer, shap_values


# Main program entry point.
if __name__ == "__main__":
    # Train with early stopping on a held-out validation split.
    print("开始训练模型...")
    model = train_model_early_stop(X_train, y_train)

    # Evaluate accuracy on the test split.
    print("模型评估结果:")
    evaluate_model(model, X_test, y_test)

    # Explain the model's predictions with SHAP.
    iris_feature_names = ["花萼长度", "花萼宽度", "花瓣长度", "花瓣宽度"]
    print("=" * 50)
    explainer, shap_values = explain_model_with_shap(model, X_test, iris_feature_names)

    # Persist the model to disk (disabled by default).
    # NOTE(review): the filename below says "diabetes" — likely copied from
    # another script; rename before enabling.
    # save_model(model, "diabetes_xgboost_model.json")
