import xgboost as xgb
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split
from sklearn.metrics import root_mean_squared_error
import matplotlib.pyplot as plt
import optuna

"""
XGBoost回归示例 - 使用糖尿病数据集 + Optuna超参数调优
糖尿病数据集包含10个特征，目标变量是糖尿病进展的定量测量
特征数：10个（年龄、性别、BMI、血压等）
样本数：442个
目标变量：糖尿病进展的定量测量（连续值）
"""

# 使用糖尿病数据集（无需网络下载）
X, y = load_diabetes(return_X_y=True)

# 展示数据信息
print("X的数据类型:", type(X))
print("y的数据类型:", type(y))
print("X的形状:", X.shape)
print("y的形状:", y.shape)
print("目标变量统计:")
print(f"  最小值: {y.min():.2f}")
print(f"  最大值: {y.max():.2f}")
print(f"  均值: {y.mean():.2f}")
print(f"  标准差: {y.std():.2f}")

# 划分数据集
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)


def objective(trial):
    """
    Optuna objective: sample one hyperparameter set, train with early
    stopping on a held-out validation split, and return the best
    validation RMSE achieved.

    :param trial: Optuna trial object used to sample hyperparameters
    :return: best validation RMSE (lower is better)
    """
    # Hyperparameter search space. learning_rate is sampled on a log scale
    # so small values are explored as densely as large ones.
    params = {
        "max_depth": trial.suggest_int("max_depth", 2, 10),
        "learning_rate": trial.suggest_float("learning_rate", 0.01, 0.3, log=True),
        "n_estimators": trial.suggest_int("n_estimators", 50, 300),
        "min_child_weight": trial.suggest_int("min_child_weight", 1, 10),
        "subsample": trial.suggest_float("subsample", 0.6, 1.0),
        "colsample_bytree": trial.suggest_float("colsample_bytree", 0.6, 1.0),
        "reg_alpha": trial.suggest_float("reg_alpha", 0, 10),
        "reg_lambda": trial.suggest_float("reg_lambda", 0, 10),
        "gamma": trial.suggest_float("gamma", 0, 5),
    }

    # Carve a validation split out of the training set (fixed seed so every
    # trial is evaluated on the same split).
    X_train_final, X_val, y_train_final, y_val = train_test_split(
        X_train, y_train, test_size=0.2, random_state=42
    )

    reg = xgb.XGBRegressor(
        objective="reg:squarederror",
        **params,
        eval_metric=["rmse"],
        early_stopping_rounds=10,
        verbosity=0,  # suppress per-trial training chatter
    )

    reg.fit(X_train_final, y_train_final, eval_set=[(X_val, y_val)], verbose=False)

    # BUG FIX: the last logged RMSE (evals_result_[...][-1]) is up to
    # `early_stopping_rounds` iterations past the optimum when early stopping
    # triggers. `best_score` is the RMSE at the best iteration, which is also
    # what the fitted model uses for prediction — that is what Optuna should
    # minimize.
    return reg.best_score


def optimize_hyperparameters(n_trials=100):
    """
    Run an Optuna study to tune the XGBoost hyperparameters.

    :param n_trials: number of optimization trials to run
    :return: tuple of (best hyperparameter dict, the Optuna study object)
    """
    print(f"开始Optuna超参数优化，试验次数: {n_trials}")

    # Seeded TPE sampler so the search is reproducible across runs.
    sampler = optuna.samplers.TPESampler(seed=42)
    study = optuna.create_study(direction="minimize", sampler=sampler)

    study.optimize(objective, n_trials=n_trials, show_progress_bar=True)

    # Report the winning trial and its hyperparameters.
    print("\n=== Optuna优化结果 ===")
    print(f"最佳试验编号: {study.best_trial.number}")
    print(f"最佳验证RMSE: {study.best_value:.4f}")
    print("最佳超参数:")
    for name, val in study.best_params.items():
        print(f"  {name}: {val}")

    return study.best_params, study


def train_optimized_model(X_train, y_train, best_params, show_progress=True):
    """
    Train the final model with the tuned hyperparameters, using early
    stopping on an internal validation split.

    :param X_train: training features
    :param y_train: training targets
    :param best_params: hyperparameter dict produced by Optuna
    :param show_progress: whether to print a training summary and plot curves
    :return: the fitted XGBRegressor instance
    """
    if show_progress:
        # FIX: dropped spurious f-prefix on a string with no placeholders.
        print("使用优化超参数训练模型...")
        print(f"训练数据量: {X_train.shape[0]} 个样本, {X_train.shape[1]} 个特征")

    # Same fixed-seed validation split as used during tuning.
    X_train_final, X_val, y_train_final, y_val = train_test_split(
        X_train, y_train, test_size=0.2, random_state=42
    )

    reg = xgb.XGBRegressor(
        objective="reg:squarederror",
        **best_params,
        eval_metric=["rmse"],
        early_stopping_rounds=10,
        verbosity=1,
    )

    # eval_set order: index 0 = training split, index 1 = validation split
    # (keys "validation_0"/"validation_1" in evals_result_).
    reg.fit(
        X_train_final,
        y_train_final,
        eval_set=[(X_train_final, y_train_final), (X_val, y_val)],
    )

    if show_progress:
        print("\n=== 优化模型训练摘要 ===")
        history = reg.evals_result_
        # FIX: fall back to the last logged round rather than
        # best_params["n_estimators"], which could exceed the history length
        # and is not a valid index into it.
        best_iteration = getattr(
            reg, "best_iteration", len(history["validation_0"]["rmse"]) - 1
        )
        print(f"最佳迭代次数: {best_iteration}")
        # FIX: report RMSE at the best iteration (what the model predicts
        # with), not the last logged value, which under early stopping is up
        # to 10 rounds past the optimum.
        print(f"最终训练RMSE: {history['validation_0']['rmse'][best_iteration]:.4f}")
        print(f"最终验证RMSE: {history['validation_1']['rmse'][best_iteration]:.4f}")

        plot_training_history(history)

    return reg


def evaluate_model(model, X_test, y_test):
    """
    Evaluate a fitted regressor on the held-out test set.

    :param model: fitted estimator exposing ``predict``
    :param X_test: test features
    :param y_test: test targets
    :return: root mean squared error on the test set
    """
    predictions = model.predict(X_test)
    rmse = root_mean_squared_error(y_test, predictions)
    print(f"测试集RMSE: {rmse:.4f}")
    return rmse


def plot_training_history(history):
    """
    Plot train/validation RMSE curves over boosting rounds.

    :param history: ``evals_result_`` dict from a fitted XGBRegressor
        ("validation_0" = training split, "validation_1" = validation split)
    """
    # FIX: the original reserved a 1x2 subplot grid (figsize 12x4) but only
    # ever drew in the first cell — leftover layout; use a single axes.
    plt.figure(figsize=(6, 4))

    plt.plot(history["validation_0"]["rmse"], label="Train RMSE")
    plt.plot(history["validation_1"]["rmse"], label="Validation RMSE")
    plt.xlabel("Epoch")
    plt.ylabel("RMSE")
    plt.title("RMSE during Training")
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.show()

def plot_optimization_history(study):
    """
    Render Optuna diagnostic figures for a finished study.

    Shows, in order: optimization history, per-parameter importances, and a
    parallel-coordinates view of the trials.

    :param study: completed Optuna study object
    """
    plotters = (
        optuna.visualization.plot_optimization_history,
        optuna.visualization.plot_param_importances,
        optuna.visualization.plot_parallel_coordinate,
    )
    for make_figure in plotters:
        make_figure(study).show()


def compare_models(baseline_model, optimized_model, X_test, y_test):
    """
    Print a side-by-side test-set RMSE comparison of two fitted models.

    :param baseline_model: fitted reference model
    :param optimized_model: fitted tuned model
    :param X_test: test features
    :param y_test: test targets
    :return: tuple of (baseline RMSE, optimized RMSE, improvement in percent)
    """
    print("\n=== 模型性能比较 ===")

    baseline_rmse = root_mean_squared_error(y_test, baseline_model.predict(X_test))
    optimized_rmse = root_mean_squared_error(y_test, optimized_model.predict(X_test))

    print(f"基线模型测试RMSE: {baseline_rmse:.4f}")
    print(f"优化模型测试RMSE: {optimized_rmse:.4f}")

    # Relative reduction in RMSE; positive means the tuned model is better.
    improvement = ((baseline_rmse - optimized_rmse) / baseline_rmse) * 100
    print(f"性能提升: {improvement:.2f}%")

    return baseline_rmse, optimized_rmse, improvement


def train_baseline_model(X_train, y_train):
    """
    Train a baseline model with default-ish hyperparameters for comparison.

    :param X_train: training features
    :param y_train: training targets
    :return: the fitted baseline XGBRegressor instance
    """
    print("训练基线模型（默认参数）...")

    # Same fixed-seed validation split used everywhere else in this script.
    X_train_final, X_val, y_train_final, y_val = train_test_split(
        X_train, y_train, test_size=0.2, random_state=42
    )

    baseline_model = xgb.XGBRegressor(
        objective="reg:squarederror",
        max_depth=3,
        n_estimators=100,
        eval_metric=["rmse"],
        early_stopping_rounds=10,
        verbosity=0,
    )

    baseline_model.fit(
        X_train_final, y_train_final, eval_set=[(X_val, y_val)], verbose=False
    )

    # BUG FIX: report `best_score` (RMSE at the best iteration) instead of the
    # last logged RMSE, which under early stopping is up to 10 rounds past the
    # optimum and overstates the error.
    baseline_rmse = baseline_model.best_score
    print(f"基线模型验证RMSE: {baseline_rmse:.4f}")

    return baseline_model


# Script entry point: train, tune, retrain, evaluate, and compare models.
if __name__ == "__main__":
    print("=" * 60)
    print("XGBoost回归模型 + Optuna超参数调优")
    print("=" * 60)

    # 1. Train a baseline model with default-ish parameters.
    baseline_model = train_baseline_model(X_train, y_train)

    # 2. Search for better hyperparameters with Optuna (adjust trial count
    #    to trade runtime for search quality).
    best_params, study = optimize_hyperparameters(n_trials=500)  # 可调整试验次数

    # 3. Retrain the final model with the tuned hyperparameters.
    optimized_model = train_optimized_model(X_train, y_train, best_params)

    # 4. Evaluate the tuned model on the held-out test set.
    print("\n优化模型评估结果:")
    evaluate_model(optimized_model, X_test, y_test)

    # 5. Compare baseline vs. tuned model performance.
    compare_models(baseline_model, optimized_model, X_test, y_test)

    # 6. Visualize the optimization process (optional).
    # plot_optimization_history(study)

    print("\n程序执行完成！")
