{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "b8257f9a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports and global configuration for the whole notebook.\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import os\n",
    "import time\n",
    "import gc\n",
    "import matplotlib.pyplot as plt\n",
    "import pickle\n",
    "import lightgbm as lgb\n",
    "from sklearn.model_selection import StratifiedKFold, KFold\n",
    "from sklearn.metrics import roc_auc_score,f1_score\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n",
    "from sklearn.preprocessing import LabelEncoder,OneHotEncoder\n",
    "from scipy import sparse\n",
    "import seaborn as sns\n",
    "# NOTE(review): star import, and `import datetime` below rebinds the name\n",
    "# `datetime` from the class (imported here) to the module - any later\n",
    "# `datetime(...)` call would need `datetime.datetime(...)` instead.\n",
    "from datetime import *\n",
    "from functools import reduce\n",
    "from sklearn.metrics import roc_curve\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn import feature_selection\n",
    "import datetime\n",
    "import xgboost as xgb\n",
    "from sklearn.feature_selection import VarianceThreshold\n",
    "from catboost import CatBoostClassifier as cat\n",
    "import warnings\n",
    "from sklearn.utils.class_weight import compute_class_weight\n",
    "warnings.filterwarnings('ignore')\n",
    "import optuna\n",
    "from sklearn.metrics import precision_score, recall_score,f1_score\n",
    "# NOTE(review): star import pollutes the namespace; the `lgb` alias above\n",
    "# already covers LightGBM usage visible in this notebook.\n",
    "from lightgbm import *\n",
    "# Candidate seeds for multi-seed ensembling (not used in this chunk).\n",
    "seed_list=[1993,2008,4096,1015]\n",
    "import random\n",
    "# Fix all RNG sources for reproducibility.\n",
    "random.seed(5354)\n",
    "os.environ['PYTHONHASHSEED'] = str(5354)\n",
    "np.random.seed(5354)\n",
    "# pandas display options: wide frames and fixed-point float formatting.\n",
    "pd.set_option('display.max_info_columns', 500)\n",
    "pd.set_option('display.max_columns', 1000)\n",
    "pd.set_option('display.max_row', 300)\n",
    "pd.set_option('display.float_format', lambda x: ' %.5f' % x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "89149c80",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load pre-built feature matrices and labels from local pickle files.\n",
    "# NOTE(review): pickle.load executes arbitrary code from the file - only\n",
    "# load pickles produced by this project.\n",
    "with open('./train_X.pkl' , \"rb\" ) as file :\n",
    "    train_X = pickle.load(file)\n",
    "with open('./train_y.pkl' , \"rb\" ) as file :\n",
    "    train_y = pickle.load(file)\n",
    "with open('./test_X.pkl' , \"rb\" ) as file :\n",
    "    test_X = pickle.load(file)\n",
    "# Shift labels to start at 0 (assumes raw labels are 1..4 - TODO confirm;\n",
    "# the later label-check cell verifies this at runtime).\n",
    "train_y_converted = train_y - 1\n",
    "n_classes = 4\n",
    "seed = 42"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "fd9d05aa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Export CSV copies of the pickled data for inspection/sharing.\n",
    "# NOTE(review): index=None behaves like index=False here, but the\n",
    "# conventional spelling is index=False.\n",
    "train_X.to_csv(\"train_X.csv\" , index=None)\n",
    "train_y.to_csv(\"train_y.csv\" , index=None)\n",
    "test_X.to_csv(\"test_X.csv\" , index=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ca5cc37e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "92719070",
   "metadata": {},
   "outputs": [],
   "source": [
    "import optuna\n",
    "from optuna.pruners import HyperbandPruner\n",
    "from optuna.samplers import TPESampler\n",
    "import lightgbm as lgb\n",
    "import xgboost as xgb\n",
    "from catboost import CatBoostClassifier\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "from sklearn.metrics import f1_score\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from sklearn.utils.class_weight import compute_class_weight\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import os\n",
    "\n",
    "def objective_lgb_gpu(trial, train_x, train_y, n_classes, seed, use_gpu=True):\n",
    "    \"\"\"Optuna objective for GPU-accelerated LightGBM (multi-GPU aware).\n",
    "\n",
    "    Returns the mean macro-F1 of a 3-fold stratified CV; only the two\n",
    "    regularisation strengths are tuned, every other parameter is fixed.\n",
    "    \"\"\"\n",
    "    # Re-encode labels to contiguous integers starting at 0.\n",
    "    label_encoder = LabelEncoder()\n",
    "    train_y = label_encoder.fit_transform(train_y)\n",
    "\n",
    "    # Adjust the class count if it disagrees with the caller's value.\n",
    "    actual_n_classes = len(np.unique(train_y))\n",
    "    if actual_n_classes != n_classes:\n",
    "        print(f\"警告: 实际类别数量为 {actual_n_classes}，已自动调整\")\n",
    "        n_classes = actual_n_classes\n",
    "\n",
    "    # Fixed parameters plus the two tuned ones. The original dict listed\n",
    "    # eight keys twice; duplicates removed, surviving values unchanged.\n",
    "    params = {\n",
    "        'boost_from_average': 'false',\n",
    "        'boost': 'gbdt',\n",
    "        'metric': 'multi_logloss',\n",
    "        'num_class': n_classes,\n",
    "        'max_depth': 7,\n",
    "        'num_leaves': 2**7 - 1,\n",
    "        'objective': 'multiclass',\n",
    "        'min_child_weight': 16,\n",
    "        'min_data_in_leaf': 6,\n",
    "        'min_split_gain': 0.7,\n",
    "        'bagging_fraction': 0.82,\n",
    "        'feature_fraction': 0.74,\n",
    "        'bagging_freq': 1,\n",
    "        'seed': seed,\n",
    "        'nthread': 8,\n",
    "        'n_jobs': 8,\n",
    "        'verbose': -1,\n",
    "        'is_unbalance': True,  # equivalent of class_weight='balanced'\n",
    "        # tuned parameters\n",
    "        'reg_lambda': trial.suggest_float('reg_lambda', 1e-3, 100, log=True),\n",
    "        'reg_alpha': trial.suggest_float('reg_alpha', 1e-3, 100, log=True),\n",
    "    }\n",
    "\n",
    "    # GPU configuration - round-robin the trials over 8 devices.\n",
    "    if use_gpu:\n",
    "        device_id = trial.number % 8  # cycle through 8 cards\n",
    "        params.update({\n",
    "            'device': 'gpu',\n",
    "            'gpu_platform_id': 0,\n",
    "            'gpu_device_id': device_id,\n",
    "            'gpu_use_dp': False,  # single precision to save VRAM\n",
    "        })\n",
    "\n",
    "    # Fast 3-fold stratified cross-validation.\n",
    "    folds = 3\n",
    "    kf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=seed)\n",
    "    cv_scores = []\n",
    "\n",
    "    for i, (train_index, valid_index) in enumerate(kf.split(train_x, train_y)):\n",
    "        trn_x, trn_y = train_x.iloc[train_index], train_y[train_index]\n",
    "        val_x, val_y = train_x.iloc[valid_index], train_y[valid_index]\n",
    "\n",
    "        # Warn when a fold is missing one or more classes.\n",
    "        unique_trn = np.unique(trn_y)\n",
    "        unique_val = np.unique(val_y)\n",
    "\n",
    "        if len(unique_trn) != n_classes:\n",
    "            print(f\"折 {i+1} 警告: 训练集只有 {len(unique_trn)} 类\")\n",
    "        if len(unique_val) != n_classes:\n",
    "            print(f\"折 {i+1} 警告: 验证集只有 {len(unique_val)} 类\")\n",
    "\n",
    "        # Build datasets and train with early stopping on the held-out fold.\n",
    "        train_matrix = lgb.Dataset(trn_x, label=trn_y)\n",
    "        valid_matrix = lgb.Dataset(val_x, label=val_y)\n",
    "\n",
    "        model = lgb.train(\n",
    "            params,\n",
    "            train_matrix,\n",
    "            num_boost_round=5000,\n",
    "            valid_sets=[valid_matrix],\n",
    "            callbacks=[\n",
    "                lgb.early_stopping(stopping_rounds=100, verbose=False),\n",
    "                lgb.log_evaluation(period=0)\n",
    "            ]\n",
    "        )\n",
    "\n",
    "        # Score the held-out fold with macro F1.\n",
    "        val_pred = model.predict(val_x, num_iteration=model.best_iteration)\n",
    "        val_pred_labels = np.argmax(val_pred, axis=1)\n",
    "        fold_f1 = f1_score(val_y, val_pred_labels, average='macro')\n",
    "        cv_scores.append(fold_f1)\n",
    "\n",
    "        # Feed the pruner: without trial.report() should_prune() can never\n",
    "        # return True, so the configured HyperbandPruner was a no-op.\n",
    "        trial.report(fold_f1, step=i)\n",
    "        if trial.should_prune():\n",
    "            raise optuna.TrialPruned()\n",
    "\n",
    "    return np.mean(cv_scores)\n",
    "\n",
    "def objective_xgb_gpu(trial, train_x, train_y, n_classes, seed, use_gpu=True):\n",
    "    \"\"\"Optuna objective for GPU-accelerated XGBoost (multi-GPU aware).\n",
    "\n",
    "    Returns the mean macro-F1 of a 3-fold stratified CV over the\n",
    "    suggested hyper-parameters.\n",
    "    \"\"\"\n",
    "    # Re-encode labels to contiguous integers starting at 0.\n",
    "    label_encoder = LabelEncoder()\n",
    "    train_y = label_encoder.fit_transform(train_y)\n",
    "\n",
    "    # Adjust the class count if it disagrees with the caller's value.\n",
    "    actual_n_classes = len(np.unique(train_y))\n",
    "    if actual_n_classes != n_classes:\n",
    "        print(f\"警告: 实际类别数量为 {actual_n_classes}，已自动调整\")\n",
    "        n_classes = actual_n_classes\n",
    "\n",
    "    # Fixed parameters plus the tuned search space.\n",
    "    params = {\n",
    "        'booster': 'gbtree',\n",
    "        'objective': 'multi:softprob',\n",
    "        'eval_metric': 'mlogloss',\n",
    "        'num_class': n_classes,\n",
    "        'seed': seed,\n",
    "        'verbosity': 0,\n",
    "\n",
    "        # tuned parameters\n",
    "        'max_depth': trial.suggest_int('max_depth', 3, 12),\n",
    "        'min_child_weight': trial.suggest_float('min_child_weight', 1e-5, 100, log=True),\n",
    "        'gamma': trial.suggest_float('gamma', 0, 10),\n",
    "        'subsample': trial.suggest_float('subsample', 0.5, 1.0),\n",
    "        'colsample_bytree': trial.suggest_float('colsample_bytree', 0.5, 1.0),\n",
    "        'colsample_bylevel': trial.suggest_float('colsample_bylevel', 0.5, 1.0),\n",
    "        'reg_lambda': trial.suggest_float('reg_lambda', 1e-3, 100, log=True),\n",
    "        'reg_alpha': trial.suggest_float('reg_alpha', 1e-3, 100, log=True),\n",
    "        'learning_rate': trial.suggest_float('learning_rate', 0.001, 0.1, log=True),\n",
    "    }\n",
    "\n",
    "    # GPU configuration - round-robin the trials over 8 devices.\n",
    "    if use_gpu:\n",
    "        device_id = trial.number % 8  # cycle through 8 cards\n",
    "        params.update({\n",
    "            'tree_method': 'gpu_hist',\n",
    "            'predictor': 'gpu_predictor',\n",
    "            'gpu_id': device_id,\n",
    "            'max_bin': 256,\n",
    "        })\n",
    "    else:\n",
    "        params['tree_method'] = 'hist'\n",
    "\n",
    "    # Balanced per-sample weights (equivalent of class_weight='balanced').\n",
    "    class_weights = compute_class_weight('balanced', classes=np.unique(train_y), y=train_y)\n",
    "    sample_weights = np.array([class_weights[i] for i in train_y])\n",
    "\n",
    "    # Fast 3-fold stratified cross-validation.\n",
    "    folds = 3\n",
    "    kf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=seed)\n",
    "    cv_scores = []\n",
    "\n",
    "    for i, (train_index, valid_index) in enumerate(kf.split(train_x, train_y)):\n",
    "        trn_x, trn_y = train_x.iloc[train_index], train_y[train_index]\n",
    "        val_x, val_y = train_x.iloc[valid_index], train_y[valid_index]\n",
    "        trn_weights = sample_weights[train_index]\n",
    "        val_weights = sample_weights[valid_index]\n",
    "\n",
    "        # Warn when a fold is missing one or more classes.\n",
    "        unique_trn = np.unique(trn_y)\n",
    "        unique_val = np.unique(val_y)\n",
    "\n",
    "        if len(unique_trn) != n_classes:\n",
    "            print(f\"折 {i+1} 警告: 训练集只有 {len(unique_trn)} 类\")\n",
    "        if len(unique_val) != n_classes:\n",
    "            print(f\"折 {i+1} 警告: 验证集只有 {len(unique_val)} 类\")\n",
    "\n",
    "        # Build DMatrix objects and train with early stopping.\n",
    "        dtrain = xgb.DMatrix(trn_x, label=trn_y, weight=trn_weights)\n",
    "        dvalid = xgb.DMatrix(val_x, label=val_y, weight=val_weights)\n",
    "\n",
    "        model = xgb.train(\n",
    "            params,\n",
    "            dtrain,\n",
    "            num_boost_round=5000,\n",
    "            evals=[(dtrain, 'train'), (dvalid, 'eval')],\n",
    "            early_stopping_rounds=100,\n",
    "            verbose_eval=False,\n",
    "        )\n",
    "\n",
    "        # Score the held-out fold with macro F1.\n",
    "        val_pred = model.predict(dvalid, iteration_range=(0, model.best_iteration + 1))\n",
    "        val_pred_labels = np.argmax(val_pred, axis=1)\n",
    "        fold_f1 = f1_score(val_y, val_pred_labels, average='macro')\n",
    "        cv_scores.append(fold_f1)\n",
    "\n",
    "        # Feed the pruner: without trial.report() should_prune() can never\n",
    "        # return True, so the configured HyperbandPruner was a no-op.\n",
    "        trial.report(fold_f1, step=i)\n",
    "        if trial.should_prune():\n",
    "            raise optuna.TrialPruned()\n",
    "\n",
    "    return np.mean(cv_scores)\n",
    "\n",
    "def objective_cat_gpu(trial, train_x, train_y, n_classes, seed, use_gpu=True):\n",
    "    \"\"\"Optuna objective for GPU-accelerated CatBoost (multi-GPU aware).\n",
    "\n",
    "    Returns the mean macro-F1 of a 3-fold stratified CV over the\n",
    "    suggested hyper-parameters.\n",
    "    \"\"\"\n",
    "    # Re-encode labels to contiguous integers starting at 0.\n",
    "    label_encoder = LabelEncoder()\n",
    "    train_y = label_encoder.fit_transform(train_y)\n",
    "\n",
    "    # Adjust the class count if it disagrees with the caller's value.\n",
    "    actual_n_classes = len(np.unique(train_y))\n",
    "    if actual_n_classes != n_classes:\n",
    "        print(f\"警告: 实际类别数量为 {actual_n_classes}，已自动调整\")\n",
    "        n_classes = actual_n_classes\n",
    "\n",
    "    # Positional indices of categorical columns for CatBoost.\n",
    "    cats_list = train_x.select_dtypes(include='category').columns\n",
    "    cats_list_idx = [i for i, col in enumerate(train_x.columns) if col in cats_list]\n",
    "\n",
    "    # Fixed parameters plus the tuned search space.\n",
    "    params = {\n",
    "        'iterations': 5000,\n",
    "        'random_seed': seed,\n",
    "        'loss_function': 'MultiClass',\n",
    "        'eval_metric': 'MultiClass',\n",
    "        'auto_class_weights': 'Balanced',\n",
    "        'verbose': False,\n",
    "\n",
    "        # tuned parameters\n",
    "        'learning_rate': trial.suggest_float('learning_rate', 0.001, 0.1, log=True),\n",
    "        'depth': trial.suggest_int('depth', 4, 10),\n",
    "        'l2_leaf_reg': trial.suggest_float('l2_leaf_reg', 1, 10),\n",
    "        'random_strength': trial.suggest_float('random_strength', 0, 10),\n",
    "        'border_count': trial.suggest_int('border_count', 32, 255),\n",
    "        'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 1, 100),\n",
    "    }\n",
    "\n",
    "    # Conditional search space: bagging_temperature applies only to the\n",
    "    # Bayesian bootstrap, subsample only to Bernoulli.\n",
    "    bootstrap_type = trial.suggest_categorical('bootstrap_type', ['Bernoulli', 'Bayesian'])\n",
    "    params['bootstrap_type'] = bootstrap_type\n",
    "\n",
    "    if bootstrap_type == 'Bayesian':\n",
    "        params['bagging_temperature'] = trial.suggest_float('bagging_temperature', 0, 1)\n",
    "    else:\n",
    "        params['subsample'] = trial.suggest_float('subsample', 0.5, 1.0)\n",
    "\n",
    "    # GPU configuration - round-robin the trials over 8 devices.\n",
    "    if use_gpu:\n",
    "        device_id = trial.number % 8  # cycle through 8 cards\n",
    "        params.update({\n",
    "            'task_type': 'GPU',\n",
    "            'devices': f'{device_id}',  # pin this trial to a single card\n",
    "        })\n",
    "    else:\n",
    "        params['task_type'] = 'CPU'\n",
    "\n",
    "    # Fast 3-fold stratified cross-validation.\n",
    "    folds = 3\n",
    "    kf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=seed)\n",
    "    cv_scores = []\n",
    "\n",
    "    for i, (train_index, valid_index) in enumerate(kf.split(train_x, train_y)):\n",
    "        trn_x, trn_y = train_x.iloc[train_index], train_y[train_index]\n",
    "        val_x, val_y = train_x.iloc[valid_index], train_y[valid_index]\n",
    "\n",
    "        # Warn when a fold is missing one or more classes.\n",
    "        unique_trn = np.unique(trn_y)\n",
    "        unique_val = np.unique(val_y)\n",
    "\n",
    "        if len(unique_trn) != n_classes:\n",
    "            print(f\"折 {i+1} 警告: 训练集只有 {len(unique_trn)} 类\")\n",
    "        if len(unique_val) != n_classes:\n",
    "            print(f\"折 {i+1} 警告: 验证集只有 {len(unique_val)} 类\")\n",
    "\n",
    "        # Train with early stopping on the held-out fold.\n",
    "        model = CatBoostClassifier(**params)\n",
    "\n",
    "        model.fit(\n",
    "            trn_x, trn_y,\n",
    "            eval_set=(val_x, val_y),\n",
    "            cat_features=cats_list_idx,\n",
    "            use_best_model=True,\n",
    "            early_stopping_rounds=100,\n",
    "            verbose=False\n",
    "        )\n",
    "\n",
    "        # Score the held-out fold with macro F1.\n",
    "        val_pred = model.predict_proba(val_x)\n",
    "        val_pred_labels = np.argmax(val_pred, axis=1)\n",
    "        fold_f1 = f1_score(val_y, val_pred_labels, average='macro')\n",
    "        cv_scores.append(fold_f1)\n",
    "\n",
    "        # Feed the pruner: without trial.report() should_prune() can never\n",
    "        # return True, so the configured HyperbandPruner was a no-op.\n",
    "        trial.report(fold_f1, step=i)\n",
    "        if trial.should_prune():\n",
    "            raise optuna.TrialPruned()\n",
    "\n",
    "    return np.mean(cv_scores)\n",
    "\n",
    "def optimize_model_with_gpu(model_type, train_x, train_y, n_classes, seed, n_trials=50):\n",
    "    \"\"\"Generic GPU-accelerated Optuna optimisation (multi-card parallel).\n",
    "\n",
    "    model_type is one of 'lgb' / 'xgb' / 'cat'; returns the finished\n",
    "    optuna.Study. Raises ValueError for any other model_type.\n",
    "    \"\"\"\n",
    "    \n",
    "    # Pick the objective function for the requested model.\n",
    "    if model_type == \"lgb\":\n",
    "        objective = objective_lgb_gpu\n",
    "    elif model_type == \"xgb\":\n",
    "        objective = objective_xgb_gpu\n",
    "    elif model_type == \"cat\":\n",
    "        objective = objective_cat_gpu\n",
    "    else:\n",
    "        raise ValueError(f\"不支持的模型类型: {model_type}\")\n",
    "    \n",
    "    # Create the Optuna study: TPE sampler + Hyperband pruner\n",
    "    # (max_resource=3 matches the objectives' 3 CV folds).\n",
    "    sampler = TPESampler(seed=seed, n_startup_trials=10)\n",
    "    pruner = HyperbandPruner(min_resource=1, max_resource=3, reduction_factor=3)\n",
    "    \n",
    "    study = optuna.create_study(\n",
    "        direction='maximize',\n",
    "        sampler=sampler,\n",
    "        pruner=pruner\n",
    "    )\n",
    "    \n",
    "    # Run the optimisation with 8 parallel jobs (one per GPU card).\n",
    "    study.optimize(\n",
    "        lambda trial: objective(trial, train_x, train_y, n_classes, seed, use_gpu=True),\n",
    "        n_trials=n_trials,\n",
    "        timeout=1800,  # 30-minute timeout\n",
    "        show_progress_bar=True,\n",
    "        gc_after_trial=True,\n",
    "        n_jobs=8  # run 8 trials in parallel\n",
    "    )\n",
    "    \n",
    "    return study\n",
    "\n",
    "def train_final_model(model_type, train_x, train_y, test_x, best_params, n_classes, seed):\n",
    "    \"\"\"Train the final model on the full training set with the best params.\n",
    "\n",
    "    Returns (model, test_pred) where test_pred holds class probabilities\n",
    "    for test_x. Raises ValueError for an unknown model_type.\n",
    "    \"\"\"\n",
    "    # Work on a copy: the original updated the caller's dict in place, so\n",
    "    # reusing the same best_params for a second model type was unsafe.\n",
    "    best_params = dict(best_params)\n",
    "\n",
    "    # Re-encode labels to contiguous integers starting at 0.\n",
    "    label_encoder = LabelEncoder()\n",
    "    train_y = label_encoder.fit_transform(train_y)\n",
    "\n",
    "    # Adjust the class count if it disagrees with the caller's value.\n",
    "    actual_n_classes = len(np.unique(train_y))\n",
    "    if actual_n_classes != n_classes:\n",
    "        print(f\"警告: 实际类别数量为 {actual_n_classes}，已自动调整\")\n",
    "        n_classes = actual_n_classes\n",
    "\n",
    "    if model_type == \"lgb\":\n",
    "        # GPU configuration - use the first card only.\n",
    "        best_params.update({\n",
    "            'device': 'gpu',\n",
    "            'gpu_platform_id': 0,\n",
    "            'gpu_device_id': 0,\n",
    "            'gpu_use_dp': False,  # single precision to save VRAM\n",
    "            'boost_from_average': 'false',\n",
    "            'boost': 'gbdt',\n",
    "            'metric': 'multi_logloss',\n",
    "            'num_class': n_classes,\n",
    "            'objective': 'multiclass',\n",
    "            'seed': seed,\n",
    "            'verbose': -1,\n",
    "            'is_unbalance': True,\n",
    "        })\n",
    "\n",
    "        train_matrix = lgb.Dataset(train_x, label=train_y)\n",
    "\n",
    "        # NOTE(review): early stopping is evaluated on the training set\n",
    "        # itself, so it effectively never triggers - consider a holdout.\n",
    "        model = lgb.train(\n",
    "            best_params,\n",
    "            train_matrix,\n",
    "            num_boost_round=10000,\n",
    "            valid_sets=[train_matrix],\n",
    "            callbacks=[\n",
    "                lgb.log_evaluation(period=100),\n",
    "                lgb.early_stopping(stopping_rounds=500)\n",
    "            ]\n",
    "        )\n",
    "\n",
    "        test_pred = model.predict(test_x)\n",
    "\n",
    "        return model, test_pred\n",
    "\n",
    "    elif model_type == \"xgb\":\n",
    "        # GPU configuration - use the first card only.\n",
    "        best_params.update({\n",
    "            'tree_method': 'gpu_hist',\n",
    "            'predictor': 'gpu_predictor',\n",
    "            'gpu_id': 0,\n",
    "            'verbosity': 0,\n",
    "            'objective': 'multi:softprob',\n",
    "            'eval_metric': 'mlogloss',\n",
    "            'num_class': n_classes,\n",
    "            'seed': seed,\n",
    "        })\n",
    "\n",
    "        # Balanced per-sample weights (equivalent of class_weight='balanced').\n",
    "        class_weights = compute_class_weight('balanced', classes=np.unique(train_y), y=train_y)\n",
    "        sample_weights = np.array([class_weights[i] for i in train_y])\n",
    "\n",
    "        dtrain = xgb.DMatrix(train_x, label=train_y, weight=sample_weights)\n",
    "        dtest = xgb.DMatrix(test_x)\n",
    "\n",
    "        model = xgb.train(\n",
    "            best_params,\n",
    "            dtrain,\n",
    "            num_boost_round=10000,\n",
    "            evals=[(dtrain, 'train')],\n",
    "            verbose_eval=100,\n",
    "        )\n",
    "\n",
    "        test_pred = model.predict(dtest)\n",
    "\n",
    "        return model, test_pred\n",
    "\n",
    "    elif model_type == \"cat\":\n",
    "        # Positional indices of categorical columns for CatBoost.\n",
    "        cats_list = train_x.select_dtypes(include='category').columns\n",
    "        cats_list_idx = [i for i, col in enumerate(train_x.columns) if col in cats_list]\n",
    "\n",
    "        # GPU configuration - use all 8 cards for the final fit.\n",
    "        best_params.update({\n",
    "            'task_type': 'GPU',\n",
    "            'devices': '0-7',\n",
    "            'loss_function': 'MultiClass',\n",
    "            'eval_metric': 'MultiClass',\n",
    "            'random_seed': seed,\n",
    "            'iterations': 10000,\n",
    "            'verbose': 100,\n",
    "        })\n",
    "\n",
    "        model = CatBoostClassifier(**best_params)\n",
    "        model.fit(\n",
    "            train_x, train_y,\n",
    "            cat_features=cats_list_idx,\n",
    "            verbose=100\n",
    "        )\n",
    "\n",
    "        test_pred = model.predict_proba(test_x)\n",
    "\n",
    "        return model, test_pred\n",
    "\n",
    "    else:\n",
    "        # Fail loudly instead of silently returning None; message matches\n",
    "        # optimize_model_with_gpu for consistency.\n",
    "        raise ValueError(f\"不支持的模型类型: {model_type}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "3deb8e57",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Global CV seed and expected number of target classes.\n",
    "seed = 42\n",
    "n_classes = 4  # adjust to match the actual data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "a02cfde4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "标签值统计:\n",
      "0     222\n",
      "1     442\n",
      "2     355\n",
      "3    1954\n",
      "Name: Target, dtype: int64\n",
      "实际类别数量: 4\n",
      "传入的 n_classes: 4\n",
      "标签范围: [0, 3]\n"
     ]
    }
   ],
   "source": [
    "# Inspect the label distribution.\n",
    "print(\"标签值统计:\")\n",
    "print(pd.Series(train_y_converted).value_counts().sort_index())\n",
    "\n",
    "# Check the number of classes against the configured n_classes.\n",
    "actual_n_classes = len(np.unique(train_y_converted))\n",
    "print(f\"实际类别数量: {actual_n_classes}\")\n",
    "print(f\"传入的 n_classes: {n_classes}\")\n",
    "\n",
    "# Check the label value range (models expect 0..n_classes-1).\n",
    "min_label = np.min(train_y_converted)\n",
    "max_label = np.max(train_y_converted)\n",
    "print(f\"标签范围: [{min_label}, {max_label}]\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "3e27d1db",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-encode labels when they are not already contiguous integers from 0.\n",
    "# NOTE(review): depends on min_label/max_label/actual_n_classes computed\n",
    "# in the previous cell - must be run in order.\n",
    "if min_label != 0 or max_label != actual_n_classes - 1:\n",
    "    print(\"重新编码标签为从0开始的连续整数...\")\n",
    "    label_encoder = LabelEncoder()\n",
    "    train_y_converted = label_encoder.fit_transform(train_y_converted)\n",
    "    n_classes = len(label_encoder.classes_)\n",
    "    print(f\"新标签范围: [{np.min(train_y_converted)}, {np.max(train_y_converted)}]\")\n",
    "    print(f\"新类别数量: {n_classes}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "e96aac15",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2025-10-20 09:03:00,192] A new study created in memory with name: no-name-2e9437eb-e25f-4bd3-9453-65fd3fa590b2\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "优化 LightGBM 模型...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "ea4b128ab194493289012391053c550b",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/50 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[I 2025-10-20 09:03:08,829] Trial 7 finished with value: 0.35378834000028886 and parameters: {'reg_lambda': 0.13158532019287364, 'reg_alpha': 2.9622115686284833}. Best is trial 7 with value: 0.35378834000028886.\n",
      "[I 2025-10-20 09:03:16,165] Trial 3 finished with value: 0.35737047848783776 and parameters: {'reg_lambda': 9.09576091924305, 'reg_alpha': 1.6108786851462102}. Best is trial 3 with value: 0.35737047848783776.\n",
      "[I 2025-10-20 09:03:29,989] Trial 6 finished with value: 0.3555701356917962 and parameters: {'reg_lambda': 9.669289089807766, 'reg_alpha': 0.40958930008959876}. Best is trial 3 with value: 0.35737047848783776.\n",
      "[I 2025-10-20 09:03:43,456] Trial 5 finished with value: 0.3572416496480492 and parameters: {'reg_lambda': 0.002426773051041857, 'reg_alpha': 1.4336677162415736}. Best is trial 3 with value: 0.35737047848783776.\n",
      "[I 2025-10-20 09:03:50,874] Trial 1 finished with value: 0.35426354554767614 and parameters: {'reg_lambda': 0.07436512736016988, 'reg_alpha': 1.1243348554584907}. Best is trial 3 with value: 0.35737047848783776.\n",
      "[I 2025-10-20 09:04:20,839] Trial 2 finished with value: 0.3546821424360407 and parameters: {'reg_lambda': 0.013103228883626363, 'reg_alpha': 0.24047133167642015}. Best is trial 3 with value: 0.35737047848783776.\n",
      "[I 2025-10-20 09:04:26,780] Trial 0 finished with value: 0.35314680482038624 and parameters: {'reg_lambda': 0.0026053665110575466, 'reg_alpha': 0.0019364415914091423}. Best is trial 3 with value: 0.35737047848783776.\n",
      "[I 2025-10-20 09:04:30,065] Trial 4 finished with value: 0.35590235997884784 and parameters: {'reg_lambda': 0.04643031590391744, 'reg_alpha': 0.002521081443342131}. Best is trial 3 with value: 0.35737047848783776.\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[8], line 3\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[38;5;66;03m# 优化 LightGBM\u001b[39;00m\n\u001b[0;32m      2\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m优化 LightGBM 模型...\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m----> 3\u001b[0m lgb_study \u001b[38;5;241m=\u001b[39m \u001b[43moptimize_model_with_gpu\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mlgb\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrain_X\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrain_y_converted\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_classes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mseed\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m      4\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mLightGBM 最佳 F1: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mlgb_study\u001b[38;5;241m.\u001b[39mbest_value\u001b[38;5;132;01m:\u001b[39;00m\u001b[38;5;124m.5f\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m      5\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m最佳参数:\u001b[39m\u001b[38;5;124m\"\u001b[39m, lgb_study\u001b[38;5;241m.\u001b[39mbest_params)\n",
      "Cell \u001b[1;32mIn[4], line 340\u001b[0m, in \u001b[0;36moptimize_model_with_gpu\u001b[1;34m(model_type, train_x, train_y, n_classes, seed, n_trials)\u001b[0m\n\u001b[0;32m    333\u001b[0m study \u001b[38;5;241m=\u001b[39m optuna\u001b[38;5;241m.\u001b[39mcreate_study(\n\u001b[0;32m    334\u001b[0m     direction\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmaximize\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[0;32m    335\u001b[0m     sampler\u001b[38;5;241m=\u001b[39msampler,\n\u001b[0;32m    336\u001b[0m     pruner\u001b[38;5;241m=\u001b[39mpruner\n\u001b[0;32m    337\u001b[0m )\n\u001b[0;32m    339\u001b[0m \u001b[38;5;66;03m# 优化过程 - 使用8个并行作业（对应8张卡）\u001b[39;00m\n\u001b[1;32m--> 340\u001b[0m \u001b[43mstudy\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43moptimize\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    341\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;28;43;01mlambda\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mtrial\u001b[49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mobjective\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrial\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrain_x\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrain_y\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_classes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mseed\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43muse_gpu\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    342\u001b[0m \u001b[43m    \u001b[49m\u001b[43mn_trials\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mn_trials\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    343\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1800\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m  \u001b[49m\u001b[38;5;66;43;03m# 30分钟超时\u001b[39;49;00m\n\u001b[0;32m    344\u001b[0m 
\u001b[43m    \u001b[49m\u001b[43mshow_progress_bar\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m    345\u001b[0m \u001b[43m    \u001b[49m\u001b[43mgc_after_trial\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m    346\u001b[0m \u001b[43m    \u001b[49m\u001b[43mn_jobs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m8\u001b[39;49m\u001b[43m  \u001b[49m\u001b[38;5;66;43;03m# 并行运行8个试验\u001b[39;49;00m\n\u001b[0;32m    347\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    349\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m study\n",
      "File \u001b[1;32mc:\\Users\\14714\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\optuna\\study\\study.py:490\u001b[0m, in \u001b[0;36mStudy.optimize\u001b[1;34m(self, func, n_trials, timeout, n_jobs, catch, callbacks, gc_after_trial, show_progress_bar)\u001b[0m\n\u001b[0;32m    388\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21moptimize\u001b[39m(\n\u001b[0;32m    389\u001b[0m     \u001b[38;5;28mself\u001b[39m,\n\u001b[0;32m    390\u001b[0m     func: ObjectiveFuncType,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    397\u001b[0m     show_progress_bar: \u001b[38;5;28mbool\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[0;32m    398\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m    399\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"Optimize an objective function.\u001b[39;00m\n\u001b[0;32m    400\u001b[0m \n\u001b[0;32m    401\u001b[0m \u001b[38;5;124;03m    Optimization is done by choosing a suitable set of hyperparameter values from a given\u001b[39;00m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    488\u001b[0m \u001b[38;5;124;03m            If nested invocation of this method occurs.\u001b[39;00m\n\u001b[0;32m    489\u001b[0m \u001b[38;5;124;03m    \"\"\"\u001b[39;00m\n\u001b[1;32m--> 490\u001b[0m     \u001b[43m_optimize\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    491\u001b[0m \u001b[43m        \u001b[49m\u001b[43mstudy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m    492\u001b[0m \u001b[43m        \u001b[49m\u001b[43mfunc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    493\u001b[0m \u001b[43m        \u001b[49m\u001b[43mn_trials\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mn_trials\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    494\u001b[0m \u001b[43m        
\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    495\u001b[0m \u001b[43m        \u001b[49m\u001b[43mn_jobs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mn_jobs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    496\u001b[0m \u001b[43m        \u001b[49m\u001b[43mcatch\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mtuple\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mcatch\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43misinstance\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mcatch\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mIterable\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43m(\u001b[49m\u001b[43mcatch\u001b[49m\u001b[43m,\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    497\u001b[0m \u001b[43m        \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcallbacks\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    498\u001b[0m \u001b[43m        \u001b[49m\u001b[43mgc_after_trial\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgc_after_trial\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    499\u001b[0m \u001b[43m        \u001b[49m\u001b[43mshow_progress_bar\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mshow_progress_bar\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    500\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32mc:\\Users\\14714\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\optuna\\study\\_optimize.py:97\u001b[0m, in \u001b[0;36m_optimize\u001b[1;34m(study, func, n_trials, timeout, n_jobs, catch, callbacks, gc_after_trial, show_progress_bar)\u001b[0m\n\u001b[0;32m     94\u001b[0m     \u001b[38;5;28;01mbreak\u001b[39;00m\n\u001b[0;32m     96\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(futures) \u001b[38;5;241m>\u001b[39m\u001b[38;5;241m=\u001b[39m n_jobs:\n\u001b[1;32m---> 97\u001b[0m     completed, futures \u001b[38;5;241m=\u001b[39m \u001b[43mwait\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfutures\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreturn_when\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mFIRST_COMPLETED\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     98\u001b[0m     \u001b[38;5;66;03m# Raise if exception occurred in executing the completed futures.\u001b[39;00m\n\u001b[0;32m     99\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m f \u001b[38;5;129;01min\u001b[39;00m completed:\n",
      "File \u001b[1;32mc:\\Users\\14714\\AppData\\Local\\Programs\\Python\\Python39\\lib\\concurrent\\futures\\_base.py:307\u001b[0m, in \u001b[0;36mwait\u001b[1;34m(fs, timeout, return_when)\u001b[0m\n\u001b[0;32m    303\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m DoneAndNotDoneFutures(done, not_done)\n\u001b[0;32m    305\u001b[0m     waiter \u001b[38;5;241m=\u001b[39m _create_and_install_waiters(fs, return_when)\n\u001b[1;32m--> 307\u001b[0m \u001b[43mwaiter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mevent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwait\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    308\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m f \u001b[38;5;129;01min\u001b[39;00m fs:\n\u001b[0;32m    309\u001b[0m     \u001b[38;5;28;01mwith\u001b[39;00m f\u001b[38;5;241m.\u001b[39m_condition:\n",
      "File \u001b[1;32mc:\\Users\\14714\\AppData\\Local\\Programs\\Python\\Python39\\lib\\threading.py:574\u001b[0m, in \u001b[0;36mEvent.wait\u001b[1;34m(self, timeout)\u001b[0m\n\u001b[0;32m    572\u001b[0m signaled \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_flag\n\u001b[0;32m    573\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m signaled:\n\u001b[1;32m--> 574\u001b[0m     signaled \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_cond\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwait\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    575\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m signaled\n",
      "File \u001b[1;32mc:\\Users\\14714\\AppData\\Local\\Programs\\Python\\Python39\\lib\\threading.py:312\u001b[0m, in \u001b[0;36mCondition.wait\u001b[1;34m(self, timeout)\u001b[0m\n\u001b[0;32m    310\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:    \u001b[38;5;66;03m# restore state no matter what (e.g., KeyboardInterrupt)\u001b[39;00m\n\u001b[0;32m    311\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m timeout \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m--> 312\u001b[0m         \u001b[43mwaiter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43macquire\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    313\u001b[0m         gotit \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[0;32m    314\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# 优化 LightGBM\n",
    "print(\"\\n优化 LightGBM 模型...\")\n",
    "lgb_study = optimize_model_with_gpu(\"lgb\", train_X, train_y_converted, n_classes, seed)\n",
    "print(f\"LightGBM 最佳 F1: {lgb_study.best_value:.5f}\")\n",
    "print(\"最佳参数:\", lgb_study.best_params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "d57885af",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2025-10-14 11:12:06,853] A new study created in memory with name: no-name-cc0e7b16-3d75-4e96-8e81-692fce5f94da\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "优化 XGBoost 模型...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f6e343836e1146798cf16253aeb65ab0",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/50 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[I 2025-10-14 11:12:35,054] Trial 0 finished with value: 0.34215685640415217 and parameters: {'max_depth': 6, 'min_child_weight': 45.18560951024108, 'gamma': 7.319939418114051, 'subsample': 0.7993292420985183, 'colsample_bytree': 0.5780093202212182, 'colsample_bylevel': 0.5779972601681014, 'reg_lambda': 0.0019517224641449498, 'reg_alpha': 21.42302175774105, 'learning_rate': 0.015930522616241012}. Best is trial 0 with value: 0.34215685640415217.\n",
      "[I 2025-10-14 11:13:10,613] Trial 1 finished with value: 0.4019649796637112 and parameters: {'max_depth': 10, 'min_child_weight': 1.3934502251337587e-05, 'gamma': 9.699098521619943, 'subsample': 0.9162213204002109, 'colsample_bytree': 0.6061695553391381, 'colsample_bylevel': 0.5909124836035503, 'reg_lambda': 0.008260808399079604, 'reg_alpha': 0.033205591037519584, 'learning_rate': 0.01120760621186057}. Best is trial 1 with value: 0.4019649796637112.\n",
      "[I 2025-10-14 11:14:23,372] Trial 2 finished with value: 0.40404586950174864 and parameters: {'max_depth': 7, 'min_child_weight': 0.0010929592787219397, 'gamma': 6.118528947223795, 'subsample': 0.569746930326021, 'colsample_bytree': 0.6460723242676091, 'colsample_bylevel': 0.6831809216468459, 'reg_lambda': 0.19069966103000435, 'reg_alpha': 8.43101393208247, 'learning_rate': 0.002508115686045232}. Best is trial 2 with value: 0.40404586950174864.\n",
      "[I 2025-10-14 11:14:34,999] Trial 3 finished with value: 0.3095415092493797 and parameters: {'max_depth': 8, 'min_child_weight': 0.14024971326600363, 'gamma': 0.46450412719997725, 'subsample': 0.8037724259507192, 'colsample_bytree': 0.5852620618436457, 'colsample_bylevel': 0.5325257964926398, 'reg_lambda': 55.51721685244721, 'reg_alpha': 67.32248920775338, 'learning_rate': 0.041380401125610165}. Best is trial 2 with value: 0.40404586950174864.\n",
      "[I 2025-10-14 11:15:28,517] Trial 4 finished with value: 0.30734512590111385 and parameters: {'max_depth': 6, 'min_child_weight': 4.8273056519757004e-05, 'gamma': 6.842330265121569, 'subsample': 0.7200762468698007, 'colsample_bytree': 0.5610191174223894, 'colsample_bylevel': 0.7475884550556351, 'reg_lambda': 0.0014857392806279248, 'reg_alpha': 35.20481045526041, 'learning_rate': 0.0032927591344236173}. Best is trial 2 with value: 0.40404586950174864.\n",
      "[I 2025-10-14 11:15:34,724] Trial 5 finished with value: 0.31174593838971904 and parameters: {'max_depth': 9, 'min_child_weight': 0.0015204688692198919, 'gamma': 5.200680211778108, 'subsample': 0.7733551396716398, 'colsample_bytree': 0.5924272277627636, 'colsample_bylevel': 0.9847923138822793, 'reg_lambda': 7.510418138777549, 'reg_alpha': 49.830438374949125, 'learning_rate': 0.06161049539380966}. Best is trial 2 with value: 0.40404586950174864.\n",
      "[I 2025-10-14 11:15:41,538] Trial 6 finished with value: 0.40893493123107655 and parameters: {'max_depth': 8, 'min_child_weight': 28.387009634436275, 'gamma': 0.884925020519195, 'subsample': 0.5979914312095727, 'colsample_bytree': 0.522613644455269, 'colsample_bylevel': 0.6626651653816322, 'reg_lambda': 0.08777815504719653, 'reg_alpha': 0.022737628102536857, 'learning_rate': 0.045443839603360174}. Best is trial 6 with value: 0.40893493123107655.\n",
      "[I 2025-10-14 11:16:54,323] Trial 7 finished with value: 0.3814401511963843 and parameters: {'max_depth': 6, 'min_child_weight': 0.000925851997344378, 'gamma': 5.426960831582485, 'subsample': 0.5704621124873813, 'colsample_bytree': 0.9010984903770198, 'colsample_bylevel': 0.5372753218398854, 'reg_lambda': 85.98737339212276, 'reg_alpha': 7.264803074826735, 'learning_rate': 0.002497073714505273}. Best is trial 6 with value: 0.40893493123107655.\n",
      "[I 2025-10-14 11:17:03,796] Trial 8 finished with value: 0.4030032172426808 and parameters: {'max_depth': 3, 'min_child_weight': 5.107754312955835, 'gamma': 7.068573438476172, 'subsample': 0.8645035840204937, 'colsample_bytree': 0.8856351733429728, 'colsample_bylevel': 0.5370223258670452, 'reg_lambda': 0.061991000078022655, 'reg_alpha': 0.0037961668958008117, 'learning_rate': 0.05323617594751501}. Best is trial 6 with value: 0.40893493123107655.\n",
      "[I 2025-10-14 11:18:11,783] Trial 9 finished with value: 0.402502112289757 and parameters: {'max_depth': 9, 'min_child_weight': 0.00207150589708162, 'gamma': 0.6355835028602363, 'subsample': 0.6554911608578311, 'colsample_bytree': 0.6625916610133735, 'colsample_bylevel': 0.864803089169032, 'reg_lambda': 1.5409457762881555, 'reg_alpha': 27.293781650374736, 'learning_rate': 0.008798929749689027}. Best is trial 6 with value: 0.40893493123107655.\n",
      "[I 2025-10-14 11:18:21,808] Trial 10 finished with value: 0.41784326647705305 and parameters: {'max_depth': 12, 'min_child_weight': 0.665344514010968, 'gamma': 2.509762066394919, 'subsample': 0.5089809378074099, 'colsample_bytree': 0.7828065453407016, 'colsample_bylevel': 0.6808837830192477, 'reg_lambda': 0.02179783915187063, 'reg_alpha': 0.09167785859319849, 'learning_rate': 0.02176951803542744}. Best is trial 10 with value: 0.41784326647705305.\n",
      "[I 2025-10-14 11:18:29,659] Trial 11 finished with value: 0.42079520620338756 and parameters: {'max_depth': 12, 'min_child_weight': 0.918528297775761, 'gamma': 2.4544436597901944, 'subsample': 0.5132167344504134, 'colsample_bytree': 0.777751009017812, 'colsample_bylevel': 0.6865842761751744, 'reg_lambda': 0.03439402959182402, 'reg_alpha': 0.22945022158230544, 'learning_rate': 0.025897641777143522}. Best is trial 11 with value: 0.42079520620338756.\n",
      "[I 2025-10-14 11:18:38,261] Trial 12 finished with value: 0.4179245877535162 and parameters: {'max_depth': 12, 'min_child_weight': 0.33189070139692495, 'gamma': 2.940957743850063, 'subsample': 0.5063476365083546, 'colsample_bytree': 0.781979666086564, 'colsample_bylevel': 0.8124248534786498, 'reg_lambda': 0.013939061207530199, 'reg_alpha': 0.69891055145951, 'learning_rate': 0.02176902952454701}. Best is trial 11 with value: 0.42079520620338756.\n",
      "[I 2025-10-14 11:19:06,315] Trial 13 finished with value: 0.4160613995375489 and parameters: {'max_depth': 12, 'min_child_weight': 0.039770446079252966, 'gamma': 3.2480488180025446, 'subsample': 0.5009396842696076, 'colsample_bytree': 0.7733099724742855, 'colsample_bylevel': 0.8522787142520779, 'reg_lambda': 0.5990451078570408, 'reg_alpha': 0.9030561635385029, 'learning_rate': 0.0062650558945544985}. Best is trial 11 with value: 0.42079520620338756.\n",
      "[I 2025-10-14 11:19:13,736] Trial 14 finished with value: 0.40704517691655456 and parameters: {'max_depth': 11, 'min_child_weight': 1.0362339369686302, 'gamma': 3.208580027676755, 'subsample': 0.9980084946806067, 'colsample_bytree': 0.9977891442946909, 'colsample_bylevel': 0.8256139655211495, 'reg_lambda': 0.021307634779434294, 'reg_alpha': 0.7834124040299031, 'learning_rate': 0.025695327964024475}. Best is trial 11 with value: 0.42079520620338756.\n",
      "[I 2025-10-14 11:19:17,016] Trial 15 finished with value: 0.41160793264256107 and parameters: {'max_depth': 11, 'min_child_weight': 2.254429985910235, 'gamma': 2.0630802167418665, 'subsample': 0.6869888528707923, 'colsample_bytree': 0.7172335772757619, 'colsample_bylevel': 0.7803462240175202, 'reg_lambda': 0.006988709583909563, 'reg_alpha': 0.23211953407414368, 'learning_rate': 0.0900602049993969}. Best is trial 11 with value: 0.42079520620338756.\n",
      "[I 2025-10-14 11:20:44,746] Trial 16 finished with value: 0.41260327734554086 and parameters: {'max_depth': 3, 'min_child_weight': 0.08453216359293125, 'gamma': 3.7892407055136132, 'subsample': 0.6391841399773438, 'colsample_bytree': 0.8414922583697773, 'colsample_bylevel': 0.9335194328786414, 'reg_lambda': 0.5101038058304649, 'reg_alpha': 2.4537152079257187, 'learning_rate': 0.0010625721497327498}. Best is trial 11 with value: 0.42079520620338756.\n",
      "[I 2025-10-14 11:20:54,466] Trial 17 finished with value: 0.4096501321694201 and parameters: {'max_depth': 11, 'min_child_weight': 0.009311721469975862, 'gamma': 4.2866861418889455, 'subsample': 0.5544198973125091, 'colsample_bytree': 0.7275105407493591, 'colsample_bylevel': 0.7638661036158657, 'reg_lambda': 0.006611160182967721, 'reg_alpha': 0.0015188821076140718, 'learning_rate': 0.028339568896928628}. Best is trial 11 with value: 0.42079520620338756.\n",
      "[I 2025-10-14 11:21:08,827] Trial 18 finished with value: 0.42436007510202467 and parameters: {'max_depth': 12, 'min_child_weight': 0.4353144353062773, 'gamma': 1.8330535581407577, 'subsample': 0.5001951158607271, 'colsample_bytree': 0.8314292657588028, 'colsample_bylevel': 0.7192173838727322, 'reg_lambda': 0.040133004481814366, 'reg_alpha': 0.18479903681894086, 'learning_rate': 0.014811347672478076}. Best is trial 18 with value: 0.42436007510202467.\n",
      "[I 2025-10-14 11:21:32,061] Trial 19 finished with value: 0.417181748271173 and parameters: {'max_depth': 10, 'min_child_weight': 5.921416079179736, 'gamma': 1.6994669162142548, 'subsample': 0.6228156701596408, 'colsample_bytree': 0.8346427574647917, 'colsample_bylevel': 0.6222716007946105, 'reg_lambda': 1.74958762026278, 'reg_alpha': 0.01013789884292139, 'learning_rate': 0.007012123849790405}. Best is trial 18 with value: 0.42436007510202467.\n",
      "[I 2025-10-14 11:22:04,645] Trial 20 finished with value: 0.4274249776524666 and parameters: {'max_depth': 10, 'min_child_weight': 0.007677962302505544, 'gamma': 1.4726135966679625, 'subsample': 0.6893629358077933, 'colsample_bytree': 0.964051434116436, 'colsample_bylevel': 0.7183682595935981, 'reg_lambda': 0.04850690368626894, 'reg_alpha': 0.13235616186575136, 'learning_rate': 0.004393287593382586}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:22:37,028] Trial 21 finished with value: 0.4190767442205043 and parameters: {'max_depth': 10, 'min_child_weight': 0.012453762106549006, 'gamma': 1.520820902352685, 'subsample': 0.7133826526707893, 'colsample_bytree': 0.9867145705142458, 'colsample_bylevel': 0.7274918439659879, 'reg_lambda': 0.06275092669365513, 'reg_alpha': 0.17987584811827412, 'learning_rate': 0.004713859645341435}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:22:50,662] Trial 22 finished with value: 0.4207350543009046 and parameters: {'max_depth': 12, 'min_child_weight': 0.24712770001090273, 'gamma': 1.3855044684954663, 'subsample': 0.545008320158644, 'colsample_bytree': 0.9463714497407678, 'colsample_bylevel': 0.6978410767638589, 'reg_lambda': 0.21238434289313105, 'reg_alpha': 0.06019113317113194, 'learning_rate': 0.0129315056977218}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:23:58,459] Trial 23 finished with value: 0.4064214859885933 and parameters: {'max_depth': 11, 'min_child_weight': 0.013723519345686762, 'gamma': 0.19070657318580575, 'subsample': 0.6042781133345477, 'colsample_bytree': 0.8571830939780881, 'colsample_bylevel': 0.6354689857368441, 'reg_lambda': 0.04840850464637117, 'reg_alpha': 0.3276842801543712, 'learning_rate': 0.0016230597160565307}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:24:35,580] Trial 24 finished with value: 0.4156372108722864 and parameters: {'max_depth': 10, 'min_child_weight': 0.00016963677919364564, 'gamma': 2.44903725147208, 'subsample': 0.6703730532342718, 'colsample_bytree': 0.92609987111392, 'colsample_bylevel': 0.7124653166145828, 'reg_lambda': 0.11939590009311192, 'reg_alpha': 2.124733560140888, 'learning_rate': 0.004934168934796753}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:24:49,525] Trial 25 finished with value: 0.4081987143522192 and parameters: {'max_depth': 9, 'min_child_weight': 10.871971539898363, 'gamma': 4.317953679347482, 'subsample': 0.5380639762715508, 'colsample_bytree': 0.8173922085072568, 'colsample_bylevel': 0.6438426611827259, 'reg_lambda': 0.0035850743409115973, 'reg_alpha': 0.08427584287061336, 'learning_rate': 0.014849130983490338}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:24:56,353] Trial 26 finished with value: 0.4157364428739145 and parameters: {'max_depth': 12, 'min_child_weight': 0.05314833395812165, 'gamma': 1.311037579942934, 'subsample': 0.8548615810556881, 'colsample_bytree': 0.6944753351583808, 'colsample_bylevel': 0.7914927567077842, 'reg_lambda': 0.028729768429545968, 'reg_alpha': 0.012751248889166573, 'learning_rate': 0.0333128974762636}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:25:15,415] Trial 27 finished with value: 0.42323431914984194 and parameters: {'max_depth': 11, 'min_child_weight': 1.3069323819622756, 'gamma': 0.06381068643884502, 'subsample': 0.5881728876805504, 'colsample_bytree': 0.9483716174146464, 'colsample_bylevel': 0.7280730989482597, 'reg_lambda': 0.5073480923285257, 'reg_alpha': 0.2880886275042888, 'learning_rate': 0.007853955337551158}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:25:39,251] Trial 28 finished with value: 0.41829734618966646 and parameters: {'max_depth': 11, 'min_child_weight': 0.004413186417103546, 'gamma': 0.015702322211650355, 'subsample': 0.5952581839382227, 'colsample_bytree': 0.9505194794070221, 'colsample_bylevel': 0.7407119666074966, 'reg_lambda': 2.156096332878121, 'reg_alpha': 0.4404627303801279, 'learning_rate': 0.007992556389336086}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:26:50,557] Trial 29 finished with value: 0.4017849695501789 and parameters: {'max_depth': 5, 'min_child_weight': 65.85484347429804, 'gamma': 8.137383455721935, 'subsample': 0.7460764198630866, 'colsample_bytree': 0.8790449941853369, 'colsample_bylevel': 0.5926963597463493, 'reg_lambda': 7.364538144949738, 'reg_alpha': 1.5294711497080635, 'learning_rate': 0.0037036872876260616}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:27:03,979] Trial 30 finished with value: 0.4212329837429683 and parameters: {'max_depth': 9, 'min_child_weight': 20.769038141256722, 'gamma': 0.8944695453852135, 'subsample': 0.6348214258474993, 'colsample_bytree': 0.9624568759356427, 'colsample_bylevel': 0.9101320728802786, 'reg_lambda': 0.39228326896622956, 'reg_alpha': 0.11376071168000061, 'learning_rate': 0.01615190576581435}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:27:15,794] Trial 31 finished with value: 0.4104629200243819 and parameters: {'max_depth': 9, 'min_child_weight': 17.052467835061115, 'gamma': 0.9913457471339839, 'subsample': 0.7010882143165703, 'colsample_bytree': 0.9712604665734232, 'colsample_bylevel': 0.8911450971554931, 'reg_lambda': 0.5963600269763232, 'reg_alpha': 0.10471774254252485, 'learning_rate': 0.018718686785058794}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:27:30,853] Trial 32 finished with value: 0.41299116025872556 and parameters: {'max_depth': 10, 'min_child_weight': 3.6918093293325636, 'gamma': 1.7420917761852133, 'subsample': 0.6441546339307593, 'colsample_bytree': 0.9214973655993384, 'colsample_bylevel': 0.9983954804516644, 'reg_lambda': 0.2599166419246343, 'reg_alpha': 0.029236380687157743, 'learning_rate': 0.010220481737527149}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:28:11,501] Trial 33 finished with value: 0.382430938903591 and parameters: {'max_depth': 8, 'min_child_weight': 91.6341284229552, 'gamma': 9.815344426995159, 'subsample': 0.5847521644721558, 'colsample_bytree': 0.9512678216854856, 'colsample_bylevel': 0.917436024478976, 'reg_lambda': 0.12356173630704277, 'reg_alpha': 0.0615939330207551, 'learning_rate': 0.012345980536005554}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:28:22,375] Trial 34 finished with value: 0.41545177341225953 and parameters: {'max_depth': 10, 'min_child_weight': 1.6524575191172821, 'gamma': 0.7336958838838856, 'subsample': 0.622383434386241, 'colsample_bytree': 0.9078029144210029, 'colsample_bylevel': 0.8029012159698944, 'reg_lambda': 0.37695343225169203, 'reg_alpha': 0.14840678311439529, 'learning_rate': 0.016092054211681667}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:29:02,303] Trial 35 finished with value: 0.41787205821167 and parameters: {'max_depth': 11, 'min_child_weight': 0.3202244801872247, 'gamma': 0.12310334912828425, 'subsample': 0.6810998227457783, 'colsample_bytree': 0.9610673316462865, 'colsample_bylevel': 0.7207203914283129, 'reg_lambda': 0.9514071942662172, 'reg_alpha': 5.888355125204394, 'learning_rate': 0.005556156821352741}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:29:48,727] Trial 36 finished with value: 0.40525897068774297 and parameters: {'max_depth': 9, 'min_child_weight': 0.1123116527949194, 'gamma': 7.913191975793806, 'subsample': 0.757478257038914, 'colsample_bytree': 0.8759759365384654, 'colsample_bylevel': 0.7567406226713911, 'reg_lambda': 4.29866035730567, 'reg_alpha': 0.04137633470505147, 'learning_rate': 0.010146576106255238}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:30:34,306] Trial 37 finished with value: 0.40875561464574545 and parameters: {'max_depth': 7, 'min_child_weight': 24.865944352215646, 'gamma': 2.0994562321584347, 'subsample': 0.5429145374031779, 'colsample_bytree': 0.9272609450989295, 'colsample_bylevel': 0.9597775068146596, 'reg_lambda': 0.0010479635130896208, 'reg_alpha': 0.4836605924539377, 'learning_rate': 0.0035356186207316856}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:31:41,684] Trial 38 finished with value: 0.4191277171706487 and parameters: {'max_depth': 8, 'min_child_weight': 0.0002917891447036052, 'gamma': 1.023122485631712, 'subsample': 0.7980293621912709, 'colsample_bytree': 0.8145464006520284, 'colsample_bylevel': 0.6597400480534246, 'reg_lambda': 17.8418939824245, 'reg_alpha': 0.01482900913652584, 'learning_rate': 0.00266981931937612}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:32:35,846] Trial 39 finished with value: 0.402017567226095 and parameters: {'max_depth': 7, 'min_child_weight': 0.027629850809147666, 'gamma': 5.993073880532252, 'subsample': 0.7342191605674676, 'colsample_bytree': 0.9779901514117088, 'colsample_bylevel': 0.5954365660173088, 'reg_lambda': 0.16401215038148406, 'reg_alpha': 4.571950118025557, 'learning_rate': 0.008311081536973755}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:32:47,520] Trial 40 finished with value: 0.41264271412570414 and parameters: {'max_depth': 9, 'min_child_weight': 9.200657911080354, 'gamma': 0.6063098409227647, 'subsample': 0.5771959106835363, 'colsample_bytree': 0.9993880327724501, 'colsample_bylevel': 0.8340141875903644, 'reg_lambda': 0.012257847556441917, 'reg_alpha': 0.0062427849347333805, 'learning_rate': 0.015526146506205957}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:32:53,674] Trial 41 finished with value: 0.40329209679788663 and parameters: {'max_depth': 12, 'min_child_weight': 1.40857236226368, 'gamma': 2.5942556295575465, 'subsample': 0.5243947055944544, 'colsample_bytree': 0.7454983104284505, 'colsample_bylevel': 0.6875804342185866, 'reg_lambda': 0.057794675795871414, 'reg_alpha': 0.2522637695059558, 'learning_rate': 0.038060074094008065}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:33:01,093] Trial 42 finished with value: 0.4136160376832643 and parameters: {'max_depth': 11, 'min_child_weight': 0.6874469974077485, 'gamma': 1.8141567851319222, 'subsample': 0.5597426389067862, 'colsample_bytree': 0.8058097896546427, 'colsample_bylevel': 0.7697879832326091, 'reg_lambda': 0.031358044836263, 'reg_alpha': 0.13284738223606596, 'learning_rate': 0.0292666801653202}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:33:10,818] Trial 43 finished with value: 0.4219129733547435 and parameters: {'max_depth': 10, 'min_child_weight': 3.698209055570583, 'gamma': 1.1452399565785405, 'subsample': 0.6116180978307789, 'colsample_bytree': 0.625542027160621, 'colsample_bylevel': 0.668960235027872, 'reg_lambda': 0.004337384786008033, 'reg_alpha': 0.042488682104944775, 'learning_rate': 0.020698510350031337}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:33:20,501] Trial 44 finished with value: 0.4161440729033578 and parameters: {'max_depth': 10, 'min_child_weight': 2.979597394793177, 'gamma': 1.1360606609854285, 'subsample': 0.6175931649668293, 'colsample_bytree': 0.6214170278800357, 'colsample_bylevel': 0.7389275584310487, 'reg_lambda': 0.002446309514301138, 'reg_alpha': 0.03519923999299205, 'learning_rate': 0.021224402186808784}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:33:37,984] Trial 45 finished with value: 0.4072095929419204 and parameters: {'max_depth': 10, 'min_child_weight': 32.038497773364526, 'gamma': 0.33687665649372894, 'subsample': 0.6620553319104672, 'colsample_bytree': 0.540859681630605, 'colsample_bylevel': 0.6669709318626065, 'reg_lambda': 0.004030935854716226, 'reg_alpha': 0.053960090429339434, 'learning_rate': 0.012861011111967717}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:33:49,774] Trial 46 finished with value: 0.41994046396779433 and parameters: {'max_depth': 9, 'min_child_weight': 8.37144030688914, 'gamma': 0.5331408690554362, 'subsample': 0.6463169443832527, 'colsample_bytree': 0.6757403250022409, 'colsample_bylevel': 0.6158621301065643, 'reg_lambda': 0.875998657012518, 'reg_alpha': 0.02247570538010391, 'learning_rate': 0.01735148519074691}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:34:52,739] Trial 47 finished with value: 0.39791780239870067 and parameters: {'max_depth': 11, 'min_child_weight': 0.4448419867446838, 'gamma': 2.964497623779777, 'subsample': 0.5822410380517846, 'colsample_bytree': 0.5690616004538265, 'colsample_bylevel': 0.7066159945398938, 'reg_lambda': 0.013514689752012966, 'reg_alpha': 15.118725771848034, 'learning_rate': 0.007347894564646892}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:35:14,612] Trial 48 finished with value: 0.40658033419322503 and parameters: {'max_depth': 8, 'min_child_weight': 0.20164657603599317, 'gamma': 3.6143066431317186, 'subsample': 0.6977983025162905, 'colsample_bytree': 0.5087635849222591, 'colsample_bylevel': 0.6664926417786214, 'reg_lambda': 0.3005481060083995, 'reg_alpha': 0.9275655543321681, 'learning_rate': 0.00986825778111396}. Best is trial 20 with value: 0.4274249776524666.\n",
      "[I 2025-10-14 11:35:22,398] Trial 49 finished with value: 0.40280862209290486 and parameters: {'max_depth': 10, 'min_child_weight': 0.003903864995887393, 'gamma': 8.976122446570365, 'subsample': 0.6088249570427662, 'colsample_bytree': 0.8932453914900429, 'colsample_bylevel': 0.7469941900423486, 'reg_lambda': 0.1053660146255402, 'reg_alpha': 0.3830221013392872, 'learning_rate': 0.06615998029535208}. Best is trial 20 with value: 0.4274249776524666.\n",
      "XGBoost 最佳 F1: 0.42742\n",
      "最佳参数: {'max_depth': 10, 'min_child_weight': 0.007677962302505544, 'gamma': 1.4726135966679625, 'subsample': 0.6893629358077933, 'colsample_bytree': 0.964051434116436, 'colsample_bylevel': 0.7183682595935981, 'reg_lambda': 0.04850690368626894, 'reg_alpha': 0.13235616186575136, 'learning_rate': 0.004393287593382586}\n"
     ]
    }
   ],
   "source": [
    "# 优化 XGBoost\n",
    "print(\"\\n优化 XGBoost 模型...\")\n",
    "xgb_study = optimize_model_with_gpu(\"xgb\", train_X, train_y_converted, n_classes, seed)\n",
    "print(f\"XGBoost 最佳 F1: {xgb_study.best_value:.5f}\")\n",
    "print(\"最佳参数:\", xgb_study.best_params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "95fa8e8a",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[I 2025-10-14 11:35:22,493] A new study created in memory with name: no-name-89dcbaf0-8980-43da-a27a-30acf8b1e28c\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "优化 CatBoost 模型...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "ed5f5532c3474fa88b76d9005467b833",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/50 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[I 2025-10-14 11:36:26,300] Trial 0 finished with value: 0.4199226949055026 and parameters: {'learning_rate': 0.005611516415334507, 'depth': 10, 'l2_leaf_reg': 7.587945476302646, 'random_strength': 5.986584841970366, 'border_count': 66, 'min_data_in_leaf': 16, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.6011150117432088}. Best is trial 0 with value: 0.4199226949055026.\n",
      "[I 2025-10-14 11:36:33,907] Trial 1 finished with value: 0.4118790220255922 and parameters: {'learning_rate': 0.02607024758370768, 'depth': 4, 'l2_leaf_reg': 9.72918866945795, 'random_strength': 8.324426408004218, 'border_count': 79, 'min_data_in_leaf': 19, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.5247564316322378}. Best is trial 0 with value: 0.4199226949055026.\n",
      "[I 2025-10-14 11:36:49,542] Trial 2 finished with value: 0.41710207387155984 and parameters: {'learning_rate': 0.007309539835912915, 'depth': 6, 'l2_leaf_reg': 6.506676052501415, 'random_strength': 1.3949386065204183, 'border_count': 97, 'min_data_in_leaf': 37, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.19967378215835974}. Best is trial 0 with value: 0.4199226949055026.\n",
      "[I 2025-10-14 11:37:02,390] Trial 3 finished with value: 0.42315377848076025 and parameters: {'learning_rate': 0.01067748270948136, 'depth': 8, 'l2_leaf_reg': 1.4180537144799796, 'random_strength': 6.075448519014383, 'border_count': 70, 'min_data_in_leaf': 7, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.8083973481164611}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:37:33,104] Trial 4 finished with value: 0.41339101948822493 and parameters: {'learning_rate': 0.0040665633135147945, 'depth': 4, 'l2_leaf_reg': 7.158097238609412, 'random_strength': 4.4015249373960135, 'border_count': 59, 'min_data_in_leaf': 50, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.2587799816000169}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:37:41,381] Trial 5 finished with value: 0.4162503358167287 and parameters: {'learning_rate': 0.02113705944064573, 'depth': 6, 'l2_leaf_reg': 5.680612190600297, 'random_strength': 5.4671027934327965, 'border_count': 73, 'min_data_in_leaf': 97, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.8948273504276488}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:38:01,597] Trial 6 finished with value: 0.403199160524276 and parameters: {'learning_rate': 0.015696396388661146, 'depth': 10, 'l2_leaf_reg': 1.7964325184672756, 'random_strength': 1.959828624191452, 'border_count': 42, 'min_data_in_leaf': 33, 'bootstrap_type': 'Bernoulli', 'subsample': 0.9143687545759647}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:38:20,739] Trial 7 finished with value: 0.41479861606406093 and parameters: {'learning_rate': 0.005170191786366992, 'depth': 5, 'l2_leaf_reg': 5.884264748424236, 'random_strength': 1.4092422497476265, 'border_count': 211, 'min_data_in_leaf': 8, 'bootstrap_type': 'Bernoulli', 'subsample': 0.5993578407670862}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:39:40,796] Trial 8 finished with value: 0.395600085195944 and parameters: {'learning_rate': 0.0010257563974185654, 'depth': 9, 'l2_leaf_reg': 7.361716094628554, 'random_strength': 7.2900716804098735, 'border_count': 204, 'min_data_in_leaf': 8, 'bootstrap_type': 'Bernoulli', 'subsample': 0.9315517129377968}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:39:48,207] Trial 9 finished with value: 0.4146490246213969 and parameters: {'learning_rate': 0.01764396768338155, 'depth': 6, 'l2_leaf_reg': 1.5720251525742128, 'random_strength': 3.109823217156622, 'border_count': 104, 'min_data_in_leaf': 73, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.4722149251619493}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:39:54,168] Trial 10 finished with value: 0.41165701326346354 and parameters: {'learning_rate': 0.07767353816237044, 'depth': 8, 'l2_leaf_reg': 3.2990398791506856, 'random_strength': 9.322903419117871, 'border_count': 148, 'min_data_in_leaf': 70, 'bootstrap_type': 'Bernoulli', 'subsample': 0.5098613724959347}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:43:27,218] Trial 11 finished with value: 0.4179437802549765 and parameters: {'learning_rate': 0.0017680698828951285, 'depth': 10, 'l2_leaf_reg': 9.35269427682025, 'random_strength': 6.174544729930211, 'border_count': 140, 'min_data_in_leaf': 4, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.858332321641111}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:44:17,070] Trial 12 finished with value: 0.4204861834111219 and parameters: {'learning_rate': 0.0030590718967941047, 'depth': 8, 'l2_leaf_reg': 3.402919656508326, 'random_strength': 6.8974061235197945, 'border_count': 134, 'min_data_in_leaf': 25, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.6526812002249809}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:45:33,911] Trial 13 finished with value: 0.4160071296291144 and parameters: {'learning_rate': 0.0016852924474987086, 'depth': 8, 'l2_leaf_reg': 3.7974191260637458, 'random_strength': 7.293054474785954, 'border_count': 147, 'min_data_in_leaf': 30, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.7547337969095175}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:46:18,528] Trial 14 finished with value: 0.4176949837158512 and parameters: {'learning_rate': 0.002814145272515241, 'depth': 8, 'l2_leaf_reg': 3.3326189669607658, 'random_strength': 4.236504710481763, 'border_count': 183, 'min_data_in_leaf': 22, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.9960169893612135}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:46:33,137] Trial 15 finished with value: 0.41793454896928656 and parameters: {'learning_rate': 0.01109849610788091, 'depth': 7, 'l2_leaf_reg': 4.489294474424086, 'random_strength': 9.810534762256436, 'border_count': 121, 'min_data_in_leaf': 47, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.6918348202320201}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:46:42,653] Trial 16 finished with value: 0.417398808715095 and parameters: {'learning_rate': 0.044166512212508516, 'depth': 9, 'l2_leaf_reg': 2.283319594853481, 'random_strength': 7.065196583537812, 'border_count': 245, 'min_data_in_leaf': 65, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.38661114294792376}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:46:56,289] Trial 17 finished with value: 0.42296429195837 and parameters: {'learning_rate': 0.009846031520854467, 'depth': 7, 'l2_leaf_reg': 1.0397748285730068, 'random_strength': 8.596498751955512, 'border_count': 32, 'min_data_in_leaf': 24, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.6993078026779718}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:47:02,468] Trial 18 finished with value: 0.41053863206992874 and parameters: {'learning_rate': 0.04449731215201574, 'depth': 7, 'l2_leaf_reg': 2.3221054292513905, 'random_strength': 8.592831660600938, 'border_count': 36, 'min_data_in_leaf': 39, 'bootstrap_type': 'Bernoulli', 'subsample': 0.7282834413111813}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:47:18,235] Trial 19 finished with value: 0.41829048570436017 and parameters: {'learning_rate': 0.013325513569325079, 'depth': 9, 'l2_leaf_reg': 1.361401515388108, 'random_strength': 0.06797370321638585, 'border_count': 33, 'min_data_in_leaf': 3, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.8157298373095427}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:47:33,933] Trial 20 finished with value: 0.42094256370767624 and parameters: {'learning_rate': 0.008458204049986944, 'depth': 7, 'l2_leaf_reg': 1.1103032601416567, 'random_strength': 8.454068297109181, 'border_count': 98, 'min_data_in_leaf': 60, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.9769280173488694}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:47:48,586] Trial 21 finished with value: 0.41679017519494804 and parameters: {'learning_rate': 0.008629076312199476, 'depth': 7, 'l2_leaf_reg': 1.1410653602706344, 'random_strength': 7.923636884349011, 'border_count': 89, 'min_data_in_leaf': 61, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.9574144027654565}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:48:02,669] Trial 22 finished with value: 0.4140839173594211 and parameters: {'learning_rate': 0.008129654959622422, 'depth': 6, 'l2_leaf_reg': 2.602335282422991, 'random_strength': 9.21373271545501, 'border_count': 55, 'min_data_in_leaf': 80, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.7927707419220583}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:48:10,997] Trial 23 finished with value: 0.41947284733756746 and parameters: {'learning_rate': 0.027382876429614796, 'depth': 7, 'l2_leaf_reg': 4.697431643998616, 'random_strength': 8.21802844749453, 'border_count': 111, 'min_data_in_leaf': 55, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.7311798641574007}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:48:27,903] Trial 24 finished with value: 0.4203946971620991 and parameters: {'learning_rate': 0.006270995866881276, 'depth': 5, 'l2_leaf_reg': 1.1211367772592553, 'random_strength': 9.974698284809733, 'border_count': 57, 'min_data_in_leaf': 14, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.9044914528067214}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:49:01,074] Trial 25 finished with value: 0.41835500787474594 and parameters: {'learning_rate': 0.004062073207552914, 'depth': 8, 'l2_leaf_reg': 2.383404365165539, 'random_strength': 6.2541085159188, 'border_count': 85, 'min_data_in_leaf': 86, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.019515034516453256}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:49:15,514] Trial 26 finished with value: 0.41963039124584617 and parameters: {'learning_rate': 0.010566754024489445, 'depth': 7, 'l2_leaf_reg': 2.906133739729556, 'random_strength': 4.7900419472025835, 'border_count': 169, 'min_data_in_leaf': 44, 'bootstrap_type': 'Bernoulli', 'subsample': 0.7540693478008181}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:49:26,041] Trial 27 finished with value: 0.4161087461613951 and parameters: {'learning_rate': 0.011995648411205066, 'depth': 5, 'l2_leaf_reg': 1.8499127995982274, 'random_strength': 8.92609208080317, 'border_count': 48, 'min_data_in_leaf': 27, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.6142660070113009}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:49:35,189] Trial 28 finished with value: 0.41326374753468365 and parameters: {'learning_rate': 0.045036799642780276, 'depth': 9, 'l2_leaf_reg': 4.285424137935569, 'random_strength': 3.4154891881681944, 'border_count': 121, 'min_data_in_leaf': 56, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.8250325600520582}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:49:55,675] Trial 29 finished with value: 0.4191608124223551 and parameters: {'learning_rate': 0.005194226358974326, 'depth': 8, 'l2_leaf_reg': 1.0529133744022268, 'random_strength': 5.599504828921596, 'border_count': 69, 'min_data_in_leaf': 13, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.9366691803128739}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:50:01,966] Trial 30 finished with value: 0.4186105549426558 and parameters: {'learning_rate': 0.030801191617410986, 'depth': 6, 'l2_leaf_reg': 2.0638911065213894, 'random_strength': 7.658071526342088, 'border_count': 71, 'min_data_in_leaf': 18, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.7447843751745281}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:50:52,909] Trial 31 finished with value: 0.41899043206172254 and parameters: {'learning_rate': 0.0028461667157374908, 'depth': 8, 'l2_leaf_reg': 3.110256454795803, 'random_strength': 6.655965006002666, 'border_count': 134, 'min_data_in_leaf': 25, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.6500005135562105}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:51:38,403] Trial 32 finished with value: 0.41611446864373386 and parameters: {'learning_rate': 0.0030121922439321613, 'depth': 7, 'l2_leaf_reg': 8.55854610563452, 'random_strength': 6.710502127809463, 'border_count': 95, 'min_data_in_leaf': 40, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.5565522168404771}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:51:54,241] Trial 33 finished with value: 0.4140562559681022 and parameters: {'learning_rate': 0.007694260451335619, 'depth': 7, 'l2_leaf_reg': 1.7729738412997236, 'random_strength': 8.358194924922866, 'border_count': 160, 'min_data_in_leaf': 33, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.4574726274220104}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:52:31,326] Trial 34 finished with value: 0.42050143290594527 and parameters: {'learning_rate': 0.004252381896651974, 'depth': 8, 'l2_leaf_reg': 3.927677525748404, 'random_strength': 7.633260708239612, 'border_count': 115, 'min_data_in_leaf': 20, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.6889375133815724}. Best is trial 3 with value: 0.42315377848076025.\n",
      "[I 2025-10-14 11:53:22,587] Trial 35 finished with value: 0.42539843110715575 and parameters: {'learning_rate': 0.004243370903305618, 'depth': 9, 'l2_leaf_reg': 4.876005532615329, 'random_strength': 7.685099399082659, 'border_count': 80, 'min_data_in_leaf': 1, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.8609968152425995}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:53:39,605] Trial 36 finished with value: 0.4197897293502875 and parameters: {'learning_rate': 0.015963914520519505, 'depth': 9, 'l2_leaf_reg': 6.634381612220948, 'random_strength': 5.88495206310656, 'border_count': 79, 'min_data_in_leaf': 1, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.8633656957802833}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:54:33,283] Trial 37 finished with value: 0.4183900096154815 and parameters: {'learning_rate': 0.006151405295194651, 'depth': 10, 'l2_leaf_reg': 4.829213271001143, 'random_strength': 8.578504059323684, 'border_count': 64, 'min_data_in_leaf': 13, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.9714396932343833}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:54:49,214] Trial 38 finished with value: 0.42496192884636913 and parameters: {'learning_rate': 0.019985321040645935, 'depth': 9, 'l2_leaf_reg': 5.450528331494436, 'random_strength': 5.183869909265772, 'border_count': 46, 'min_data_in_leaf': 10, 'bootstrap_type': 'Bernoulli', 'subsample': 0.7833852895309387}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:55:04,428] Trial 39 finished with value: 0.41569550024264584 and parameters: {'learning_rate': 0.021778791978909827, 'depth': 9, 'l2_leaf_reg': 5.93956225894554, 'random_strength': 5.119809787127025, 'border_count': 46, 'min_data_in_leaf': 8, 'bootstrap_type': 'Bernoulli', 'subsample': 0.7708993301130836}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:55:25,961] Trial 40 finished with value: 0.39540137250256563 and parameters: {'learning_rate': 0.02158013578573036, 'depth': 10, 'l2_leaf_reg': 7.906365419521912, 'random_strength': 3.7996249828141377, 'border_count': 32, 'min_data_in_leaf': 9, 'bootstrap_type': 'Bernoulli', 'subsample': 0.8425849573925933}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:55:51,551] Trial 41 finished with value: 0.41406223936513165 and parameters: {'learning_rate': 0.009331295103551884, 'depth': 9, 'l2_leaf_reg': 5.6615222838782975, 'random_strength': 5.35519687016071, 'border_count': 51, 'min_data_in_leaf': 7, 'bootstrap_type': 'Bernoulli', 'subsample': 0.6494163889034419}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:56:13,124] Trial 42 finished with value: 0.414426232264077 and parameters: {'learning_rate': 0.01453606077078154, 'depth': 9, 'l2_leaf_reg': 6.447156680080133, 'random_strength': 4.631134204425685, 'border_count': 79, 'min_data_in_leaf': 13, 'bootstrap_type': 'Bernoulli', 'subsample': 0.8430803052587702}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:56:23,062] Trial 43 finished with value: 0.4054822889182015 and parameters: {'learning_rate': 0.017014489583554598, 'depth': 6, 'l2_leaf_reg': 5.170257270197454, 'random_strength': 9.437710619990472, 'border_count': 100, 'min_data_in_leaf': 1, 'bootstrap_type': 'Bernoulli', 'subsample': 0.9990393634310047}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:56:55,105] Trial 44 finished with value: 0.4173132150322593 and parameters: {'learning_rate': 0.006619093964562549, 'depth': 10, 'l2_leaf_reg': 1.5859930543383238, 'random_strength': 6.422885740170382, 'border_count': 63, 'min_data_in_leaf': 6, 'bootstrap_type': 'Bernoulli', 'subsample': 0.6621797034180515}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:57:34,591] Trial 45 finished with value: 0.4210097936560191 and parameters: {'learning_rate': 0.004214299083387765, 'depth': 8, 'l2_leaf_reg': 5.118770974889317, 'random_strength': 8.012979953420365, 'border_count': 41, 'min_data_in_leaf': 16, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.7902348147104918}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:58:37,281] Trial 46 finished with value: 0.40678535042574504 and parameters: {'learning_rate': 0.0019431815336662774, 'depth': 8, 'l2_leaf_reg': 5.288391593219783, 'random_strength': 7.52405322494231, 'border_count': 41, 'min_data_in_leaf': 17, 'bootstrap_type': 'Bernoulli', 'subsample': 0.5327351764377143}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:59:12,296] Trial 47 finished with value: 0.41867299863024326 and parameters: {'learning_rate': 0.0038802537185262127, 'depth': 8, 'l2_leaf_reg': 4.120966245147668, 'random_strength': 2.532728409740018, 'border_count': 42, 'min_data_in_leaf': 22, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.572495382169874}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 11:59:23,548] Trial 48 finished with value: 0.41899195982418164 and parameters: {'learning_rate': 0.03275005796444346, 'depth': 9, 'l2_leaf_reg': 6.131863686437905, 'random_strength': 5.948768507257198, 'border_count': 56, 'min_data_in_leaf': 11, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.7769648831052092}. Best is trial 35 with value: 0.42539843110715575.\n",
      "[I 2025-10-14 12:00:29,450] Trial 49 finished with value: 0.41579949581343234 and parameters: {'learning_rate': 0.0023250856663668894, 'depth': 8, 'l2_leaf_reg': 6.9707595997594485, 'random_strength': 7.15503879018817, 'border_count': 74, 'min_data_in_leaf': 33, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.852167930950762}. Best is trial 35 with value: 0.42539843110715575.\n",
      "CatBoost 最佳 F1: 0.42540\n",
      "最佳参数: {'learning_rate': 0.004243370903305618, 'depth': 9, 'l2_leaf_reg': 4.876005532615329, 'random_strength': 7.685099399082659, 'border_count': 80, 'min_data_in_leaf': 1, 'bootstrap_type': 'Bayesian', 'bagging_temperature': 0.8609968152425995}\n"
     ]
    }
   ],
   "source": [
    "# 优化 CatBoost\n",
    "print(\"\\n优化 CatBoost 模型...\")\n",
    "cat_study = optimize_model_with_gpu(\"cat\", train_X, train_y_converted, n_classes, seed)\n",
    "print(f\"CatBoost 最佳 F1: {cat_study.best_value:.5f}\")\n",
    "print(\"最佳参数:\", cat_study.best_params)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "e42a06aa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "训练最终 LightGBM 模型...\n",
      "Training until validation scores don't improve for 500 rounds\n",
      "[100]\ttraining's multi_logloss: 0.716295\n",
      "[200]\ttraining's multi_logloss: 0.629002\n",
      "[300]\ttraining's multi_logloss: 0.614035\n",
      "[400]\ttraining's multi_logloss: 0.614035\n",
      "[500]\ttraining's multi_logloss: 0.614035\n",
      "[600]\ttraining's multi_logloss: 0.614035\n",
      "[700]\ttraining's multi_logloss: 0.614035\n",
      "[800]\ttraining's multi_logloss: 0.614035\n",
      "[900]\ttraining's multi_logloss: 0.614035\n",
      "[1000]\ttraining's multi_logloss: 0.614035\n",
      "[1100]\ttraining's multi_logloss: 0.614035\n",
      "[1200]\ttraining's multi_logloss: 0.614035\n",
      "[1300]\ttraining's multi_logloss: 0.614035\n",
      "[1400]\ttraining's multi_logloss: 0.614035\n",
      "[1500]\ttraining's multi_logloss: 0.614035\n",
      "[1600]\ttraining's multi_logloss: 0.614035\n",
      "[1700]\ttraining's multi_logloss: 0.614035\n",
      "[1800]\ttraining's multi_logloss: 0.614035\n",
      "[1900]\ttraining's multi_logloss: 0.614035\n",
      "[2000]\ttraining's multi_logloss: 0.614035\n",
      "[2100]\ttraining's multi_logloss: 0.614035\n",
      "[2200]\ttraining's multi_logloss: 0.614035\n",
      "[2300]\ttraining's multi_logloss: 0.614035\n",
      "[2400]\ttraining's multi_logloss: 0.614035\n",
      "[2500]\ttraining's multi_logloss: 0.614035\n",
      "[2600]\ttraining's multi_logloss: 0.614035\n",
      "[2700]\ttraining's multi_logloss: 0.614035\n",
      "[2800]\ttraining's multi_logloss: 0.614035\n",
      "[2900]\ttraining's multi_logloss: 0.614035\n",
      "[3000]\ttraining's multi_logloss: 0.614035\n",
      "[3100]\ttraining's multi_logloss: 0.614035\n",
      "[3200]\ttraining's multi_logloss: 0.614035\n",
      "[3300]\ttraining's multi_logloss: 0.614035\n",
      "[3400]\ttraining's multi_logloss: 0.614035\n",
      "[3500]\ttraining's multi_logloss: 0.614035\n",
      "[3600]\ttraining's multi_logloss: 0.614035\n",
      "[3700]\ttraining's multi_logloss: 0.614035\n",
      "[3800]\ttraining's multi_logloss: 0.614035\n",
      "[3900]\ttraining's multi_logloss: 0.614035\n",
      "[4000]\ttraining's multi_logloss: 0.614035\n",
      "[4100]\ttraining's multi_logloss: 0.614035\n",
      "[4200]\ttraining's multi_logloss: 0.614035\n",
      "[4300]\ttraining's multi_logloss: 0.614035\n",
      "[4400]\ttraining's multi_logloss: 0.614035\n",
      "[4500]\ttraining's multi_logloss: 0.614035\n",
      "[4600]\ttraining's multi_logloss: 0.614035\n",
      "[4700]\ttraining's multi_logloss: 0.614035\n",
      "[4800]\ttraining's multi_logloss: 0.614035\n",
      "[4900]\ttraining's multi_logloss: 0.614035\n",
      "[5000]\ttraining's multi_logloss: 0.614035\n",
      "[5100]\ttraining's multi_logloss: 0.614035\n",
      "[5200]\ttraining's multi_logloss: 0.614035\n",
      "[5300]\ttraining's multi_logloss: 0.614035\n",
      "[5400]\ttraining's multi_logloss: 0.614035\n",
      "[5500]\ttraining's multi_logloss: 0.614035\n",
      "[5600]\ttraining's multi_logloss: 0.614035\n",
      "[5700]\ttraining's multi_logloss: 0.614035\n",
      "[5800]\ttraining's multi_logloss: 0.614035\n",
      "[5900]\ttraining's multi_logloss: 0.614035\n",
      "[6000]\ttraining's multi_logloss: 0.614035\n",
      "[6100]\ttraining's multi_logloss: 0.614035\n",
      "[6200]\ttraining's multi_logloss: 0.614035\n",
      "[6300]\ttraining's multi_logloss: 0.614035\n",
      "[6400]\ttraining's multi_logloss: 0.614035\n",
      "[6500]\ttraining's multi_logloss: 0.614035\n",
      "[6600]\ttraining's multi_logloss: 0.614035\n",
      "[6700]\ttraining's multi_logloss: 0.614035\n",
      "[6800]\ttraining's multi_logloss: 0.614035\n",
      "[6900]\ttraining's multi_logloss: 0.614035\n",
      "[7000]\ttraining's multi_logloss: 0.614035\n",
      "[7100]\ttraining's multi_logloss: 0.614035\n",
      "[7200]\ttraining's multi_logloss: 0.614035\n",
      "[7300]\ttraining's multi_logloss: 0.614035\n",
      "[7400]\ttraining's multi_logloss: 0.614035\n",
      "[7500]\ttraining's multi_logloss: 0.614035\n",
      "[7600]\ttraining's multi_logloss: 0.614035\n",
      "[7700]\ttraining's multi_logloss: 0.614035\n",
      "[7800]\ttraining's multi_logloss: 0.614035\n",
      "[7900]\ttraining's multi_logloss: 0.614035\n",
      "[8000]\ttraining's multi_logloss: 0.614035\n",
      "[8100]\ttraining's multi_logloss: 0.614035\n",
      "[8200]\ttraining's multi_logloss: 0.614035\n",
      "[8300]\ttraining's multi_logloss: 0.614035\n",
      "[8400]\ttraining's multi_logloss: 0.614035\n",
      "[8500]\ttraining's multi_logloss: 0.614035\n",
      "[8600]\ttraining's multi_logloss: 0.614035\n",
      "[8700]\ttraining's multi_logloss: 0.614035\n",
      "[8800]\ttraining's multi_logloss: 0.614035\n",
      "[8900]\ttraining's multi_logloss: 0.614035\n",
      "[9000]\ttraining's multi_logloss: 0.614035\n",
      "[9100]\ttraining's multi_logloss: 0.614035\n",
      "[9200]\ttraining's multi_logloss: 0.614035\n",
      "[9300]\ttraining's multi_logloss: 0.614035\n",
      "[9400]\ttraining's multi_logloss: 0.614035\n",
      "[9500]\ttraining's multi_logloss: 0.614035\n",
      "[9600]\ttraining's multi_logloss: 0.614035\n",
      "[9700]\ttraining's multi_logloss: 0.614035\n",
      "[9800]\ttraining's multi_logloss: 0.614035\n",
      "[9900]\ttraining's multi_logloss: 0.614035\n",
      "[10000]\ttraining's multi_logloss: 0.614035\n",
      "Did not meet early stopping. Best iteration is:\n",
      "[284]\ttraining's multi_logloss: 0.614035\n",
      "\n",
      "训练最终 XGBoost 模型...\n",
      "[0]\ttrain-mlogloss:1.38273\n",
      "[100]\ttrain-mlogloss:1.09501\n",
      "[200]\ttrain-mlogloss:0.90638\n",
      "[300]\ttrain-mlogloss:0.77913\n",
      "[400]\ttrain-mlogloss:0.69159\n",
      "[500]\ttrain-mlogloss:0.63234\n",
      "[600]\ttrain-mlogloss:0.59172\n",
      "[700]\ttrain-mlogloss:0.56420\n",
      "[800]\ttrain-mlogloss:0.54563\n",
      "[900]\ttrain-mlogloss:0.53172\n",
      "[1000]\ttrain-mlogloss:0.52251\n",
      "[1100]\ttrain-mlogloss:0.51524\n",
      "[1200]\ttrain-mlogloss:0.50969\n",
      "[1300]\ttrain-mlogloss:0.50574\n",
      "[1400]\ttrain-mlogloss:0.50187\n",
      "[1500]\ttrain-mlogloss:0.49903\n",
      "[1600]\ttrain-mlogloss:0.49666\n",
      "[1700]\ttrain-mlogloss:0.49417\n",
      "[1800]\ttrain-mlogloss:0.49208\n",
      "[1900]\ttrain-mlogloss:0.49018\n",
      "[2000]\ttrain-mlogloss:0.48838\n",
      "[2100]\ttrain-mlogloss:0.48688\n",
      "[2200]\ttrain-mlogloss:0.48564\n",
      "[2300]\ttrain-mlogloss:0.48423\n",
      "[2400]\ttrain-mlogloss:0.48305\n",
      "[2500]\ttrain-mlogloss:0.48161\n",
      "[2600]\ttrain-mlogloss:0.48070\n",
      "[2700]\ttrain-mlogloss:0.47966\n",
      "[2800]\ttrain-mlogloss:0.47866\n",
      "[2900]\ttrain-mlogloss:0.47765\n",
      "[3000]\ttrain-mlogloss:0.47680\n",
      "[3100]\ttrain-mlogloss:0.47599\n",
      "[3200]\ttrain-mlogloss:0.47496\n",
      "[3300]\ttrain-mlogloss:0.47420\n",
      "[3400]\ttrain-mlogloss:0.47334\n",
      "[3500]\ttrain-mlogloss:0.47257\n",
      "[3600]\ttrain-mlogloss:0.47191\n",
      "[3700]\ttrain-mlogloss:0.47145\n",
      "[3800]\ttrain-mlogloss:0.47074\n",
      "[3900]\ttrain-mlogloss:0.47020\n",
      "[4000]\ttrain-mlogloss:0.46948\n",
      "[4100]\ttrain-mlogloss:0.46898\n",
      "[4200]\ttrain-mlogloss:0.46834\n",
      "[4300]\ttrain-mlogloss:0.46767\n",
      "[4400]\ttrain-mlogloss:0.46714\n",
      "[4500]\ttrain-mlogloss:0.46657\n",
      "[4600]\ttrain-mlogloss:0.46614\n",
      "[4700]\ttrain-mlogloss:0.46575\n",
      "[4800]\ttrain-mlogloss:0.46546\n",
      "[4900]\ttrain-mlogloss:0.46479\n",
      "[5000]\ttrain-mlogloss:0.46436\n",
      "[5100]\ttrain-mlogloss:0.46386\n",
      "[5200]\ttrain-mlogloss:0.46324\n",
      "[5300]\ttrain-mlogloss:0.46266\n",
      "[5400]\ttrain-mlogloss:0.46225\n",
      "[5500]\ttrain-mlogloss:0.46190\n",
      "[5600]\ttrain-mlogloss:0.46150\n",
      "[5700]\ttrain-mlogloss:0.46111\n",
      "[5800]\ttrain-mlogloss:0.46081\n",
      "[5900]\ttrain-mlogloss:0.46030\n",
      "[6000]\ttrain-mlogloss:0.45994\n",
      "[6100]\ttrain-mlogloss:0.45958\n",
      "[6200]\ttrain-mlogloss:0.45914\n",
      "[6300]\ttrain-mlogloss:0.45871\n",
      "[6400]\ttrain-mlogloss:0.45830\n",
      "[6500]\ttrain-mlogloss:0.45784\n",
      "[6600]\ttrain-mlogloss:0.45740\n",
      "[6700]\ttrain-mlogloss:0.45710\n",
      "[6800]\ttrain-mlogloss:0.45680\n",
      "[6900]\ttrain-mlogloss:0.45650\n",
      "[7000]\ttrain-mlogloss:0.45601\n",
      "[7100]\ttrain-mlogloss:0.45573\n",
      "[7200]\ttrain-mlogloss:0.45532\n",
      "[7300]\ttrain-mlogloss:0.45497\n",
      "[7400]\ttrain-mlogloss:0.45465\n",
      "[7500]\ttrain-mlogloss:0.45438\n",
      "[7600]\ttrain-mlogloss:0.45415\n",
      "[7700]\ttrain-mlogloss:0.45392\n",
      "[7800]\ttrain-mlogloss:0.45371\n",
      "[7900]\ttrain-mlogloss:0.45329\n",
      "[8000]\ttrain-mlogloss:0.45295\n",
      "[8100]\ttrain-mlogloss:0.45266\n",
      "[8200]\ttrain-mlogloss:0.45239\n",
      "[8300]\ttrain-mlogloss:0.45217\n",
      "[8400]\ttrain-mlogloss:0.45190\n",
      "[8500]\ttrain-mlogloss:0.45175\n",
      "[8600]\ttrain-mlogloss:0.45144\n",
      "[8700]\ttrain-mlogloss:0.45121\n",
      "[8800]\ttrain-mlogloss:0.45099\n",
      "[8900]\ttrain-mlogloss:0.45074\n",
      "[9000]\ttrain-mlogloss:0.45052\n",
      "[9100]\ttrain-mlogloss:0.45033\n",
      "[9200]\ttrain-mlogloss:0.45015\n",
      "[9300]\ttrain-mlogloss:0.44994\n",
      "[9400]\ttrain-mlogloss:0.44978\n",
      "[9500]\ttrain-mlogloss:0.44953\n",
      "[9600]\ttrain-mlogloss:0.44921\n",
      "[9700]\ttrain-mlogloss:0.44897\n",
      "[9800]\ttrain-mlogloss:0.44875\n",
      "[9900]\ttrain-mlogloss:0.44850\n",
      "[9999]\ttrain-mlogloss:0.44824\n",
      "\n",
      "训练最终 CatBoost 模型...\n",
      "0:\tlearn: 1.3830608\ttotal: 16.4ms\tremaining: 2m 43s\n",
      "100:\tlearn: 1.1351586\ttotal: 1.58s\tremaining: 2m 34s\n",
      "200:\tlearn: 0.9940312\ttotal: 3.16s\tremaining: 2m 33s\n",
      "300:\tlearn: 0.9047172\ttotal: 4.72s\tremaining: 2m 32s\n",
      "400:\tlearn: 0.8419779\ttotal: 6.24s\tremaining: 2m 29s\n",
      "500:\tlearn: 0.7975556\ttotal: 7.58s\tremaining: 2m 23s\n",
      "600:\tlearn: 0.7681801\ttotal: 8.49s\tremaining: 2m 12s\n",
      "700:\tlearn: 0.7538904\ttotal: 8.8s\tremaining: 1m 56s\n",
      "800:\tlearn: 0.7456481\ttotal: 9.05s\tremaining: 1m 43s\n",
      "900:\tlearn: 0.7406307\ttotal: 9.26s\tremaining: 1m 33s\n",
      "1000:\tlearn: 0.7378592\ttotal: 9.42s\tremaining: 1m 24s\n",
      "1100:\tlearn: 0.7355826\ttotal: 9.59s\tremaining: 1m 17s\n",
      "1200:\tlearn: 0.7330510\ttotal: 9.78s\tremaining: 1m 11s\n",
      "1300:\tlearn: 0.7302429\ttotal: 10s\tremaining: 1m 6s\n",
      "1400:\tlearn: 0.7290708\ttotal: 10.1s\tremaining: 1m 2s\n",
      "1500:\tlearn: 0.7270546\ttotal: 10.3s\tremaining: 58.6s\n",
      "1600:\tlearn: 0.7252786\ttotal: 10.5s\tremaining: 55.3s\n",
      "1700:\tlearn: 0.7231146\ttotal: 10.8s\tremaining: 52.5s\n",
      "1800:\tlearn: 0.7202990\ttotal: 11s\tremaining: 50.2s\n",
      "1900:\tlearn: 0.7165983\ttotal: 11.4s\tremaining: 48.5s\n",
      "2000:\tlearn: 0.7089386\ttotal: 12s\tremaining: 48.1s\n",
      "2100:\tlearn: 0.6969828\ttotal: 13.1s\tremaining: 49.1s\n",
      "2200:\tlearn: 0.6820003\ttotal: 14.4s\tremaining: 51s\n",
      "2300:\tlearn: 0.6656580\ttotal: 15.9s\tremaining: 53.2s\n",
      "2400:\tlearn: 0.6504355\ttotal: 17.4s\tremaining: 55s\n",
      "2500:\tlearn: 0.6363613\ttotal: 18.8s\tremaining: 56.5s\n",
      "2600:\tlearn: 0.6230732\ttotal: 20.3s\tremaining: 57.8s\n",
      "2700:\tlearn: 0.6107109\ttotal: 21.8s\tremaining: 58.8s\n",
      "2800:\tlearn: 0.5990030\ttotal: 23.2s\tremaining: 59.6s\n",
      "2900:\tlearn: 0.5882287\ttotal: 24.6s\tremaining: 1m\n",
      "3000:\tlearn: 0.5779241\ttotal: 26s\tremaining: 1m\n",
      "3100:\tlearn: 0.5680625\ttotal: 27.4s\tremaining: 1m 1s\n",
      "3200:\tlearn: 0.5586549\ttotal: 28.8s\tremaining: 1m 1s\n",
      "3300:\tlearn: 0.5491008\ttotal: 30.2s\tremaining: 1m 1s\n",
      "3400:\tlearn: 0.5402010\ttotal: 31.6s\tremaining: 1m 1s\n",
      "3500:\tlearn: 0.5317655\ttotal: 33s\tremaining: 1m 1s\n",
      "3600:\tlearn: 0.5233680\ttotal: 34.4s\tremaining: 1m 1s\n",
      "3700:\tlearn: 0.5154713\ttotal: 35.8s\tremaining: 1m\n",
      "3800:\tlearn: 0.5079927\ttotal: 37.2s\tremaining: 1m\n",
      "3900:\tlearn: 0.5004507\ttotal: 38.5s\tremaining: 1m\n",
      "4000:\tlearn: 0.4933870\ttotal: 39.9s\tremaining: 59.8s\n",
      "4100:\tlearn: 0.4862086\ttotal: 41.3s\tremaining: 59.4s\n",
      "4200:\tlearn: 0.4794091\ttotal: 42.7s\tremaining: 58.9s\n",
      "4300:\tlearn: 0.4726654\ttotal: 44s\tremaining: 58.4s\n",
      "4400:\tlearn: 0.4661131\ttotal: 45.4s\tremaining: 57.8s\n",
      "4500:\tlearn: 0.4593080\ttotal: 46.8s\tremaining: 57.1s\n",
      "4600:\tlearn: 0.4529803\ttotal: 48.1s\tremaining: 56.5s\n",
      "4700:\tlearn: 0.4463947\ttotal: 49.5s\tremaining: 55.8s\n",
      "4800:\tlearn: 0.4400156\ttotal: 50.9s\tremaining: 55.1s\n",
      "4900:\tlearn: 0.4335607\ttotal: 52.3s\tremaining: 54.4s\n",
      "5000:\tlearn: 0.4271367\ttotal: 53.7s\tremaining: 53.7s\n",
      "5100:\tlearn: 0.4212750\ttotal: 55.1s\tremaining: 52.9s\n",
      "5200:\tlearn: 0.4154752\ttotal: 56.5s\tremaining: 52.1s\n",
      "5300:\tlearn: 0.4098639\ttotal: 57.8s\tremaining: 51.3s\n",
      "5400:\tlearn: 0.4044887\ttotal: 59.2s\tremaining: 50.4s\n",
      "5500:\tlearn: 0.3990135\ttotal: 1m\tremaining: 49.6s\n",
      "5600:\tlearn: 0.3934736\ttotal: 1m 2s\tremaining: 48.7s\n",
      "5700:\tlearn: 0.3880260\ttotal: 1m 3s\tremaining: 47.8s\n",
      "5800:\tlearn: 0.3828080\ttotal: 1m 4s\tremaining: 46.9s\n",
      "5900:\tlearn: 0.3774884\ttotal: 1m 6s\tremaining: 46s\n",
      "6000:\tlearn: 0.3726116\ttotal: 1m 7s\tremaining: 45s\n",
      "6100:\tlearn: 0.3677764\ttotal: 1m 8s\tremaining: 44.1s\n",
      "6200:\tlearn: 0.3627591\ttotal: 1m 10s\tremaining: 43.1s\n",
      "6300:\tlearn: 0.3580497\ttotal: 1m 11s\tremaining: 42.1s\n",
      "6400:\tlearn: 0.3535671\ttotal: 1m 13s\tremaining: 41.1s\n",
      "6500:\tlearn: 0.3490754\ttotal: 1m 14s\tremaining: 40.1s\n",
      "6600:\tlearn: 0.3446456\ttotal: 1m 15s\tremaining: 39.1s\n",
      "6700:\tlearn: 0.3403833\ttotal: 1m 17s\tremaining: 38s\n",
      "6800:\tlearn: 0.3361176\ttotal: 1m 18s\tremaining: 37s\n",
      "6900:\tlearn: 0.3317124\ttotal: 1m 20s\tremaining: 35.9s\n",
      "7000:\tlearn: 0.3276861\ttotal: 1m 21s\tremaining: 34.9s\n",
      "7100:\tlearn: 0.3235431\ttotal: 1m 22s\tremaining: 33.8s\n",
      "7200:\tlearn: 0.3192942\ttotal: 1m 24s\tremaining: 32.7s\n",
      "7300:\tlearn: 0.3152327\ttotal: 1m 25s\tremaining: 31.6s\n",
      "7400:\tlearn: 0.3115130\ttotal: 1m 26s\tremaining: 30.5s\n",
      "7500:\tlearn: 0.3077057\ttotal: 1m 28s\tremaining: 29.4s\n",
      "7600:\tlearn: 0.3038869\ttotal: 1m 29s\tremaining: 28.3s\n",
      "7700:\tlearn: 0.3001672\ttotal: 1m 31s\tremaining: 27.2s\n",
      "7800:\tlearn: 0.2965141\ttotal: 1m 32s\tremaining: 26.1s\n",
      "7900:\tlearn: 0.2929207\ttotal: 1m 33s\tremaining: 25s\n",
      "8000:\tlearn: 0.2895080\ttotal: 1m 35s\tremaining: 23.8s\n",
      "8100:\tlearn: 0.2861348\ttotal: 1m 36s\tremaining: 22.7s\n",
      "8200:\tlearn: 0.2826656\ttotal: 1m 38s\tremaining: 21.5s\n",
      "8300:\tlearn: 0.2792850\ttotal: 1m 39s\tremaining: 20.4s\n",
      "8400:\tlearn: 0.2760084\ttotal: 1m 40s\tremaining: 19.2s\n",
      "8500:\tlearn: 0.2726935\ttotal: 1m 42s\tremaining: 18s\n",
      "8600:\tlearn: 0.2694589\ttotal: 1m 43s\tremaining: 16.9s\n",
      "8700:\tlearn: 0.2664991\ttotal: 1m 45s\tremaining: 15.7s\n",
      "8800:\tlearn: 0.2634787\ttotal: 1m 46s\tremaining: 14.5s\n",
      "8900:\tlearn: 0.2605347\ttotal: 1m 47s\tremaining: 13.3s\n",
      "9000:\tlearn: 0.2576206\ttotal: 1m 49s\tremaining: 12.1s\n",
      "9100:\tlearn: 0.2548286\ttotal: 1m 50s\tremaining: 10.9s\n",
      "9200:\tlearn: 0.2519945\ttotal: 1m 52s\tremaining: 9.73s\n",
      "9300:\tlearn: 0.2491096\ttotal: 1m 53s\tremaining: 8.53s\n",
      "9400:\tlearn: 0.2464132\ttotal: 1m 54s\tremaining: 7.32s\n",
      "9500:\tlearn: 0.2436910\ttotal: 1m 56s\tremaining: 6.11s\n",
      "9600:\tlearn: 0.2408744\ttotal: 1m 57s\tremaining: 4.89s\n",
      "9700:\tlearn: 0.2382253\ttotal: 1m 59s\tremaining: 3.67s\n",
      "9800:\tlearn: 0.2356644\ttotal: 2m\tremaining: 2.44s\n",
      "9900:\tlearn: 0.2332626\ttotal: 2m 1s\tremaining: 1.22s\n",
      "9999:\tlearn: 0.2308108\ttotal: 2m 3s\tremaining: 0us\n"
     ]
    }
   ],
   "source": [
    "# 使用最佳参数训练最终模型\n",
    "print(\"\\n训练最终 LightGBM 模型...\")\n",
    "lgb_model, lgb_test_pred = train_final_model(\n",
    "    \"lgb\", train_X, train_y_converted, test_X, \n",
    "    lgb_study.best_params, n_classes, seed\n",
    ")\n",
    "\n",
    "print(\"\\n训练最终 XGBoost 模型...\")\n",
    "xgb_model, xgb_test_pred = train_final_model(\n",
    "    \"xgb\", train_X, train_y_converted, test_X, \n",
    "    xgb_study.best_params, n_classes, seed\n",
    ")\n",
    "\n",
    "print(\"\\n训练最终 CatBoost 模型...\")\n",
    "cat_model, cat_test_pred = train_final_model(\n",
    "    \"cat\", train_X, train_y_converted, test_X, \n",
    "    cat_study.best_params, n_classes, seed\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
