{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
    "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5",
    "execution": {
     "iopub.execute_input": "2025-07-01T04:45:43.067090Z",
     "iopub.status.busy": "2025-07-01T04:45:43.066749Z",
     "iopub.status.idle": "2025-07-01T04:45:43.074139Z",
     "shell.execute_reply": "2025-07-01T04:45:43.073260Z",
     "shell.execute_reply.started": "2025-07-01T04:45:43.067068Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/kaggle/input/playground-series-s5e7/sample_submission.csv\n",
      "/kaggle/input/playground-series-s5e7/train.csv\n",
      "/kaggle/input/playground-series-s5e7/test.csv\n"
     ]
    }
   ],
   "source": [
    "# This Python 3 environment comes with many helpful analytics libraries installed\n",
    "# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n",
    "# For example, here's several helpful packages to load\n",
    "\n",
    "import numpy as np # linear algebra\n",
    "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n",
    "\n",
    "# Input data files are available in the read-only \"../input/\" directory\n",
    "# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n",
    "\n",
    "import os\n",
    "for dirname, _, filenames in os.walk('/kaggle/input'):\n",
    "    for filename in filenames:\n",
    "        print(os.path.join(dirname, filename))\n",
    "\n",
    "# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n",
    "# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-07-01T04:45:43.076042Z",
     "iopub.status.busy": "2025-07-01T04:45:43.075726Z",
     "iopub.status.idle": "2025-07-01T04:45:46.896164Z",
     "shell.execute_reply": "2025-07-01T04:45:46.894998Z",
     "shell.execute_reply.started": "2025-07-01T04:45:43.076018Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: flaml in /usr/local/lib/python3.11/dist-packages (2.3.5)\n",
      "Requirement already satisfied: NumPy>=1.17 in /usr/local/lib/python3.11/dist-packages (from flaml) (1.26.4)\n",
      "Requirement already satisfied: mkl_fft in /usr/local/lib/python3.11/dist-packages (from NumPy>=1.17->flaml) (1.3.8)\n",
      "Requirement already satisfied: mkl_random in /usr/local/lib/python3.11/dist-packages (from NumPy>=1.17->flaml) (1.2.4)\n",
      "Requirement already satisfied: mkl_umath in /usr/local/lib/python3.11/dist-packages (from NumPy>=1.17->flaml) (0.1.1)\n",
      "Requirement already satisfied: mkl in /usr/local/lib/python3.11/dist-packages (from NumPy>=1.17->flaml) (2025.1.0)\n",
      "Requirement already satisfied: tbb4py in /usr/local/lib/python3.11/dist-packages (from NumPy>=1.17->flaml) (2022.1.0)\n",
      "Requirement already satisfied: mkl-service in /usr/local/lib/python3.11/dist-packages (from NumPy>=1.17->flaml) (2.4.1)\n",
      "Requirement already satisfied: intel-openmp<2026,>=2024 in /usr/local/lib/python3.11/dist-packages (from mkl->NumPy>=1.17->flaml) (2024.2.0)\n",
      "Requirement already satisfied: tbb==2022.* in /usr/local/lib/python3.11/dist-packages (from mkl->NumPy>=1.17->flaml) (2022.1.0)\n",
      "Requirement already satisfied: tcmlib==1.* in /usr/local/lib/python3.11/dist-packages (from tbb==2022.*->mkl->NumPy>=1.17->flaml) (1.3.0)\n",
      "Requirement already satisfied: intel-cmplr-lib-rt in /usr/local/lib/python3.11/dist-packages (from mkl_umath->NumPy>=1.17->flaml) (2024.2.0)\n",
      "Requirement already satisfied: intel-cmplr-lib-ur==2024.2.0 in /usr/local/lib/python3.11/dist-packages (from intel-openmp<2026,>=2024->mkl->NumPy>=1.17->flaml) (2024.2.0)\n"
     ]
    }
   ],
   "source": [
    "# !pip install --upgrade scikit-learn \n",
    "!pip install flaml "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-07-01T05:38:52.825163Z",
     "iopub.status.busy": "2025-07-01T05:38:52.824839Z",
     "iopub.status.idle": "2025-07-01T05:38:52.870372Z",
     "shell.execute_reply": "2025-07-01T05:38:52.869474Z",
     "shell.execute_reply.started": "2025-07-01T05:38:52.825141Z"
    },
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# 导入必要库\n",
    "import warnings\n",
    "from collections import Counter  # 用于模型融合的投票统计\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import seaborn as sns\n",
    "from catboost import CatBoostClassifier  # 集成学习模型\n",
    "from flaml import AutoML  # 自动化机器学习工具\n",
    "from lightgbm import LGBMClassifier  # 轻量级梯度提升机\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import accuracy_score, classification_report  # 评估指标\n",
    "from sklearn.model_selection import KFold  # 交叉验证工具\n",
    "from sklearn.neural_network import MLPClassifier\n",
    "from sklearn.pipeline import Pipeline  # 管道工具（当前未直接使用）\n",
    "from sklearn.preprocessing import LabelEncoder, StandardScaler  # 标签编码和标准化\n",
    "from xgboost import XGBClassifier  # 极端梯度提升机\n",
    "\n",
    "warnings.filterwarnings('ignore')  # 关闭警告提示\n",
    "\n",
    "# 配置flaml日志级别（避免训练信息过多）\n",
    "import logging\n",
    "\n",
    "# 设置flaml的日志级别为WARNING（仅显示警告及以上日志）\n",
    "logging.getLogger('flaml.automl.logger').setLevel(logging.WARNING)\n",
    "\n",
    "# 加载数据（训练集、测试集、提交样例）\n",
    "# train = pd.read_csv('/kaggle/input/playground-series-s5e7/train.csv', index_col='id')  # 训练集（含目标变量）\n",
    "# test = pd.read_csv('/kaggle/input/playground-series-s5e7/test.csv', index_col='id')    # 测试集（需预测）\n",
    "# sub = pd.read_csv('/kaggle/input/playground-series-s5e7/sample_submission.csv', index_col='id')  # 提交格式样例\n",
    "train = pd.read_csv('./train.csv', index_col='id')  # 训练集（含目标变量）\n",
    "test = pd.read_csv('./test.csv', index_col='id')    # 测试集（需预测）\n",
    "sub = pd.read_csv('./sample_submission.csv', index_col='id')  # 提交格式样例\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-07-01T05:39:02.827786Z",
     "iopub.status.busy": "2025-07-01T05:39:02.827439Z",
     "iopub.status.idle": "2025-07-01T05:39:02.937002Z",
     "shell.execute_reply": "2025-07-01T05:39:02.936035Z",
     "shell.execute_reply.started": "2025-07-01T05:39:02.827758Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train shape: (18524, 8)\n",
      "Test shape: (6175, 7)\n",
      "\n",
      "Train columns: ['Time_spent_Alone', 'Stage_fear', 'Social_event_attendance', 'Going_outside', 'Drained_after_socializing', 'Friends_circle_size', 'Post_frequency', 'Personality']\n",
      "\n",
      "Train info:\n",
      "<class 'pandas.core.frame.DataFrame'>\n",
      "Index: 18524 entries, 0 to 18523\n",
      "Data columns (total 8 columns):\n",
      " #   Column                     Non-Null Count  Dtype  \n",
      "---  ------                     --------------  -----  \n",
      " 0   Time_spent_Alone           18524 non-null  float64\n",
      " 1   Stage_fear                 18524 non-null  object \n",
      " 2   Social_event_attendance    18524 non-null  float64\n",
      " 3   Going_outside              18524 non-null  float64\n",
      " 4   Drained_after_socializing  18524 non-null  object \n",
      " 5   Friends_circle_size        18524 non-null  float64\n",
      " 6   Post_frequency             18524 non-null  float64\n",
      " 7   Personality                18524 non-null  object \n",
      "dtypes: float64(5), object(3)\n",
      "memory usage: 1.3+ MB\n",
      "None\n",
      "\n",
      "Train missing values:\n",
      "Time_spent_Alone             0\n",
      "Stage_fear                   0\n",
      "Social_event_attendance      0\n",
      "Going_outside                0\n",
      "Drained_after_socializing    0\n",
      "Friends_circle_size          0\n",
      "Post_frequency               0\n",
      "Personality                  0\n",
      "dtype: int64\n",
      "\n",
      "Train target value counts:\n",
      "Personality\n",
      "extrovert    13699\n",
      "introvert     4825\n",
      "Name: count, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# 第一步：数据概览（查看数据基本信息）\n",
    "def load_and_overview_data(train, test):\n",
    "    \"\"\"打印数据基本信息（形状、列名、缺失值、目标分布）\"\"\"\n",
    "    print('Train shape:', train.shape)  # 训练集维度（样本数×特征数）\n",
    "    print('Test shape:', test.shape)    # 测试集维度\n",
    "    print('\\nTrain columns:', train.columns.tolist())  # 训练集特征列表\n",
    "    print('\\nTrain info:')\n",
    "    print(train.info())  # 数据类型和非空值统计\n",
    "    print('\\nTrain missing values:')\n",
    "    print(train.isnull().sum())  # 各特征缺失值数量\n",
    "    print('\\nTrain target value counts:')\n",
    "    print(train['Personality'].value_counts())  # 目标变量类别分布\n",
    "\n",
    "\n",
    "# 第二步：数据预处理（处理缺失值和类别特征）\n",
    "def preprocess_data(train, test):\n",
    "    \"\"\"\n",
    "    处理数值型和类别型特征的缺失值，统一类别格式\n",
    "    :return: 处理后的训练集、测试集、填充信息（用于复现）\n",
    "    \"\"\"\n",
    "    num_cols = ['Time_spent_Alone', 'Social_event_attendance', 'Going_outside', 'Friends_circle_size', 'Post_frequency']  # 数值特征列表\n",
    "    cat_cols_train = ['Stage_fear', 'Drained_after_socializing', 'Personality']  # 训练集类别特征（含目标）\n",
    "    cat_cols_test = ['Stage_fear', 'Drained_after_socializing']  # 测试集类别特征（不含目标）\n",
    "    \n",
    "    # 数值特征用训练集中位数填充（避免数据泄露）\n",
    "    num_medians = train[num_cols].median()\n",
    "    train[num_cols] = train[num_cols].fillna(num_medians)\n",
    "    test[num_cols] = test[num_cols].fillna(num_medians)\n",
    "    \n",
    "    # 类别特征统一格式（转小写、去空格）并填充缺失值为'missing'\n",
    "    for col in cat_cols_train:\n",
    "        train[col] = train[col].astype(str).str.strip().str.lower()  # 统一格式\n",
    "        train[col] = train[col].replace('nan', np.nan).fillna('missing')  # 填充缺失值\n",
    "    for col in cat_cols_test:\n",
    "        test[col] = test[col].astype(str).str.strip().str.lower()\n",
    "        test[col] = test[col].replace('nan', np.nan).fillna('missing')\n",
    "    \n",
    "    # 记录填充信息（用于后续可能的验证）\n",
    "    fill_info = {'num_medians': num_medians.to_dict(), 'cat_fill': 'missing'}\n",
    "    return train, test, fill_info\n",
    "\n",
    "\n",
    "# 第三步：特征工程（数值标准化+类别独热编码）\n",
    "def build_features(train, test, target_col='Personality'):\n",
    "    \"\"\"\n",
    "    构造模型输入特征（数值标准化+类别独热编码）\n",
    "    :return: 训练集特征、训练集目标、测试集特征\n",
    "    \"\"\"\n",
    "    num_cols = ['Time_spent_Alone', 'Social_event_attendance', 'Going_outside', 'Friends_circle_size', 'Post_frequency']\n",
    "    cat_cols = ['Stage_fear', 'Drained_after_socializing']  # 类别特征（不含目标）\n",
    "    \n",
    "    # 1. 数值特征标准化（Z-score标准化）\n",
    "    scaler = StandardScaler()\n",
    "    train_num = scaler.fit_transform(train[num_cols])  # 训练集拟合+转换\n",
    "    test_num = scaler.transform(test[num_cols])        # 测试集仅用训练集参数转换\n",
    "    \n",
    "    # 2. 类别特征独热编码（避免类别顺序影响模型）\n",
    "    train_cat = pd.get_dummies(train[cat_cols], prefix=cat_cols)  # 训练集独热编码\n",
    "    test_cat = pd.get_dummies(test[cat_cols], prefix=cat_cols)    # 测试集独热编码\n",
    "    # 对齐训练集和测试集的特征列（处理测试集可能缺失的类别）\n",
    "    train_cat, test_cat = train_cat.align(test_cat, join='left', axis=1, fill_value=0)\n",
    "    \n",
    "    # 3. 合并数值和类别特征（水平拼接）\n",
    "    X_train = np.hstack([train_num, train_cat.values])  # 训练集特征矩阵\n",
    "    X_test = np.hstack([test_num, test_cat.values])     # 测试集特征矩阵\n",
    "    y_train = train[target_col].values                  # 训练集目标变量\n",
    "    return X_train, y_train, X_test\n",
    "\n",
    "\n",
    "# -------------------- 数据处理流程 --------------------\n",
    "# 1. 数据概览（打印基本信息）\n",
    "load_and_overview_data(train, test)\n",
    "\n",
    "# 2. 预处理数据（填充缺失值）\n",
    "train, test, fill_info = preprocess_data(train, test)\n",
    "\n",
    "# 3. 特征工程（构造模型输入）\n",
    "X_train, y_train, X_test = build_features(train, test, target_col='Personality')\n",
    "\n",
    "# 4. 目标变量标签编码（将文本类别转为数值）\n",
    "le = LabelEncoder()\n",
    "y_train_enc = le.fit_transform(y_train)  # 训练集目标编码\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "**Using AutoML and LightGBM**\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-07-01T05:39:07.105168Z",
     "iopub.status.busy": "2025-07-01T05:39:07.104792Z",
     "iopub.status.idle": "2025-07-01T05:41:10.951290Z",
     "shell.execute_reply": "2025-07-01T05:41:10.950328Z",
     "shell.execute_reply.started": "2025-07-01T05:39:07.105143Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[flaml.automl.logger: 07-01 05:39:07] {1752} INFO - task = classification\n",
      "[flaml.automl.logger: 07-01 05:39:07] {1763} INFO - Evaluation method: cv\n",
      "[flaml.automl.logger: 07-01 05:39:07] {1862} INFO - Minimizing error metric: 1-accuracy\n",
      "[flaml.automl.logger: 07-01 05:39:07] {1979} INFO - List of ML learners in AutoML Run: ['lgbm', 'xgboost', 'rf', 'extra_tree']\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2282} INFO - iteration 0, current learner lgbm\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2417} INFO - Estimated sufficient time budget=1282s. Estimated necessary time budget=1s.\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2466} INFO -  at 0.1s,\testimator lgbm's best error=0.0320,\tbest estimator lgbm's best error=0.0320\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2282} INFO - iteration 1, current learner lgbm\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2466} INFO -  at 0.3s,\testimator lgbm's best error=0.0320,\tbest estimator lgbm's best error=0.0320\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2282} INFO - iteration 2, current learner lgbm\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2466} INFO -  at 0.4s,\testimator lgbm's best error=0.0320,\tbest estimator lgbm's best error=0.0320\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2282} INFO - iteration 3, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2466} INFO -  at 0.5s,\testimator xgboost's best error=0.0319,\tbest estimator xgboost's best error=0.0319\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2282} INFO - iteration 4, current learner lgbm\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2466} INFO -  at 0.7s,\testimator lgbm's best error=0.0314,\tbest estimator lgbm's best error=0.0314\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2282} INFO - iteration 5, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2466} INFO -  at 0.8s,\testimator xgboost's best error=0.0319,\tbest estimator lgbm's best error=0.0314\n",
      "[flaml.automl.logger: 07-01 05:39:07] {2282} INFO - iteration 6, current learner lgbm\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2466} INFO -  at 1.0s,\testimator lgbm's best error=0.0311,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2282} INFO - iteration 7, current learner lgbm\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2466} INFO -  at 1.1s,\testimator lgbm's best error=0.0311,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2282} INFO - iteration 8, current learner lgbm\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2466} INFO -  at 1.2s,\testimator lgbm's best error=0.0311,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2282} INFO - iteration 9, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2466} INFO -  at 1.4s,\testimator xgboost's best error=0.0319,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2282} INFO - iteration 10, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2466} INFO -  at 1.6s,\testimator extra_tree's best error=0.0317,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2282} INFO - iteration 11, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2466} INFO -  at 1.7s,\testimator xgboost's best error=0.0319,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2282} INFO - iteration 12, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2466} INFO -  at 1.8s,\testimator xgboost's best error=0.0319,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:08] {2282} INFO - iteration 13, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:09] {2466} INFO -  at 1.9s,\testimator xgboost's best error=0.0319,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:09] {2282} INFO - iteration 14, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:09] {2466} INFO -  at 2.2s,\testimator extra_tree's best error=0.0313,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:09] {2282} INFO - iteration 15, current learner lgbm\n",
      "[flaml.automl.logger: 07-01 05:39:09] {2466} INFO -  at 2.4s,\testimator lgbm's best error=0.0311,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:09] {2282} INFO - iteration 16, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:09] {2466} INFO -  at 2.6s,\testimator extra_tree's best error=0.0313,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:09] {2282} INFO - iteration 17, current learner lgbm\n",
      "[flaml.automl.logger: 07-01 05:39:09] {2466} INFO -  at 2.8s,\testimator lgbm's best error=0.0311,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:09] {2282} INFO - iteration 18, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:10] {2466} INFO -  at 3.1s,\testimator extra_tree's best error=0.0313,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:10] {2282} INFO - iteration 19, current learner rf\n",
      "[flaml.automl.logger: 07-01 05:39:10] {2466} INFO -  at 3.3s,\testimator rf's best error=0.0328,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:10] {2282} INFO - iteration 20, current learner rf\n",
      "[flaml.automl.logger: 07-01 05:39:10] {2466} INFO -  at 3.6s,\testimator rf's best error=0.0317,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:10] {2282} INFO - iteration 21, current learner rf\n",
      "[flaml.automl.logger: 07-01 05:39:10] {2466} INFO -  at 3.8s,\testimator rf's best error=0.0317,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:10] {2282} INFO - iteration 22, current learner lgbm\n",
      "[flaml.automl.logger: 07-01 05:39:11] {2466} INFO -  at 4.2s,\testimator lgbm's best error=0.0311,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:11] {2282} INFO - iteration 23, current learner lgbm\n",
      "[flaml.automl.logger: 07-01 05:39:11] {2466} INFO -  at 4.8s,\testimator lgbm's best error=0.0311,\tbest estimator lgbm's best error=0.0311\n",
      "[flaml.automl.logger: 07-01 05:39:11] {2282} INFO - iteration 24, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:12] {2466} INFO -  at 5.0s,\testimator extra_tree's best error=0.0310,\tbest estimator extra_tree's best error=0.0310\n",
      "[flaml.automl.logger: 07-01 05:39:12] {2282} INFO - iteration 25, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:12] {2466} INFO -  at 5.3s,\testimator extra_tree's best error=0.0310,\tbest estimator extra_tree's best error=0.0310\n",
      "[flaml.automl.logger: 07-01 05:39:12] {2282} INFO - iteration 26, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:12] {2466} INFO -  at 5.5s,\testimator xgboost's best error=0.0319,\tbest estimator extra_tree's best error=0.0310\n",
      "[flaml.automl.logger: 07-01 05:39:12] {2282} INFO - iteration 27, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:12] {2466} INFO -  at 5.8s,\testimator extra_tree's best error=0.0310,\tbest estimator extra_tree's best error=0.0310\n",
      "[flaml.automl.logger: 07-01 05:39:12] {2282} INFO - iteration 28, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:13] {2466} INFO -  at 6.2s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:13] {2282} INFO - iteration 29, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:13] {2466} INFO -  at 6.4s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:13] {2282} INFO - iteration 30, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:13] {2466} INFO -  at 6.6s,\testimator xgboost's best error=0.0317,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:13] {2282} INFO - iteration 31, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:13] {2466} INFO -  at 6.8s,\testimator xgboost's best error=0.0317,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:13] {2282} INFO - iteration 32, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:14] {2466} INFO -  at 7.2s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:14] {2282} INFO - iteration 33, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:14] {2466} INFO -  at 7.6s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:14] {2282} INFO - iteration 34, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:14] {2466} INFO -  at 7.7s,\testimator xgboost's best error=0.0316,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:14] {2282} INFO - iteration 35, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:15] {2466} INFO -  at 8.0s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:15] {2282} INFO - iteration 36, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:15] {2466} INFO -  at 8.5s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:15] {2282} INFO - iteration 37, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:15] {2466} INFO -  at 8.9s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:15] {2282} INFO - iteration 38, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:17] {2466} INFO -  at 9.9s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:17] {2282} INFO - iteration 39, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:17] {2466} INFO -  at 10.4s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:17] {2282} INFO - iteration 40, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:18] {2466} INFO -  at 11.0s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:18] {2282} INFO - iteration 41, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:18] {2466} INFO -  at 11.4s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:18] {2282} INFO - iteration 42, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:18] {2466} INFO -  at 11.5s,\testimator xgboost's best error=0.0316,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:18] {2282} INFO - iteration 43, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:19] {2466} INFO -  at 12.4s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:19] {2282} INFO - iteration 44, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:20] {2466} INFO -  at 12.9s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:20] {2282} INFO - iteration 45, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:20] {2466} INFO -  at 13.4s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:20] {2282} INFO - iteration 46, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:21] {2466} INFO -  at 14.0s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:21] {2282} INFO - iteration 47, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:21] {2466} INFO -  at 14.4s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:21] {2282} INFO - iteration 48, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:21] {2466} INFO -  at 14.5s,\testimator xgboost's best error=0.0313,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:21] {2282} INFO - iteration 49, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:21] {2466} INFO -  at 14.7s,\testimator xgboost's best error=0.0313,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:21] {2282} INFO - iteration 50, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:22] {2466} INFO -  at 15.4s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:22] {2282} INFO - iteration 51, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:22] {2466} INFO -  at 15.6s,\testimator xgboost's best error=0.0313,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:22] {2282} INFO - iteration 52, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:22] {2466} INFO -  at 15.7s,\testimator xgboost's best error=0.0313,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:22] {2282} INFO - iteration 53, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:22] {2466} INFO -  at 15.9s,\testimator xgboost's best error=0.0311,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:23] {2282} INFO - iteration 54, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:23] {2466} INFO -  at 16.0s,\testimator xgboost's best error=0.0311,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:23] {2282} INFO - iteration 55, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:23] {2466} INFO -  at 16.4s,\testimator extra_tree's best error=0.0309,\tbest estimator extra_tree's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:23] {2282} INFO - iteration 56, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:23] {2466} INFO -  at 16.6s,\testimator xgboost's best error=0.0309,\tbest estimator xgboost's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:23] {2282} INFO - iteration 57, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:23] {2466} INFO -  at 16.7s,\testimator xgboost's best error=0.0309,\tbest estimator xgboost's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:23] {2282} INFO - iteration 58, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:24] {2466} INFO -  at 17.0s,\testimator xgboost's best error=0.0309,\tbest estimator xgboost's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:24] {2282} INFO - iteration 59, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:24] {2466} INFO -  at 17.2s,\testimator xgboost's best error=0.0309,\tbest estimator xgboost's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:24] {2282} INFO - iteration 60, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:24] {2466} INFO -  at 17.4s,\testimator xgboost's best error=0.0309,\tbest estimator xgboost's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:24] {2282} INFO - iteration 61, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:24] {2466} INFO -  at 17.6s,\testimator xgboost's best error=0.0309,\tbest estimator xgboost's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:24] {2282} INFO - iteration 62, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:24] {2466} INFO -  at 17.8s,\testimator xgboost's best error=0.0309,\tbest estimator xgboost's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:24] {2282} INFO - iteration 63, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:25] {2466} INFO -  at 17.9s,\testimator xgboost's best error=0.0309,\tbest estimator xgboost's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:25] {2282} INFO - iteration 64, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:25] {2466} INFO -  at 18.2s,\testimator xgboost's best error=0.0309,\tbest estimator xgboost's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:25] {2282} INFO - iteration 65, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:25] {2466} INFO -  at 18.4s,\testimator xgboost's best error=0.0309,\tbest estimator xgboost's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:25] {2282} INFO - iteration 66, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:25] {2466} INFO -  at 18.6s,\testimator xgboost's best error=0.0309,\tbest estimator xgboost's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:25] {2282} INFO - iteration 67, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:25] {2466} INFO -  at 18.7s,\testimator xgboost's best error=0.0309,\tbest estimator xgboost's best error=0.0309\n",
      "[flaml.automl.logger: 07-01 05:39:25] {2282} INFO - iteration 68, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:26] {2466} INFO -  at 19.2s,\testimator xgboost's best error=0.0307,\tbest estimator xgboost's best error=0.0307\n",
      "[flaml.automl.logger: 07-01 05:39:26] {2282} INFO - iteration 69, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:27] {2466} INFO -  at 20.0s,\testimator xgboost's best error=0.0307,\tbest estimator xgboost's best error=0.0307\n",
      "[flaml.automl.logger: 07-01 05:39:27] {2282} INFO - iteration 70, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:27] {2466} INFO -  at 20.5s,\testimator xgboost's best error=0.0307,\tbest estimator xgboost's best error=0.0307\n",
      "[flaml.automl.logger: 07-01 05:39:27] {2282} INFO - iteration 71, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:28] {2466} INFO -  at 21.3s,\testimator xgboost's best error=0.0307,\tbest estimator xgboost's best error=0.0307\n",
      "[flaml.automl.logger: 07-01 05:39:28] {2282} INFO - iteration 72, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:28] {2466} INFO -  at 21.6s,\testimator xgboost's best error=0.0307,\tbest estimator xgboost's best error=0.0307\n",
      "[flaml.automl.logger: 07-01 05:39:28] {2282} INFO - iteration 73, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:31] {2466} INFO -  at 24.3s,\testimator xgboost's best error=0.0307,\tbest estimator xgboost's best error=0.0307\n",
      "[flaml.automl.logger: 07-01 05:39:31] {2282} INFO - iteration 74, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:31] {2466} INFO -  at 24.4s,\testimator xgboost's best error=0.0307,\tbest estimator xgboost's best error=0.0307\n",
      "[flaml.automl.logger: 07-01 05:39:31] {2282} INFO - iteration 75, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:31] {2466} INFO -  at 24.8s,\testimator xgboost's best error=0.0307,\tbest estimator xgboost's best error=0.0307\n",
      "[flaml.automl.logger: 07-01 05:39:31] {2282} INFO - iteration 76, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:32] {2466} INFO -  at 25.3s,\testimator xgboost's best error=0.0307,\tbest estimator xgboost's best error=0.0307\n",
      "[flaml.automl.logger: 07-01 05:39:32] {2282} INFO - iteration 77, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:33] {2466} INFO -  at 26.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:33] {2282} INFO - iteration 78, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:33] {2466} INFO -  at 26.5s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:33] {2282} INFO - iteration 79, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:33] {2466} INFO -  at 26.8s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:33] {2282} INFO - iteration 80, current learner rf\n",
      "[flaml.automl.logger: 07-01 05:39:34] {2466} INFO -  at 27.2s,\testimator rf's best error=0.0316,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:34] {2282} INFO - iteration 81, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:35] {2466} INFO -  at 28.7s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:35] {2282} INFO - iteration 82, current learner rf\n",
      "[flaml.automl.logger: 07-01 05:39:36] {2466} INFO -  at 29.0s,\testimator rf's best error=0.0316,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:36] {2282} INFO - iteration 83, current learner rf\n",
      "[flaml.automl.logger: 07-01 05:39:36] {2466} INFO -  at 29.4s,\testimator rf's best error=0.0311,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:36] {2282} INFO - iteration 84, current learner rf\n",
      "[flaml.automl.logger: 07-01 05:39:36] {2466} INFO -  at 29.8s,\testimator rf's best error=0.0311,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:36] {2282} INFO - iteration 85, current learner rf\n",
      "[flaml.automl.logger: 07-01 05:39:37] {2466} INFO -  at 30.8s,\testimator rf's best error=0.0311,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:37] {2282} INFO - iteration 86, current learner rf\n",
      "[flaml.automl.logger: 07-01 05:39:38] {2466} INFO -  at 31.3s,\testimator rf's best error=0.0311,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:38] {2282} INFO - iteration 87, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:39] {2466} INFO -  at 32.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:39] {2282} INFO - iteration 88, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:39] {2466} INFO -  at 32.5s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:39] {2282} INFO - iteration 89, current learner rf\n",
      "[flaml.automl.logger: 07-01 05:39:40] {2466} INFO -  at 33.6s,\testimator rf's best error=0.0311,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:40] {2282} INFO - iteration 90, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:42] {2466} INFO -  at 35.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:42] {2282} INFO - iteration 91, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:42] {2466} INFO -  at 35.6s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:42] {2282} INFO - iteration 92, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:43] {2466} INFO -  at 35.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:43] {2282} INFO - iteration 93, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:43] {2466} INFO -  at 36.2s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:43] {2282} INFO - iteration 94, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:44] {2466} INFO -  at 37.1s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:44] {2282} INFO - iteration 95, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:44] {2466} INFO -  at 37.7s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:44] {2282} INFO - iteration 96, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:46] {2466} INFO -  at 39.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:46] {2282} INFO - iteration 97, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:46] {2466} INFO -  at 39.5s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:46] {2282} INFO - iteration 98, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:47] {2466} INFO -  at 40.8s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:47] {2282} INFO - iteration 99, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:48] {2466} INFO -  at 41.2s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:48] {2282} INFO - iteration 100, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:49] {2466} INFO -  at 42.2s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:49] {2282} INFO - iteration 101, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:49] {2466} INFO -  at 42.6s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:49] {2282} INFO - iteration 102, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:50] {2466} INFO -  at 43.4s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:50] {2282} INFO - iteration 103, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:51] {2466} INFO -  at 44.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:51] {2282} INFO - iteration 104, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:52] {2466} INFO -  at 45.8s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:52] {2282} INFO - iteration 105, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:53] {2466} INFO -  at 46.1s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:53] {2282} INFO - iteration 106, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:53] {2466} INFO -  at 46.8s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:53] {2282} INFO - iteration 107, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:54] {2466} INFO -  at 47.2s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:54] {2282} INFO - iteration 108, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:54] {2466} INFO -  at 47.6s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:54] {2282} INFO - iteration 109, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:55] {2466} INFO -  at 48.7s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:55] {2282} INFO - iteration 110, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:57] {2466} INFO -  at 50.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:57] {2282} INFO - iteration 111, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:57] {2466} INFO -  at 50.6s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:57] {2282} INFO - iteration 112, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:57] {2466} INFO -  at 50.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:57] {2282} INFO - iteration 113, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:39:58] {2466} INFO -  at 51.4s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:58] {2282} INFO - iteration 114, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:59] {2466} INFO -  at 52.1s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:59] {2282} INFO - iteration 115, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:39:59] {2466} INFO -  at 52.7s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:39:59] {2282} INFO - iteration 116, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:00] {2466} INFO -  at 53.4s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:00] {2282} INFO - iteration 117, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:00] {2466} INFO -  at 53.7s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:00] {2282} INFO - iteration 118, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:04] {2466} INFO -  at 57.5s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:04] {2282} INFO - iteration 119, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:05] {2466} INFO -  at 58.5s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:05] {2282} INFO - iteration 120, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:06] {2466} INFO -  at 58.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:06] {2282} INFO - iteration 121, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:06] {2466} INFO -  at 59.3s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:06] {2282} INFO - iteration 122, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:08] {2466} INFO -  at 61.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:08] {2282} INFO - iteration 123, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:08] {2466} INFO -  at 61.4s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:08] {2282} INFO - iteration 124, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:09] {2466} INFO -  at 62.3s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:09] {2282} INFO - iteration 125, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:09] {2466} INFO -  at 62.8s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:09] {2282} INFO - iteration 126, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:10] {2466} INFO -  at 63.4s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:10] {2282} INFO - iteration 127, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:11] {2466} INFO -  at 63.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:11] {2282} INFO - iteration 128, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:11] {2466} INFO -  at 64.4s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:11] {2282} INFO - iteration 129, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:12] {2466} INFO -  at 65.2s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:12] {2282} INFO - iteration 130, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:12] {2466} INFO -  at 65.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:12] {2282} INFO - iteration 131, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:13] {2466} INFO -  at 66.5s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:13] {2282} INFO - iteration 132, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:14] {2466} INFO -  at 67.5s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:14] {2282} INFO - iteration 133, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:14] {2466} INFO -  at 67.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:14] {2282} INFO - iteration 134, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:16] {2466} INFO -  at 68.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:16] {2282} INFO - iteration 135, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:16] {2466} INFO -  at 69.7s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:16] {2282} INFO - iteration 136, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:17] {2466} INFO -  at 70.3s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:17] {2282} INFO - iteration 137, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:18] {2466} INFO -  at 70.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:18] {2282} INFO - iteration 138, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:19] {2466} INFO -  at 72.2s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:19] {2282} INFO - iteration 139, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:20] {2466} INFO -  at 73.1s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:20] {2282} INFO - iteration 140, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:20] {2466} INFO -  at 73.5s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:20] {2282} INFO - iteration 141, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:21] {2466} INFO -  at 74.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:21] {2282} INFO - iteration 142, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:21] {2466} INFO -  at 74.4s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:21] {2282} INFO - iteration 143, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:22] {2466} INFO -  at 74.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:22] {2282} INFO - iteration 144, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:23] {2466} INFO -  at 76.1s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:23] {2282} INFO - iteration 145, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:24] {2466} INFO -  at 77.4s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:24] {2282} INFO - iteration 146, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:24] {2466} INFO -  at 77.7s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:24] {2282} INFO - iteration 147, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:25] {2466} INFO -  at 78.7s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:25] {2282} INFO - iteration 148, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:26] {2466} INFO -  at 79.1s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:26] {2282} INFO - iteration 149, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:26] {2466} INFO -  at 79.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:26] {2282} INFO - iteration 150, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:27] {2466} INFO -  at 80.4s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:27] {2282} INFO - iteration 151, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:28] {2466} INFO -  at 80.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:28] {2282} INFO - iteration 152, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:28] {2466} INFO -  at 81.8s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:28] {2282} INFO - iteration 153, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:30] {2466} INFO -  at 83.3s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:30] {2282} INFO - iteration 154, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:30] {2466} INFO -  at 83.6s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:30] {2282} INFO - iteration 155, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:31] {2466} INFO -  at 84.2s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:31] {2282} INFO - iteration 156, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:31] {2466} INFO -  at 84.6s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:31] {2282} INFO - iteration 157, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:32] {2466} INFO -  at 85.4s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:32] {2282} INFO - iteration 158, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:34] {2466} INFO -  at 86.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:34] {2282} INFO - iteration 159, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:34] {2466} INFO -  at 87.2s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:34] {2282} INFO - iteration 160, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:36] {2466} INFO -  at 89.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:36] {2282} INFO - iteration 161, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:36] {2466} INFO -  at 89.3s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:36] {2282} INFO - iteration 162, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:37] {2466} INFO -  at 90.1s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:37] {2282} INFO - iteration 163, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:37] {2466} INFO -  at 90.5s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:37] {2282} INFO - iteration 164, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:38] {2466} INFO -  at 91.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:38] {2282} INFO - iteration 165, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:38] {2466} INFO -  at 91.8s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:38] {2282} INFO - iteration 166, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:39] {2466} INFO -  at 92.2s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:39] {2282} INFO - iteration 167, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:39] {2466} INFO -  at 92.8s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:39] {2282} INFO - iteration 168, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:40] {2466} INFO -  at 93.8s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:40] {2282} INFO - iteration 169, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:41] {2466} INFO -  at 94.3s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:41] {2282} INFO - iteration 170, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:42] {2466} INFO -  at 95.4s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:42] {2282} INFO - iteration 171, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:43] {2466} INFO -  at 96.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:43] {2282} INFO - iteration 172, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:43] {2466} INFO -  at 96.7s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:43] {2282} INFO - iteration 173, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:44] {2466} INFO -  at 97.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:44] {2282} INFO - iteration 174, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:46] {2466} INFO -  at 99.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:46] {2282} INFO - iteration 175, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:46] {2466} INFO -  at 99.5s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:46] {2282} INFO - iteration 176, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:49] {2466} INFO -  at 102.1s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:49] {2282} INFO - iteration 177, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:49] {2466} INFO -  at 102.3s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:49] {2282} INFO - iteration 178, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:49] {2466} INFO -  at 102.7s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:49] {2282} INFO - iteration 179, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:51] {2466} INFO -  at 104.5s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:51] {2282} INFO - iteration 180, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:52] {2466} INFO -  at 105.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:52] {2282} INFO - iteration 181, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:52] {2466} INFO -  at 105.8s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:52] {2282} INFO - iteration 182, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:53] {2466} INFO -  at 106.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:53] {2282} INFO - iteration 183, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:55] {2466} INFO -  at 108.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:55] {2282} INFO - iteration 184, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:55] {2466} INFO -  at 108.3s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:55] {2282} INFO - iteration 185, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:56] {2466} INFO -  at 109.7s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:56] {2282} INFO - iteration 186, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:57] {2466} INFO -  at 110.4s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:57] {2282} INFO - iteration 187, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:58] {2466} INFO -  at 111.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:58] {2282} INFO - iteration 188, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:58] {2466} INFO -  at 111.6s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:58] {2282} INFO - iteration 189, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:40:59] {2466} INFO -  at 112.0s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:59] {2282} INFO - iteration 190, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:40:59] {2466} INFO -  at 112.2s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:40:59] {2282} INFO - iteration 191, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:41:01] {2466} INFO -  at 114.3s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:41:01] {2282} INFO - iteration 192, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:41:01] {2466} INFO -  at 114.7s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:41:01] {2282} INFO - iteration 193, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:41:02] {2466} INFO -  at 115.2s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:41:02] {2282} INFO - iteration 194, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:41:03] {2466} INFO -  at 116.9s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:41:03] {2282} INFO - iteration 195, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:41:04] {2466} INFO -  at 117.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:41:04] {2282} INFO - iteration 196, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:41:04] {2466} INFO -  at 117.5s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:41:04] {2282} INFO - iteration 197, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:41:05] {2466} INFO -  at 118.0s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:41:05] {2282} INFO - iteration 198, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:41:05] {2466} INFO -  at 118.5s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:41:05] {2282} INFO - iteration 199, current learner extra_tree\n",
      "[flaml.automl.logger: 07-01 05:41:06] {2466} INFO -  at 118.9s,\testimator extra_tree's best error=0.0309,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:41:06] {2282} INFO - iteration 200, current learner xgboost\n",
      "[flaml.automl.logger: 07-01 05:41:07] {2466} INFO -  at 120.0s,\testimator xgboost's best error=0.0306,\tbest estimator xgboost's best error=0.0306\n",
      "[flaml.automl.logger: 07-01 05:41:07] {2724} INFO - retrain xgboost for 0.1s\n",
      "[flaml.automl.logger: 07-01 05:41:07] {2727} INFO - retrained model: XGBClassifier(base_score=None, booster=None, callbacks=[],\n",
      "              colsample_bylevel=0.5192758471439969, colsample_bynode=None,\n",
      "              colsample_bytree=0.7671746261992868, device=None,\n",
      "              early_stopping_rounds=None, enable_categorical=False,\n",
      "              eval_metric=None, feature_types=None, gamma=None,\n",
      "              grow_policy='lossguide', importance_type=None,\n",
      "              interaction_constraints=None, learning_rate=0.08858393415261269,\n",
      "              max_bin=None, max_cat_threshold=None, max_cat_to_onehot=None,\n",
      "              max_delta_step=None, max_depth=0, max_leaves=15,\n",
      "              min_child_weight=0.013175257536552111, missing=nan,\n",
      "              monotone_constraints=None, multi_strategy=None, n_estimators=83,\n",
      "              n_jobs=-1, num_parallel_tree=None, random_state=None, ...)\n",
      "[flaml.automl.logger: 07-01 05:41:07] {2009} INFO - fit succeeded\n",
      "[flaml.automl.logger: 07-01 05:41:07] {2010} INFO - Time taken to find the best model: 26.011930227279663\n",
      "最佳模型： <flaml.automl.model.XGBoostSklearnEstimator object at 0x7ce72c87f150>\n",
      "最佳参数： {'n_estimators': 83, 'max_leaves': 15, 'min_child_weight': 0.013175257536552111, 'learning_rate': 0.08858393415261269, 'subsample': 0.8336156831709344, 'colsample_bylevel': 0.5192758471439969, 'colsample_bytree': 0.7671746261992868, 'reg_alpha': 1.6455347548766661, 'reg_lambda': 0.4985694185997577}\n",
      "最佳分数： 0.030608875986277352\n",
      "\n",
      "训练集评估：\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "   extrovert       0.98      0.98      0.98     13699\n",
      "   introvert       0.95      0.94      0.94      4825\n",
      "\n",
      "    accuracy                           0.97     18524\n",
      "   macro avg       0.96      0.96      0.96     18524\n",
      "weighted avg       0.97      0.97      0.97     18524\n",
      "\n",
      "训练集准确率： 0.9696069963290866\n",
      "Fold 1 accuracy: 0.9687\n",
      "Fold 2 accuracy: 0.9671\n",
      "Fold 3 accuracy: 0.9695\n",
      "Fold 4 accuracy: 0.9679\n",
      "Fold 5 accuracy: 0.9719\n",
      "\n",
      "5折交叉验证平均准确率： 0.9690133291360983\n",
      "LGBM OOF分类报告：\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "   extrovert       0.98      0.98      0.98     13699\n",
      "   introvert       0.94      0.94      0.94      4825\n",
      "\n",
      "    accuracy                           0.97     18524\n",
      "   macro avg       0.96      0.96      0.96     18524\n",
      "weighted avg       0.97      0.97      0.97     18524\n",
      "\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# -------------------- AutoML 自动化训练 --------------------\n",
    "# 初始化AutoML并设置参数（自动选择最优模型和超参数）\n",
    "automl = AutoML()\n",
    "settings = {\n",
    "    \"time_budget\": 120,  # 训练时间预算（秒）\n",
    "    \"task\": 'classification',  # 任务类型：分类\n",
    "    \"log_file_name\": 'flaml.log',  # 日志文件\n",
    "    \"metric\": 'accuracy',  # 优化指标：准确率\n",
    "    \"estimator_list\": ['lgbm', 'xgboost', 'rf', 'extra_tree'],  # 候选模型列表\n",
    "}\n",
    "automl.fit(X_train=X_train, y_train=y_train_enc, **settings)  # 启动自动训练\n",
    "\n",
    "# 输出AutoML结果\n",
    "print('最佳模型：', automl.model)          # 自动选择的最优模型\n",
    "print('最佳参数：', automl.best_config)    # 最优模型的超参数\n",
    "print('最佳分数：', automl.best_loss)      # 最优模型的损失值\n",
    "\n",
    "# 生成测试集预测（编码后转回原始类别）\n",
    "automl_preds_enc = automl.predict(X_test)\n",
    "automl_preds = le.inverse_transform(automl_preds_enc)  # 逆编码得到原始类别\n",
    "\n",
    "# 训练集评估（验证过拟合情况）\n",
    "automl_train_preds_enc = automl.predict(X_train)\n",
    "automl_train_preds = le.inverse_transform(automl_train_preds_enc)\n",
    "print('\\n训练集评估：')\n",
    "print(classification_report(y_train, automl_train_preds))  # 分类报告（精确率、召回率等）\n",
    "print('训练集准确率：', accuracy_score(y_train, automl_train_preds))  # 准确率\n",
    "\n",
    "# 保存AutoML预测结果（调整类别首字母大写，符合提交格式）\n",
    "df_automl_pred = pd.DataFrame({'id': test.index, 'Personality': automl_preds})\n",
    "df_automl_pred['Personality'] = df_automl_pred['Personality'].str.capitalize()\n",
    "# df_automl_pred.to_csv('submission_automl.csv', index=False)\n",
    "\n",
    "\n",
    "# -------------------- LGBM 模型（KFold交叉验证） --------------------\n",
    "# 定义LGBM参数（手动调参）\n",
    "lgbm_params = {\n",
    "    'n_estimators': 200,    # 迭代次数\n",
    "    'learning_rate': 0.05,  # 学习率\n",
    "    'max_depth': 7,         # 树最大深度\n",
    "    'random_state': 42      # 随机种子（保证可复现）\n",
    "}\n",
    "model = LGBMClassifier(**lgbm_params)\n",
    "\n",
    "# 5折交叉验证（评估模型泛化能力）\n",
    "kf = KFold(n_splits=5, shuffle=True, random_state=42)  # 5折，随机打乱数据\n",
    "lgbm_oof_preds = np.zeros(len(X_train), dtype=int)     # 保存各折验证集预测结果\n",
    "lgbm_oof_probs = np.zeros((len(X_train), len(le.classes_)))  # 保存概率值\n",
    "fold_scores = []  # 保存各折准确率\n",
    "\n",
    "for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):\n",
    "    # 划分训练/验证集\n",
    "    X_tr, X_val = X_train[train_idx], X_train[val_idx]\n",
    "    y_tr, y_val = y_train_enc[train_idx], y_train_enc[val_idx]\n",
    "    \n",
    "    # 模型训练+验证\n",
    "    model.fit(X_tr, y_tr)\n",
    "    val_pred = model.predict(X_val)       # 验证集类别预测\n",
    "    val_prob = model.predict_proba(X_val) # 验证集概率预测\n",
    "    lgbm_oof_preds[val_idx] = val_pred    # 记录验证集预测结果\n",
    "    lgbm_oof_probs[val_idx] = val_prob    # 记录概率值\n",
    "    acc = accuracy_score(y_val, val_pred) # 计算当前折准确率\n",
    "    print(f\"Fold {fold+1} accuracy: {acc:.4f}\")\n",
    "    fold_scores.append(acc)\n",
    "\n",
    "# 输出交叉验证结果\n",
    "print(\"\\n5折交叉验证平均准确率：\", np.mean(fold_scores))\n",
    "print(\"LGBM OOF分类报告：\")\n",
    "print(classification_report(y_train, le.inverse_transform(lgbm_oof_preds)))  # OOF预测与真实值对比\n",
    "\n",
    "# 全量数据训练并生成测试集预测\n",
    "model.fit(X_train, y_train_enc)\n",
    "lgbm_preds_enc = model.predict(X_test)\n",
    "lgbm_preds = le.inverse_transform(lgbm_preds_enc)  # 逆编码得到原始类别\n",
    "\n",
    "# 保存LGBM预测结果\n",
    "df_lgbm_pred = pd.DataFrame({'id': test.index, 'Personality': lgbm_preds})\n",
    "df_lgbm_pred['Personality'] = df_lgbm_pred['Personality'].str.capitalize()\n",
    "# df_lgbm_pred.to_csv('submission_lgbm.csv', index=False)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-07-01T05:42:05.451301Z",
     "iopub.status.busy": "2025-07-01T05:42:05.450477Z",
     "iopub.status.idle": "2025-07-01T05:42:05.490639Z",
     "shell.execute_reply": "2025-07-01T05:42:05.489603Z",
     "shell.execute_reply.started": "2025-07-01T05:42:05.451268Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Submission file saved as 'submission.csv'\n",
      "\n",
      "AutoML测试集预测分布:\n",
      "extrovert    4619\n",
      "introvert    1556\n",
      "Name: count, dtype: int64\n",
      "\n",
      "LGBM+KFold测试集预测分布:\n",
      "extrovert    4621\n",
      "introvert    1554\n",
      "Name: count, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "# -------------------- 二模型简单投票融合 --------------------\n",
    "# 对二个模型的预测结果进行多数投票（提升泛化能力）\n",
    "ensemble_preds = []\n",
    "for a, b in zip(automl_preds, lgbm_preds):\n",
    "    vote = Counter([a, b]).most_common(1)[0][0]  # 统计最多的类别\n",
    "    ensemble_preds.append(vote)\n",
    "ensemble_preds = np.array(ensemble_preds)\n",
    "\n",
    "# 保存融合预测结果\n",
    "ensemble_preds = pd.DataFrame({'id': test.index, 'Personality': ensemble_preds})\n",
    "ensemble_preds['Personality'] = ensemble_preds['Personality'].str.capitalize()\n",
    "ensemble_preds.to_csv('submission.csv', index=False)\n",
    "print(\"Submission file saved as 'submission.csv'\")\n",
    "\n",
    "# -------------------- 预测分布对比（分析模型差异） --------------------\n",
    "print('\\nAutoML测试集预测分布:')\n",
    "print(pd.Series(automl_preds).value_counts())\n",
    "print('\\nLGBM+KFold测试集预测分布:')\n",
    "print(pd.Series(lgbm_preds).value_counts())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**再增加 XGBoost 和 CatBosst**\n",
    "\n",
    "从数据上看， 准确度已经在下降。也说明后面的实验意义不大了。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-07-01T05:31:12.115858Z",
     "iopub.status.busy": "2025-07-01T05:31:12.115493Z",
     "iopub.status.idle": "2025-07-01T05:31:23.902207Z",
     "shell.execute_reply": "2025-07-01T05:31:23.901257Z",
     "shell.execute_reply.started": "2025-07-01T05:31:12.115823Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[XGB] Fold 1 accuracy: 0.9679\n",
      "[XGB] Fold 2 accuracy: 0.9671\n",
      "[XGB] Fold 3 accuracy: 0.9692\n",
      "[XGB] Fold 4 accuracy: 0.9679\n",
      "[XGB] Fold 5 accuracy: 0.9711\n",
      "\n",
      "[XGB] 5折交叉验证平均准确率： 0.9686354176686109\n",
      "[XGB] OOF分类报告：\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "   extrovert       0.98      0.98      0.98     13699\n",
      "   introvert       0.95      0.93      0.94      4825\n",
      "\n",
      "    accuracy                           0.97     18524\n",
      "   macro avg       0.96      0.96      0.96     18524\n",
      "weighted avg       0.97      0.97      0.97     18524\n",
      "\n",
      "[CatBoost] Fold 1 accuracy: 0.9687\n",
      "[CatBoost] Fold 2 accuracy: 0.9663\n",
      "[CatBoost] Fold 3 accuracy: 0.9698\n",
      "[CatBoost] Fold 4 accuracy: 0.9676\n",
      "[CatBoost] Fold 5 accuracy: 0.9719\n",
      "\n",
      "[CatBoost] 5折交叉验证平均准确率： 0.9688513858162603\n",
      "[CatBoost] OOF分类报告：\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "   extrovert       0.98      0.98      0.98     13699\n",
      "   introvert       0.94      0.93      0.94      4825\n",
      "\n",
      "    accuracy                           0.97     18524\n",
      "   macro avg       0.96      0.96      0.96     18524\n",
      "weighted avg       0.97      0.97      0.97     18524\n",
      "\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# # -------------------- XGBoost 模型（KFold交叉验证） --------------------\n",
    "# # 定义XGBoost参数（兼容sklearn接口）\n",
    "# xgb_params = {\n",
    "#     'n_estimators': 200,\n",
    "#     'learning_rate': 0.05,\n",
    "#     'max_depth': 7,\n",
    "#     'random_state': 42,\n",
    "#     'use_label_encoder': False,  # 关闭标签编码警告\n",
    "#     'eval_metric': 'mlogloss'    # 评估指标：多分类对数损失\n",
    "# }\n",
    "# xgb_model = XGBClassifier(**xgb_params)\n",
    "# xgb_oof_preds = np.zeros(len(X_train), dtype=int)\n",
    "# xgb_oof_probs = np.zeros((len(X_train), len(le.classes_)))\n",
    "# xgb_fold_scores = []\n",
    "\n",
    "# # 5折交叉验证（流程同LGBM）\n",
    "# for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):\n",
    "#     X_tr, X_val = X_train[train_idx], X_train[val_idx]\n",
    "#     y_tr, y_val = y_train_enc[train_idx], y_train_enc[val_idx]\n",
    "#     xgb_model.fit(X_tr, y_tr)\n",
    "#     val_pred = xgb_model.predict(X_val)\n",
    "#     val_prob = xgb_model.predict_proba(X_val)\n",
    "#     xgb_oof_preds[val_idx] = val_pred\n",
    "#     xgb_oof_probs[val_idx] = val_prob\n",
    "#     acc = accuracy_score(y_val, val_pred)\n",
    "#     print(f\"[XGB] Fold {fold+1} accuracy: {acc:.4f}\")\n",
    "#     xgb_fold_scores.append(acc)\n",
    "\n",
    "# # 输出交叉验证结果\n",
    "# print(\"\\n[XGB] 5折交叉验证平均准确率：\", np.mean(xgb_fold_scores))\n",
    "# print(\"[XGB] OOF分类报告：\")\n",
    "# print(classification_report(y_train, le.inverse_transform(xgb_oof_preds)))\n",
    "\n",
    "# # 全量数据训练并生成测试集预测\n",
    "# xgb_model.fit(X_train, y_train_enc)\n",
    "# xgb_preds_enc = xgb_model.predict(X_test)\n",
    "# xgb_preds = le.inverse_transform(xgb_preds_enc)\n",
    "\n",
    "# # 保存XGBoost预测结果\n",
    "# df_xgb_pred = pd.DataFrame({'id': test.index, 'Personality': xgb_preds})\n",
    "# df_xgb_pred['Personality'] = df_xgb_pred['Personality'].str.capitalize()\n",
    "# # df_xgb_pred.to_csv('submission_xgb.csv', index=False)\n",
    "\n",
    "\n",
    "# # -------------------- CatBoost 模型（KFold交叉验证） --------------------\n",
    "# # 定义CatBoost参数（默认处理类别特征，但此处已手动编码）\n",
    "# cat_params = {\n",
    "#     'iterations': 200,    # 迭代次数（类似n_estimators）\n",
    "#     'learning_rate': 0.05,\n",
    "#     'depth': 7,           # 树深度（类似max_depth）\n",
    "#     'random_seed': 42,    # 随机种子\n",
    "#     'verbose': 0          # 关闭训练日志输出\n",
    "# }\n",
    "# cat_model = CatBoostClassifier(**cat_params)\n",
    "# cat_oof_preds = np.zeros(len(X_train), dtype=int)\n",
    "# cat_oof_probs = np.zeros((len(X_train), len(le.classes_)))\n",
    "# cat_fold_scores = []\n",
    "\n",
    "# # 5折交叉验证（流程同LGBM）\n",
    "# for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):\n",
    "#     X_tr, X_val = X_train[train_idx], X_train[val_idx]\n",
    "#     y_tr, y_val = y_train_enc[train_idx], y_train_enc[val_idx]\n",
    "#     cat_model.fit(X_tr, y_tr)\n",
    "#     val_pred = cat_model.predict(X_val)\n",
    "#     val_prob = cat_model.predict_proba(X_val)\n",
    "#     cat_oof_preds[val_idx] = val_pred\n",
    "#     cat_oof_probs[val_idx] = val_prob\n",
    "#     acc = accuracy_score(y_val, val_pred)\n",
    "#     print(f\"[CatBoost] Fold {fold+1} accuracy: {acc:.4f}\")\n",
    "#     cat_fold_scores.append(acc)\n",
    "\n",
    "# # 输出交叉验证结果\n",
    "# print(\"\\n[CatBoost] 5折交叉验证平均准确率：\", np.mean(cat_fold_scores))\n",
    "# print(\"[CatBoost] OOF分类报告：\")\n",
    "# print(classification_report(y_train, le.inverse_transform(cat_oof_preds)))\n",
    "\n",
    "# # 全量数据训练并生成测试集预测\n",
    "# cat_model.fit(X_train, y_train_enc)\n",
    "# cat_preds_enc = cat_model.predict(X_test)\n",
    "# cat_preds = le.inverse_transform(cat_preds_enc)\n",
    "\n",
    "# # 保存CatBoost预测结果\n",
    "# df_cat_pred = pd.DataFrame({'id': test.index, 'Personality': cat_preds})\n",
    "# df_cat_pred['Personality'] = df_cat_pred['Personality'].str.capitalize()\n",
    "# # df_cat_pred.to_csv('submission_catboost.csv', index=False)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**做四个模型的简单投票**\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-07-01T05:31:33.873896Z",
     "iopub.status.busy": "2025-07-01T05:31:33.873513Z",
     "iopub.status.idle": "2025-07-01T05:31:33.922095Z",
     "shell.execute_reply": "2025-07-01T05:31:33.921178Z",
     "shell.execute_reply.started": "2025-07-01T05:31:33.873868Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Submission file saved as 'submission.csv'\n",
      "\n",
      "AutoML测试集预测分布:\n",
      "extrovert    4619\n",
      "introvert    1556\n",
      "Name: count, dtype: int64\n",
      "\n",
      "LGBM+KFold测试集预测分布:\n",
      "extrovert    4621\n",
      "introvert    1554\n",
      "Name: count, dtype: int64\n",
      "\n",
      "XGBoost+KFold测试集预测分布:\n",
      "extrovert    4614\n",
      "introvert    1561\n",
      "Name: count, dtype: int64\n",
      "\n",
      "CatBoost+KFold测试集预测分布:\n",
      "extrovert    4617\n",
      "introvert    1558\n",
      "Name: count, dtype: int64\n",
      "\n",
      "四模型融合后测试集预测分布:\n",
      "extrovert    4618\n",
      "introvert    1557\n",
      "Name: count, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "# # -------------------- 四模型简单投票融合 --------------------\n",
    "# # 对四个模型的预测结果进行多数投票（提升泛化能力）\n",
    "# ensemble4_preds = []\n",
    "# for a, b, c, d in zip(automl_preds, lgbm_preds, xgb_preds, cat_preds):\n",
    "#     vote = Counter([a, b, c, d]).most_common(1)[0][0]  # 统计最多的类别\n",
    "#     ensemble4_preds.append(vote)\n",
    "# ensemble4_preds = np.array(ensemble4_preds)\n",
    "\n",
    "# # 保存融合预测结果\n",
    "# df_ensemble4 = pd.DataFrame({'id': test.index, 'Personality': ensemble4_preds})\n",
    "# df_ensemble4['Personality'] = df_ensemble4['Personality'].str.capitalize()\n",
    "# df_ensemble4.to_csv('submission.csv', index=False)\n",
    "# print(\"Submission file saved as 'submission.csv'\")\n",
    "\n",
    "# # -------------------- 预测分布对比（分析模型差异） --------------------\n",
    "# print('\\nAutoML测试集预测分布:')\n",
    "# print(pd.Series(automl_preds).value_counts())\n",
    "# print('\\nLGBM+KFold测试集预测分布:')\n",
    "# print(pd.Series(lgbm_preds).value_counts())\n",
    "# print('\\nXGBoost+KFold测试集预测分布:')\n",
    "# print(pd.Series(xgb_preds).value_counts())\n",
    "# print('\\nCatBoost+KFold测试集预测分布:')\n",
    "# print(pd.Series(cat_preds).value_counts())\n",
    "# print('\\n四模型融合后测试集预测分布:')\n",
    "# print(pd.Series(ensemble4_preds).value_counts())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**做四个模型的 Stacking， 对比下来跟简单投票差不多。**\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-07-01T05:08:00.416479Z",
     "iopub.status.busy": "2025-07-01T05:08:00.416110Z",
     "iopub.status.idle": "2025-07-01T05:08:01.161769Z",
     "shell.execute_reply": "2025-07-01T05:08:01.160981Z",
     "shell.execute_reply.started": "2025-07-01T05:08:00.416454Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "AutoML测试集预测分布:\n",
      "extrovert    4619\n",
      "introvert    1556\n",
      "Name: count, dtype: int64\n",
      "\n",
      "LGBM+KFold测试集预测分布:\n",
      "extrovert    4621\n",
      "introvert    1554\n",
      "Name: count, dtype: int64\n",
      "\n",
      "XGBoost+KFold测试集预测分布:\n",
      "extrovert    4614\n",
      "introvert    1561\n",
      "Name: count, dtype: int64\n",
      "\n",
      "CatBoost+KFold测试集预测分布:\n",
      "extrovert    4617\n",
      "introvert    1558\n",
      "Name: count, dtype: int64\n",
      "\n",
      "Stacking融合后测试集预测分布:\n",
      "extrovert    4619\n",
      "introvert    1556\n",
      "Name: count, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "# from sklearn.linear_model import LogisticRegression\n",
    "# # -------------------- 四模型Stacking融合 --------------------\n",
    "# # 1. 构造一级模型的预测概率作为二级模型输入特征\n",
    "# # 这里用各模型的predict_proba输出（每个样本两个概率，拼接）\n",
    "# meta_X_test = np.column_stack([\n",
    "#     model.predict_proba(X_test) if hasattr(model, 'predict_proba') else None\n",
    "#     for model in [automl.model, model, xgb_model, cat_model]\n",
    "# ])\n",
    "# meta_X_test = meta_X_test.reshape(X_test.shape[0], -1)\n",
    "\n",
    "# meta_X_train = np.column_stack([\n",
    "#     automl.model.predict_proba(X_train),\n",
    "#     model.predict_proba(X_train),\n",
    "#     xgb_model.predict_proba(X_train),\n",
    "#     cat_model.predict_proba(X_train)\n",
    "# ])\n",
    "\n",
    "# # 2. 用一级模型的oof概率作为训练集特征\n",
    "# oof_meta = np.column_stack([\n",
    "#     automl.model.predict_proba(X_train),\n",
    "#     lgbm_oof_probs,\n",
    "#     xgb_oof_probs,\n",
    "#     cat_oof_probs\n",
    "# ])\n",
    "\n",
    "# # 3. 训练二级融合模型（如逻辑回归）\n",
    "# meta_model = LogisticRegression(max_iter=1000)\n",
    "# meta_model.fit(oof_meta, y_train_enc)\n",
    "\n",
    "# # 4. 用融合模型预测测试集\n",
    "# meta_preds_enc = meta_model.predict(meta_X_test)\n",
    "# meta_preds = le.inverse_transform(meta_preds_enc)\n",
    "\n",
    "# # 5. 保存融合预测结果\n",
    "# stacking_df = pd.DataFrame({'id': test.index, 'Personality': meta_preds})\n",
    "# stacking_df['Personality'] = stacking_df['Personality'].str.capitalize()\n",
    "# stacking_df.to_csv('submission.csv', index=False)\n",
    "\n",
    "\n",
    "\n",
    "# # -------------------- 预测分布对比（分析模型差异） --------------------\n",
    "# print('\\nAutoML测试集预测分布:')\n",
    "# print(pd.Series(automl_preds).value_counts())\n",
    "# print('\\nLGBM+KFold测试集预测分布:')\n",
    "# print(pd.Series(lgbm_preds).value_counts())\n",
    "# print('\\nXGBoost+KFold测试集预测分布:')\n",
    "# print(pd.Series(xgb_preds).value_counts())\n",
    "# print('\\nCatBoost+KFold测试集预测分布:')\n",
    "# print(pd.Series(cat_preds).value_counts())\n",
    "# print('\\nStacking融合后测试集预测分布:')\n",
    "# print(pd.Series(meta_preds).value_counts())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**增加了 ANN， 做五个模型的 Stacking， 从数据上看 ANN 的结果不好， 0.958 比其它的 0.968 差。**\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-07-01T05:23:03.675116Z",
     "iopub.status.busy": "2025-07-01T05:23:03.674754Z",
     "iopub.status.idle": "2025-07-01T05:28:20.663765Z",
     "shell.execute_reply": "2025-07-01T05:28:20.661457Z",
     "shell.execute_reply.started": "2025-07-01T05:23:03.675090Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[ANN] Fold 1 accuracy: 0.9633\n",
      "[ANN] Fold 2 accuracy: 0.9582\n",
      "[ANN] Fold 3 accuracy: 0.9579\n",
      "[ANN] Fold 4 accuracy: 0.9579\n",
      "[ANN] Fold 5 accuracy: 0.9617\n",
      "\n",
      "[ANN] 5折交叉验证平均准确率： 0.9597820061034794\n",
      "[ANN] OOF分类报告：\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "   extrovert       0.97      0.98      0.97     13699\n",
      "   introvert       0.93      0.91      0.92      4825\n",
      "\n",
      "    accuracy                           0.96     18524\n",
      "   macro avg       0.95      0.95      0.95     18524\n",
      "weighted avg       0.96      0.96      0.96     18524\n",
      "\n",
      "\n",
      "AutoML测试集预测分布:\n",
      "extrovert    4619\n",
      "introvert    1556\n",
      "Name: count, dtype: int64\n",
      "\n",
      "LGBM+KFold测试集预测分布:\n",
      "extrovert    4621\n",
      "introvert    1554\n",
      "Name: count, dtype: int64\n",
      "\n",
      "XGBoost+KFold测试集预测分布:\n",
      "extrovert    4614\n",
      "introvert    1561\n",
      "Name: count, dtype: int64\n",
      "\n",
      "CatBoost+KFold测试集预测分布:\n",
      "extrovert    4617\n",
      "introvert    1558\n",
      "Name: count, dtype: int64\n",
      "\n",
      "ANN+KFold测试集预测分布:\n",
      "extrovert    4609\n",
      "introvert    1566\n",
      "Name: count, dtype: int64\n",
      "\n",
      "Stacking融合后测试集预测分布:\n",
      "extrovert    4619\n",
      "introvert    1556\n",
      "Name: count, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "# from sklearn.neural_network import MLPClassifier\n",
    "\n",
    "# # -------------------- ANN 神经网络模型（KFold交叉验证） --------------------\n",
    "# ann_params = {\n",
    "#     'hidden_layer_sizes': (128, 64, 32, 16),  # 增加网络深度和宽度\n",
    "#     'activation': 'relu',\n",
    "#     'solver': 'adam',\n",
    "#     'max_iter': 300,  # 增加迭代次数\n",
    "#     'random_state': 42\n",
    "# }\n",
    "# ann_model = MLPClassifier(**ann_params)\n",
    "# ann_oof_preds = np.zeros(len(X_train), dtype=int)\n",
    "# ann_oof_probs = np.zeros((len(X_train), len(le.classes_)))\n",
    "# ann_fold_scores = []\n",
    "\n",
    "# for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):\n",
    "#     X_tr, X_val = X_train[train_idx], X_train[val_idx]\n",
    "#     y_tr, y_val = y_train_enc[train_idx], y_train_enc[val_idx]\n",
    "#     ann_model.fit(X_tr, y_tr)\n",
    "#     val_pred = ann_model.predict(X_val)\n",
    "#     val_prob = ann_model.predict_proba(X_val)\n",
    "#     ann_oof_preds[val_idx] = val_pred\n",
    "#     ann_oof_probs[val_idx] = val_prob\n",
    "#     acc = accuracy_score(y_val, val_pred)\n",
    "#     print(f\"[ANN] Fold {fold+1} accuracy: {acc:.4f}\")\n",
    "#     ann_fold_scores.append(acc)\n",
    "\n",
    "# print(\"\\n[ANN] 5折交叉验证平均准确率：\", np.mean(ann_fold_scores))\n",
    "# print(\"[ANN] OOF分类报告：\")\n",
    "# print(classification_report(y_train, le.inverse_transform(ann_oof_preds)))\n",
    "\n",
    "# # 全量数据训练并生成测试集预测\n",
    "# ann_model.fit(X_train, y_train_enc)\n",
    "# ann_preds_enc = ann_model.predict(X_test)\n",
    "# ann_preds = le.inverse_transform(ann_preds_enc)\n",
    "\n",
    "# df_ann_pred = pd.DataFrame({'id': test.index, 'Personality': ann_preds})\n",
    "# df_ann_pred['Personality'] = df_ann_pred['Personality'].str.capitalize()\n",
    "# # df_ann_pred.to_csv('submission_ann.csv', index=False)\n",
    "\n",
    "# # -------------------- 五模型Stacking融合 --------------------\n",
    "\n",
    "# # 构造一级模型的概率输出作为二级模型输入特征\n",
    "# oof_meta = np.column_stack([\n",
    "#     automl.model.predict_proba(X_train),\n",
    "#     lgbm_oof_probs,\n",
    "#     xgb_oof_probs,\n",
    "#     cat_oof_probs,\n",
    "#     ann_oof_probs\n",
    "# ])\n",
    "# meta_X_test = np.column_stack([\n",
    "#     automl.model.predict_proba(X_test),\n",
    "#     model.predict_proba(X_test),\n",
    "#     xgb_model.predict_proba(X_test),\n",
    "#     cat_model.predict_proba(X_test),\n",
    "#     ann_model.predict_proba(X_test)\n",
    "# ])\n",
    "\n",
    "# meta_model = LogisticRegression(max_iter=1000)\n",
    "# meta_model.fit(oof_meta, y_train_enc)\n",
    "# meta_preds_enc = meta_model.predict(meta_X_test)\n",
    "# meta_preds = le.inverse_transform(meta_preds_enc)\n",
    "\n",
    "# stacking_df = pd.DataFrame({'id': test.index, 'Personality': meta_preds})\n",
    "# stacking_df['Personality'] = stacking_df['Personality'].str.capitalize()\n",
    "# stacking_df.to_csv('submission.csv', index=False)\n",
    "\n",
    "\n",
    "# # -------------------- 预测分布对比（分析模型差异） --------------------\n",
    "# print('\\nAutoML测试集预测分布:')\n",
    "# print(pd.Series(automl_preds).value_counts())\n",
    "# print('\\nLGBM+KFold测试集预测分布:')\n",
    "# print(pd.Series(lgbm_preds).value_counts())\n",
    "# print('\\nXGBoost+KFold测试集预测分布:')\n",
    "# print(pd.Series(xgb_preds).value_counts())\n",
    "# print('\\nCatBoost+KFold测试集预测分布:')\n",
    "# print(pd.Series(cat_preds).value_counts())\n",
    "# print('\\nANN+KFold测试集预测分布:')\n",
    "# print(pd.Series(ann_preds).value_counts())\n",
    "# print('\\nStacking融合后测试集预测分布:')\n",
    "# print(pd.Series(meta_preds).value_counts())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 第一阶段实验结论\n",
    "\n",
    "1. automl 可以快速解决大多数问题。LGBM 又快又好用没有必要折腾太多。\n",
    "2. 堆叠多个树算法可以小幅度提升， 34->30 这样的排名。\n",
    "3. 增加神经网络没有明显效果提升。\n",
    "\n",
    "### 下一阶段思路：\n",
    "\n",
    "看其它参赛者的思路；做更多的特征。\n"
   ]
  }
 ],
 "metadata": {
  "kaggle": {
   "accelerator": "none",
   "dataSources": [
    {
     "databundleVersionId": 12738969,
     "sourceId": 91718,
     "sourceType": "competition"
    }
   ],
   "dockerImageVersionId": 31040,
   "isGpuEnabled": false,
   "isInternetEnabled": true,
   "language": "python",
   "sourceType": "notebook"
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
