{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "283025d5",
   "metadata": {},
   "outputs": [],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "73bd3553",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import gc\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import re\n",
    "import time\n",
    "from scipy import stats\n",
    "import matplotlib.pyplot as plt\n",
    "import category_encoders as ce\n",
    "import networkx as nx\n",
    "import pickle\n",
    "import lightgbm as lgb\n",
    "import catboost as cat\n",
    "import xgboost as xgb\n",
    "from datetime import timedelta\n",
    "from gensim.models import Word2Vec\n",
    "from io import StringIO\n",
    "from tqdm import tqdm\n",
    "from lightgbm import LGBMClassifier\n",
    "from lightgbm import log_evaluation, early_stopping\n",
    "from sklearn.metrics import roc_curve\n",
    "from scipy.stats import chi2_contingency, pearsonr\n",
    "from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n",
    "from sklearn.feature_extraction import FeatureHasher\n",
    "from sklearn.model_selection import StratifiedKFold, KFold, train_test_split, GridSearchCV\n",
    "from category_encoders import TargetEncoder\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from autogluon.tabular import TabularDataset, TabularPredictor, FeatureMetadata\n",
    "from autogluon.features.generators import AsTypeFeatureGenerator, BulkFeatureGenerator, DropUniqueFeatureGenerator, FillNaFeatureGenerator, PipelineFeatureGenerator\n",
    "from autogluon.features.generators import CategoryFeatureGenerator, IdentityFeatureGenerator, AutoMLPipelineFeatureGenerator\n",
    "from autogluon.common.features.types import R_INT, R_FLOAT\n",
    "from autogluon.core.metrics import make_scorer"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8440031f",
   "metadata": {},
   "source": [
    "# 数据加载"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cf7f31e8",
   "metadata": {},
   "source": [
    "## 目标客户加载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "ca3bad1e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>DATA_DAT</th>\n",
       "      <th>CUST_NO</th>\n",
       "      <th>FLAG</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>20250731</td>\n",
       "      <td>13ef4241a1959ccbcf8d8a30f0ed9d50</td>\n",
       "      <td>9</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>20250731</td>\n",
       "      <td>029dede087234ee034590abefc4731a9</td>\n",
       "      <td>9</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>20250731</td>\n",
       "      <td>929838a9271aa18da0ad8cb5154ce591</td>\n",
       "      <td>4</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>20250731</td>\n",
       "      <td>51b04b6d47643e0f5303c38a429557d5</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>20250731</td>\n",
       "      <td>a54db8a4f36e43e9390cf3e43d45f308</td>\n",
       "      <td>9</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   DATA_DAT                           CUST_NO  FLAG\n",
       "0  20250731  13ef4241a1959ccbcf8d8a30f0ed9d50     9\n",
       "1  20250731  029dede087234ee034590abefc4731a9     9\n",
       "2  20250731  929838a9271aa18da0ad8cb5154ce591     4\n",
       "3  20250731  51b04b6d47643e0f5303c38a429557d5     2\n",
       "4  20250731  a54db8a4f36e43e9390cf3e43d45f308     9"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "target_cust_info = pd.read_csv('../DATA/TARGET_VALID.csv')\n",
    "target_cust_info.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "850cb671",
   "metadata": {},
   "source": [
    "## 特征文件加载"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "eef572c1",
   "metadata": {},
   "source": [
    "### 通用pkl加载函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "1e262d1e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "特征加载器已初始化\n"
     ]
    }
   ],
   "source": [
    "class FeatureLoader:\n",
    "    \"\"\"\n",
    "    特征文件加载器\n",
    "    支持批量加载feature目录下的所有pkl文件\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, feature_dir='./feature'):\n",
    "        \"\"\"\n",
    "        初始化加载器\n",
    "        \n",
    "        参数:\n",
    "        - feature_dir: 特征文件目录路径\n",
    "        \"\"\"\n",
    "        self.feature_dir = feature_dir\n",
    "        self.features_dict = {}\n",
    "        self.feature_info = {}\n",
    "        \n",
    "    def load_single_feature(self, file_path):\n",
    "        \"\"\"\n",
    "        加载单个pkl特征文件\n",
    "        \n",
    "        参数:\n",
    "        - file_path: pkl文件路径\n",
    "        \n",
    "        返回:\n",
    "        - DataFrame: 特征数据\n",
    "        \"\"\"\n",
    "        try:\n",
    "            with open(file_path, 'rb') as f:\n",
    "                data = pickle.load(f)\n",
    "            \n",
    "            if not isinstance(data, pd.DataFrame):\n",
    "                raise ValueError(f\"文件 {file_path} 不是DataFrame格式\")\n",
    "            \n",
    "            return data\n",
    "        except Exception as e:\n",
    "            print(f\"加载文件 {file_path} 失败: {str(e)}\")\n",
    "            return None\n",
    "    \n",
    "    def load_all_features(self, pattern='*.pkl'):\n",
    "        \"\"\"\n",
    "        批量加载所有特征文件\n",
    "        \n",
    "        参数:\n",
    "        - pattern: 文件匹配模式\n",
    "        \n",
    "        返回:\n",
    "        - dict: {文件名: DataFrame}\n",
    "        \"\"\"\n",
    "        if not os.path.exists(self.feature_dir):\n",
    "            print(f\"目录不存在: {self.feature_dir}\")\n",
    "            return {}\n",
    "        \n",
    "        pkl_files = [f for f in os.listdir(self.feature_dir) if f.endswith('.pkl')]\n",
    "        \n",
    "        if not pkl_files:\n",
    "            print(f\"未找到pkl文件在目录: {self.feature_dir}\")\n",
    "            return {}\n",
    "        \n",
    "        print(f\"发现 {len(pkl_files)} 个特征文件\")\n",
    "        print(\"=\"*80)\n",
    "        \n",
    "        for pkl_file in pkl_files:\n",
    "            file_path = os.path.join(self.feature_dir, pkl_file)\n",
    "            file_name = os.path.splitext(pkl_file)[0]\n",
    "            \n",
    "            print(f\"\\n正在加载: {pkl_file}\")\n",
    "            data = self.load_single_feature(file_path)\n",
    "            \n",
    "            if data is not None:\n",
    "                self.features_dict[file_name] = data\n",
    "                \n",
    "                # 记录文件信息\n",
    "                self.feature_info[file_name] = {\n",
    "                    'file_path': file_path,\n",
    "                    'file_size_mb': os.path.getsize(file_path) / 1024 / 1024,\n",
    "                    'shape': data.shape,\n",
    "                    'memory_mb': data.memory_usage(deep=True).sum() / 1024 / 1024,\n",
    "                    'columns': data.columns.tolist()\n",
    "                }\n",
    "                \n",
    "                print(f\"  - 形状: {data.shape}\")\n",
    "                print(f\"  - 文件大小: {self.feature_info[file_name]['file_size_mb']:.2f} MB\")\n",
    "                print(f\"  - 内存占用: {self.feature_info[file_name]['memory_mb']:.2f} MB\")\n",
    "        \n",
    "        print(\"\\n\" + \"=\"*80)\n",
    "        print(f\"成功加载 {len(self.features_dict)} 个特征文件\")\n",
    "        \n",
    "        return self.features_dict\n",
    "    \n",
    "    def get_feature_summary(self):\n",
    "        \"\"\"\n",
    "        获取所有特征文件的汇总信息\n",
    "        \n",
    "        返回:\n",
    "        - DataFrame: 汇总表\n",
    "        \"\"\"\n",
    "        if not self.feature_info:\n",
    "            print(\"请先加载特征文件\")\n",
    "            return None\n",
    "        \n",
    "        summary_data = []\n",
    "        for name, info in self.feature_info.items():\n",
    "            summary_data.append({\n",
    "                '特征文件': name,\n",
    "                '样本数': info['shape'][0],\n",
    "                '特征数': info['shape'][1] - 1 if 'CUST_NO' in info['columns'] else info['shape'][1],\n",
    "                '文件大小(MB)': round(info['file_size_mb'], 2),\n",
    "                '内存占用(MB)': round(info['memory_mb'], 2)\n",
    "            })\n",
    "        \n",
    "        summary_df = pd.DataFrame(summary_data)\n",
    "        summary_df = summary_df.sort_values('特征数', ascending=False).reset_index(drop=True)\n",
    "        \n",
    "        # 添加汇总行\n",
    "        total_row = pd.DataFrame([{\n",
    "            '特征文件': '总计',\n",
    "            '样本数': '-',\n",
    "            '特征数': summary_df['特征数'].sum(),\n",
    "            '文件大小(MB)': round(summary_df['文件大小(MB)'].sum(), 2),\n",
    "            '内存占用(MB)': round(summary_df['内存占用(MB)'].sum(), 2)\n",
    "        }])\n",
    "        \n",
    "        summary_df = pd.concat([summary_df, total_row], ignore_index=True)\n",
    "        \n",
    "        return summary_df\n",
    "\n",
    "# 创建加载器实例\n",
    "loader = FeatureLoader(feature_dir='./feature')\n",
    "print(\"特征加载器已初始化\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0ab626ec",
   "metadata": {},
   "source": [
    "### 加载所有特征文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c530b6ca",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "发现 10 个特征文件\n",
      "================================================================================\n",
      "\n",
      "正在加载: AGET_PAY_features.pkl\n",
      "  - 形状: (530, 47)\n",
      "  - 文件大小: 0.21 MB\n",
      "  - 内存占用: 0.23 MB\n",
      "\n",
      "正在加载: ASSET_features.pkl\n",
      "  - 形状: (5624, 139)\n",
      "  - 文件大小: 5.99 MB\n",
      "  - 内存占用: 6.27 MB\n",
      "\n",
      "正在加载: CCD_TR_DTL_features.pkl\n",
      "  - 形状: (18, 48)\n",
      "  - 文件大小: 0.01 MB\n",
      "  - 内存占用: 0.01 MB\n",
      "\n",
      "正在加载: mb_pageview_dtl_features.pkl\n",
      "  - 形状: (2753, 503)\n",
      "  - 文件大小: 10.60 MB\n",
      "  - 内存占用: 10.71 MB\n",
      "\n",
      "正在加载: MB_TRNFLW_QRYTRNFLW_features.pkl\n",
      "  - 形状: (3128, 239)\n",
      "  - 文件大小: 5.77 MB\n",
      "  - 内存占用: 5.92 MB\n",
      "\n",
      "正在加载: NATURE_features.pkl\n",
      "  - 形状: (5975, 26)\n",
      "  - 文件大小: 1.21 MB\n",
      "  - 内存占用: 1.51 MB\n",
      "\n",
      "正在加载: PROD_HOLD_features.pkl\n",
      "  - 形状: (5741, 41)\n",
      "  - 文件大小: 1.66 MB\n",
      "  - 内存占用: 1.95 MB\n",
      "\n",
      "正在加载: tr_aps_dtl_features.pkl\n",
      "  - 形状: (5616, 309)\n",
      "  - 文件大小: 13.29 MB\n",
      "  - 内存占用: 13.57 MB\n",
      "\n",
      "正在加载: TR_IBTF_features.pkl\n",
      "  - 形状: (2981, 86)\n",
      "  - 文件大小: 1.96 MB\n",
      "  - 内存占用: 2.11 MB\n",
      "\n",
      "正在加载: TR_TPAY_features.pkl\n",
      "  - 形状: (3595, 65)\n",
      "  - 文件大小: 1.76 MB\n",
      "  - 内存占用: 1.94 MB\n",
      "\n",
      "================================================================================\n",
      "成功加载 10 个特征文件\n",
      "\n",
      "特征文件汇总:\n",
      "                            特征文件   样本数   特征数  文件大小(MB)  内存占用(MB)\n",
      "0       mb_pageview_dtl_features  2753   502     10.60     10.71\n",
      "1            tr_aps_dtl_features  5616   308     13.29     13.57\n",
      "2   MB_TRNFLW_QRYTRNFLW_features  3128   238      5.77      5.92\n",
      "3                 ASSET_features  5624   138      5.99      6.27\n",
      "4               TR_IBTF_features  2981    85      1.96      2.11\n",
      "5               TR_TPAY_features  3595    64      1.76      1.94\n",
      "6            CCD_TR_DTL_features    18    47      0.01      0.01\n",
      "7              AGET_PAY_features   530    46      0.21      0.23\n",
      "8             PROD_HOLD_features  5741    40      1.66      1.95\n",
      "9                NATURE_features  5975    25      1.21      1.51\n",
      "10                            总计     -  1493     42.46     44.22\n"
     ]
    }
   ],
   "source": [
    "# 加载所有特征文件\n",
    "features_dict = loader.load_all_features()\n",
    "\n",
    "# 查看特征文件汇总\n",
    "summary_df = loader.get_feature_summary()\n",
    "print(\"\\n特征文件汇总:\")\n",
    "print(summary_df)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "62051560",
   "metadata": {},
   "source": [
    "# 建模训练"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "567e4c5d",
   "metadata": {},
   "source": [
    "## 数据合并与预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "19d7fb13",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "初始化数据合并器...\n",
      "\n",
      "开始合并所有特征...\n",
      "================================================================================\n",
      "初始数据形状 (目标表): (5975, 3)\n",
      "初始列: ['DATA_DAT', 'CUST_NO', 'FLAG']\n",
      "\n",
      "[1/10] 合并特征: AGET_PAY_features\n",
      "  - 特征表形状: (530, 47)\n",
      "  - 新增特征数: 46\n",
      "  - 当前总列数: 49\n",
      "  - 当前数据形状: (5975, 49)\n",
      "\n",
      "[2/10] 合并特征: ASSET_features\n",
      "  - 特征表形状: (5624, 139)\n",
      "  - 新增特征数: 138\n",
      "  - 当前总列数: 187\n",
      "  - 当前数据形状: (5975, 187)\n",
      "\n",
      "[3/10] 合并特征: CCD_TR_DTL_features\n",
      "  - 特征表形状: (18, 48)\n",
      "  - 新增特征数: 47\n",
      "  - 当前总列数: 234\n",
      "  - 当前数据形状: (5975, 234)\n",
      "\n",
      "[4/10] 合并特征: mb_pageview_dtl_features\n",
      "  - 特征表形状: (2753, 503)\n",
      "  - 新增特征数: 502\n",
      "  - 当前总列数: 736\n",
      "  - 当前数据形状: (5975, 736)\n",
      "\n",
      "[5/10] 合并特征: MB_TRNFLW_QRYTRNFLW_features\n",
      "  - 特征表形状: (3128, 239)\n",
      "  - 新增特征数: 238\n",
      "  - 当前总列数: 974\n",
      "  - 当前数据形状: (5975, 974)\n",
      "\n",
      "[6/10] 合并特征: NATURE_features\n",
      "  - 特征表形状: (5975, 26)\n",
      "  - 新增特征数: 25\n",
      "  - 当前总列数: 999\n",
      "  - 当前数据形状: (5975, 999)\n",
      "\n",
      "[7/10] 合并特征: PROD_HOLD_features\n",
      "  - 特征表形状: (5741, 41)\n",
      "  - 新增特征数: 40\n",
      "  - 当前总列数: 1039\n",
      "  - 当前数据形状: (5975, 1039)\n",
      "\n",
      "[8/10] 合并特征: tr_aps_dtl_features\n",
      "  - 特征表形状: (5616, 309)\n",
      "  - 新增特征数: 308\n",
      "  - 当前总列数: 1347\n",
      "  - 当前数据形状: (5975, 1347)\n",
      "\n",
      "[9/10] 合并特征: TR_IBTF_features\n",
      "  - 特征表形状: (2981, 86)\n",
      "  - 新增特征数: 85\n",
      "  - 当前总列数: 1432\n",
      "  - 当前数据形状: (5975, 1432)\n",
      "\n",
      "[10/10] 合并特征: TR_TPAY_features\n",
      "  - 特征表形状: (3595, 65)\n",
      "  - 新增特征数: 64\n",
      "  - 当前总列数: 1496\n",
      "  - 当前数据形状: (5975, 1496)\n",
      "\n",
      "================================================================================\n",
      "合并完成! 最终数据形状: (5975, 1496)\n",
      "最终特征数: 1494\n",
      "\n",
      "合并摘要:\n",
      "                             数据表  原始样本数   特征数     缺失率\n",
      "0                          目标客户表   5975     2   0.00%\n",
      "1              AGET_PAY_features    530    46   6.05%\n",
      "2                 ASSET_features   5624   138   0.00%\n",
      "3            CCD_TR_DTL_features     18    47   4.26%\n",
      "4       mb_pageview_dtl_features   2753   502  62.59%\n",
      "5   MB_TRNFLW_QRYTRNFLW_features   3128   238   0.00%\n",
      "6                NATURE_features   5975    25   0.00%\n",
      "7             PROD_HOLD_features   5741    40  62.50%\n",
      "8            tr_aps_dtl_features   5616   308   0.00%\n",
      "9               TR_IBTF_features   2981    85   0.00%\n",
      "10              TR_TPAY_features   3595    64   0.00%\n",
      "11                         合并后数据   5975  1494  49.41%\n",
      "\n",
      "数据质量检查...\n",
      "================================================================================\n",
      "\n",
      "1. 重复记录数: 0\n",
      "\n",
      "2. 缺失值统计:\n",
      "   - 有缺失的列数: 1469\n",
      "   - 缺失最严重的前10列:\n",
      "     BOND_IND: 5975 (100.00%)\n",
      "     PROD_DEPOSIT_TYPE: 5975 (100.00%)\n",
      "     CCARD_IND: 5975 (100.00%)\n",
      "     DCARD_IND: 5975 (100.00%)\n",
      "     IL_IND: 5975 (100.00%)\n",
      "     DP_IND: 5975 (100.00%)\n",
      "     METAL_IND: 5975 (100.00%)\n",
      "     PAY_IND: 5975 (100.00%)\n",
      "     EBNK_IND: 5975 (100.00%)\n",
      "     MB_IND: 5975 (100.00%)\n",
      "\n",
      "3. 常量列数: 47\n",
      "\n",
      "4. 数据类型分布:\n",
      "   float64: 1483\n",
      "   int64: 6\n",
      "   int32: 6\n",
      "   object: 1\n",
      "\n",
      "5. 目标变量分布 (FLAG):\n",
      "   类别 1: 169 (2.83%)\n",
      "   类别 2: 73 (1.22%)\n",
      "   类别 4: 2987 (49.99%)\n",
      "   类别 5: 1068 (17.87%)\n",
      "   类别 7: 137 (2.29%)\n",
      "   类别 8: 280 (4.69%)\n",
      "   类别 9: 1250 (20.92%)\n",
      "   类别 10: 11 (0.18%)\n",
      "================================================================================\n"
     ]
    }
   ],
   "source": [
    "class DataMerger:\n",
    "    \"\"\"\n",
    "    数据合并器 - 负责将所有特征表与目标表合并\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, features_dict, target_df, key='CUST_NO'):\n",
    "        \"\"\"\n",
    "        初始化合并器\n",
    "        \n",
    "        参数:\n",
    "        - features_dict: 特征字典 {特征名: DataFrame}\n",
    "        - target_df: 目标客户表\n",
    "        - key: 主键列名\n",
    "        \"\"\"\n",
    "        self.features_dict = features_dict\n",
    "        self.target_df = target_df.copy()\n",
    "        self.key = key\n",
    "        self.merged_data = None\n",
    "        \n",
    "    def merge_all_features(self):\n",
    "        \"\"\"\n",
    "        将所有特征表与目标表合并\n",
    "        \n",
    "        返回:\n",
    "        - DataFrame: 合并后的完整数据\n",
    "        \"\"\"\n",
    "        print(\"\\n开始合并所有特征...\")\n",
    "        print(\"=\"*80)\n",
    "        \n",
    "        # 从目标表开始\n",
    "        self.merged_data = self.target_df.copy()\n",
    "        print(f\"初始数据形状 (目标表): {self.merged_data.shape}\")\n",
    "        print(f\"初始列: {self.merged_data.columns.tolist()}\")\n",
    "        \n",
    "        # 逐个合并特征表\n",
    "        for idx, (feature_name, feature_df) in enumerate(self.features_dict.items(), 1):\n",
    "            print(f\"\\n[{idx}/{len(self.features_dict)}] 合并特征: {feature_name}\")\n",
    "            print(f\"  - 特征表形状: {feature_df.shape}\")\n",
    "            \n",
    "            # 检查是否有主键\n",
    "            if self.key not in feature_df.columns:\n",
    "                print(f\"  警告: {feature_name} 没有主键 {self.key}, 跳过\")\n",
    "                continue\n",
    "            \n",
    "            # 执行合并\n",
    "            before_cols = len(self.merged_data.columns)\n",
    "            self.merged_data = self.merged_data.merge(\n",
    "                feature_df, \n",
    "                on=self.key, \n",
    "                how='left'\n",
    "            )\n",
    "            after_cols = len(self.merged_data.columns)\n",
    "            added_cols = after_cols - before_cols\n",
    "            \n",
    "            print(f\"  - 新增特征数: {added_cols}\")\n",
    "            print(f\"  - 当前总列数: {after_cols}\")\n",
    "            print(f\"  - 当前数据形状: {self.merged_data.shape}\")\n",
    "        \n",
    "        print(\"\\n\" + \"=\"*80)\n",
    "        print(f\"合并完成! 最终数据形状: {self.merged_data.shape}\")\n",
    "        print(f\"最终特征数: {self.merged_data.shape[1] - len([self.key, 'FLAG'])}\")\n",
    "        \n",
    "        return self.merged_data\n",
    "    \n",
    "    def get_merge_summary(self):\n",
    "        \"\"\"\n",
    "        获取合并摘要信息\n",
    "        \n",
    "        返回:\n",
    "        - DataFrame: 合并摘要表\n",
    "        \"\"\"\n",
    "        if self.merged_data is None:\n",
    "            print(\"请先执行 merge_all_features()\")\n",
    "            return None\n",
    "        \n",
    "        summary_data = []\n",
    "        \n",
    "        # 目标表信息\n",
    "        summary_data.append({\n",
    "            '数据表': '目标客户表',\n",
    "            '原始样本数': len(self.target_df),\n",
    "            '特征数': len(self.target_df.columns) - 1,\n",
    "            '缺失率': f\"{self.target_df.isnull().sum().sum() / (self.target_df.shape[0] * self.target_df.shape[1]) * 100:.2f}%\"\n",
    "        })\n",
    "        \n",
    "        # 各特征表信息\n",
    "        for feature_name, feature_df in self.features_dict.items():\n",
    "            if self.key in feature_df.columns:\n",
    "                feature_cols = [col for col in feature_df.columns if col != self.key]\n",
    "                summary_data.append({\n",
    "                    '数据表': feature_name,\n",
    "                    '原始样本数': len(feature_df),\n",
    "                    '特征数': len(feature_cols),\n",
    "                    '缺失率': f\"{feature_df[feature_cols].isnull().sum().sum() / (feature_df.shape[0] * len(feature_cols)) * 100:.2f}%\"\n",
    "                })\n",
    "        \n",
    "        # 合并后信息\n",
    "        summary_data.append({\n",
    "            '数据表': '合并后数据',\n",
    "            '原始样本数': len(self.merged_data),\n",
    "            '特征数': self.merged_data.shape[1] - len([self.key, 'FLAG']),\n",
    "            '缺失率': f\"{self.merged_data.drop(columns=[self.key, 'FLAG'], errors='ignore').isnull().sum().sum() / (self.merged_data.shape[0] * (self.merged_data.shape[1] - 2)) * 100:.2f}%\"\n",
    "        })\n",
    "        \n",
    "        summary_df = pd.DataFrame(summary_data)\n",
    "        return summary_df\n",
    "    \n",
    "    def check_data_quality(self):\n",
    "        \"\"\"\n",
    "        检查合并后数据质量\n",
    "        \n",
    "        返回:\n",
    "        - dict: 质量报告\n",
    "        \"\"\"\n",
    "        if self.merged_data is None:\n",
    "            print(\"请先执行 merge_all_features()\")\n",
    "            return None\n",
    "        \n",
    "        print(\"\\n数据质量检查...\")\n",
    "        print(\"=\"*80)\n",
    "        \n",
    "        # 1. 检查重复记录\n",
    "        duplicates = self.merged_data.duplicated(subset=[self.key]).sum()\n",
    "        print(f\"\\n1. 重复记录数: {duplicates}\")\n",
    "        \n",
    "        # 2. 检查缺失值\n",
    "        missing_stats = self.merged_data.isnull().sum().sort_values(ascending=False)\n",
    "        missing_cols = missing_stats[missing_stats > 0]\n",
    "        print(f\"\\n2. 缺失值统计:\")\n",
    "        print(f\"   - 有缺失的列数: {len(missing_cols)}\")\n",
    "        print(f\"   - 缺失最严重的前10列:\")\n",
    "        if len(missing_cols) > 0:\n",
    "            for col, count in missing_cols.head(10).items():\n",
    "                rate = count / len(self.merged_data) * 100\n",
    "                print(f\"     {col}: {count} ({rate:.2f}%)\")\n",
    "        \n",
    "        # 3. 检查常量列\n",
    "        constant_cols = []\n",
    "        for col in self.merged_data.columns:\n",
    "            if col not in [self.key, 'FLAG']:\n",
    "                if self.merged_data[col].nunique() <= 1:\n",
    "                    constant_cols.append(col)\n",
    "        print(f\"\\n3. 常量列数: {len(constant_cols)}\")\n",
    "        if len(constant_cols) > 0 and len(constant_cols) <= 10:\n",
    "            print(f\"   列名: {constant_cols}\")\n",
    "        \n",
    "        # 4. 数据类型统计\n",
    "        dtype_counts = self.merged_data.dtypes.value_counts()\n",
    "        print(f\"\\n4. 数据类型分布:\")\n",
    "        for dtype, count in dtype_counts.items():\n",
    "            print(f\"   {dtype}: {count}\")\n",
    "        \n",
    "        # 5. 目标变量统计\n",
    "        if 'FLAG' in self.merged_data.columns:\n",
    "            flag_dist = self.merged_data['FLAG'].value_counts().sort_index()\n",
    "            print(f\"\\n5. 目标变量分布 (FLAG):\")\n",
    "            for flag, count in flag_dist.items():\n",
    "                rate = count / len(self.merged_data) * 100\n",
    "                print(f\"   类别 {flag}: {count} ({rate:.2f}%)\")\n",
    "        \n",
    "        print(\"=\"*80)\n",
    "        \n",
    "        quality_report = {\n",
    "            'duplicates': duplicates,\n",
    "            'missing_cols': len(missing_cols),\n",
    "            'constant_cols': constant_cols,\n",
    "            'dtype_counts': dtype_counts.to_dict()\n",
    "        }\n",
    "        \n",
    "        return quality_report\n",
    "\n",
    "# 创建合并器并执行合并\n",
    "print(\"初始化数据合并器...\")\n",
    "merger = DataMerger(features_dict, target_cust_info)\n",
    "\n",
    "# 执行合并\n",
    "full_data = merger.merge_all_features()\n",
    "\n",
    "# 查看合并摘要\n",
    "merge_summary = merger.get_merge_summary()\n",
    "print(\"\\n合并摘要:\")\n",
    "print(merge_summary)\n",
    "\n",
    "# 数据质量检查\n",
    "quality_report = merger.check_data_quality()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7c09c077",
   "metadata": {},
   "source": [
    "## 数据预处理与特征工程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "0bb5ed32",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "数据预处理...\n",
      "================================================================================\n",
      "原始数据形状: (5975, 1496)\n",
      "\n",
      "删除 234 个高缺失列 (缺失率 > 95.0%)\n",
      "删除 20 个常量列\n",
      "\n",
      "预处理后数据形状: (5975, 1242)\n",
      "================================================================================\n",
      "\n",
      "预处理完成!\n",
      "特征数: 1240\n"
     ]
    }
   ],
   "source": [
    "# 数据预处理函数\n",
    "def preprocess_data(data):\n",
    "    \"\"\"\n",
    "    数据预处理\n",
    "    1. 删除高缺失列\n",
    "    2. 删除常量列\n",
    "    3. 删除唯一值列\n",
    "    4. 处理无穷值\n",
    "    \"\"\"\n",
    "    data_processed = data.copy()\n",
    "    \n",
    "    print(\"\\n数据预处理...\")\n",
    "    print(\"=\"*80)\n",
    "    print(f\"原始数据形状: {data_processed.shape}\")\n",
    "    \n",
    "    # 1. 删除高缺失列 (缺失率 > 95%)\n",
    "    missing_threshold = 0.95\n",
    "    missing_ratio = data_processed.isnull().sum() / len(data_processed)\n",
    "    high_missing_cols = missing_ratio[missing_ratio > missing_threshold].index.tolist()\n",
    "    \n",
    "    # 排除关键列\n",
    "    high_missing_cols = [col for col in high_missing_cols if col not in ['CUST_NO', 'FLAG']]\n",
    "    \n",
    "    if len(high_missing_cols) > 0:\n",
    "        print(f\"\\n删除 {len(high_missing_cols)} 个高缺失列 (缺失率 > {missing_threshold*100}%)\")\n",
    "        data_processed = data_processed.drop(columns=high_missing_cols)\n",
    "    \n",
    "    # 2. 删除常量列\n",
    "    constant_cols = []\n",
    "    for col in data_processed.columns:\n",
    "        if col not in ['CUST_NO', 'FLAG']:\n",
    "            if data_processed[col].nunique() <= 1:\n",
    "                constant_cols.append(col)\n",
    "    \n",
    "    if len(constant_cols) > 0:\n",
    "        print(f\"删除 {len(constant_cols)} 个常量列\")\n",
    "        data_processed = data_processed.drop(columns=constant_cols)\n",
    "    \n",
    "    # 3. 删除唯一值列 (每个值都不同)\n",
    "    unique_cols = []\n",
    "    for col in data_processed.columns:\n",
    "        if col not in ['CUST_NO', 'FLAG']:\n",
    "            if data_processed[col].nunique() == len(data_processed):\n",
    "                unique_cols.append(col)\n",
    "    \n",
    "    if len(unique_cols) > 0:\n",
    "        print(f\"删除 {len(unique_cols)} 个唯一值列\")\n",
    "        data_processed = data_processed.drop(columns=unique_cols)\n",
    "    \n",
    "    # 4. 处理无穷值\n",
    "    numeric_cols = data_processed.select_dtypes(include=[np.number]).columns.tolist()\n",
    "    numeric_cols = [col for col in numeric_cols if col not in ['CUST_NO', 'FLAG']]\n",
    "    \n",
    "    inf_count = 0\n",
    "    for col in numeric_cols:\n",
    "        inf_mask = np.isinf(data_processed[col])\n",
    "        if inf_mask.any():\n",
    "            inf_count += inf_mask.sum()\n",
    "            # 将正无穷替换为该列最大有限值，负无穷替换为最小有限值\n",
    "            finite_vals = data_processed.loc[~inf_mask, col]\n",
    "            if len(finite_vals) > 0:\n",
    "                data_processed.loc[data_processed[col] == np.inf, col] = finite_vals.max()\n",
    "                data_processed.loc[data_processed[col] == -np.inf, col] = finite_vals.min()\n",
    "            else:\n",
    "                data_processed.loc[inf_mask, col] = np.nan\n",
    "    \n",
    "    if inf_count > 0:\n",
    "        print(f\"处理 {inf_count} 个无穷值\")\n",
    "    \n",
    "    print(f\"\\n预处理后数据形状: {data_processed.shape}\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    return data_processed\n",
    "\n",
    "# 执行预处理\n",
    "full_data_processed = preprocess_data(full_data)\n",
    "print(f\"\\n预处理完成!\")\n",
    "print(f\"特征数: {full_data_processed.shape[1] - 2}\")  # 减去CUST_NO和FLAG"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4c635e31",
   "metadata": {},
   "source": [
    "## AutoGluon模型训练配置"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "0ca79047",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Macro-F1评分器已创建\n",
      "评分器名称: macro_f1\n",
      "最优值: 1.0\n",
      "越大越好: True\n"
     ]
    }
   ],
   "source": [
    "# 定义Macro-F1评分函数\n",
    "from sklearn.metrics import f1_score\n",
    "\n",
    "def macro_f1_score(y_true, y_pred):\n",
    "    \"\"\"\n",
    "    计算Macro-F1分数\n",
    "    适用于多分类问题\n",
    "    \"\"\"\n",
    "    return f1_score(y_true, y_pred, average='macro')\n",
    "\n",
    "# 创建AutoGluon自定义评分器\n",
    "macro_f1_scorer = make_scorer(\n",
    "    name='macro_f1',\n",
    "    score_func=macro_f1_score,\n",
    "    optimum=1.0,\n",
    "    greater_is_better=True\n",
    ")\n",
    "\n",
    "print(\"Macro-F1评分器已创建\")\n",
    "print(f\"评分器名称: {macro_f1_scorer.name}\")\n",
    "print(f\"最优值: {macro_f1_scorer.optimum}\")\n",
    "print(f\"越大越好: {macro_f1_scorer.greater_is_better}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "f96d58d6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "准备训练数据...\n",
      "================================================================================\n",
      "训练集样本数: 5975\n",
      "测试集样本数: 0\n",
      "\n",
      "目标变量分布:\n",
      "  类别 1: 169 (2.83%)\n",
      "  类别 2: 73 (1.22%)\n",
      "  类别 4: 2987 (49.99%)\n",
      "  类别 5: 1068 (17.87%)\n",
      "  类别 7: 137 (2.29%)\n",
      "  类别 8: 280 (4.69%)\n",
      "  类别 9: 1250 (20.92%)\n",
      "  类别 10: 11 (0.18%)\n",
      "\n",
      "测试集客户数: 0\n",
      "\n",
      "最终特征数: 1240\n",
      "================================================================================\n"
     ]
    }
   ],
   "source": [
    "# Dataset preparation: split the combined frame into train / test by FLAG\n",
    "print(\"\\n准备训练数据...\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "# Rows with a FLAG value are training data; rows with a missing FLAG are the test set\n",
    "train_data = full_data_processed[full_data_processed['FLAG'].notna()].copy()\n",
    "test_data = full_data_processed[full_data_processed['FLAG'].isna()].copy()\n",
    "\n",
    "print(f\"训练集样本数: {len(train_data)}\")\n",
    "print(f\"测试集样本数: {len(test_data)}\")\n",
    "\n",
    "# Guard: an empty test set means the FLAG-missing convention does not hold for\n",
    "# this data - every downstream prediction / submission cell would then fail or\n",
    "# silently produce an empty file. Surface the problem here, at the split.\n",
    "if len(test_data) == 0:\n",
    "    print(\"警告: 测试集为空! 请检查FLAG缺失值是否正确标记了测试样本\")\n",
    "\n",
    "# Target distribution of the training labels\n",
    "flag_dist = train_data['FLAG'].value_counts().sort_index()\n",
    "print(f\"\\n目标变量分布:\")\n",
    "for flag, count in flag_dist.items():\n",
    "    rate = count / len(train_data) * 100\n",
    "    print(f\"  类别 {int(flag)}: {count} ({rate:.2f}%)\")\n",
    "\n",
    "# Keep the test-set CUST_NO for building the submission file later\n",
    "test_cust_no = test_data['CUST_NO'].copy()\n",
    "print(f\"\\n测试集客户数: {len(test_cust_no)}\")\n",
    "\n",
    "# Feature columns: everything except the customer id (FLAG stays in - AutoGluon\n",
    "# consumes the label column from the same frame)\n",
    "feature_cols = [col for col in train_data.columns if col not in ['CUST_NO']]\n",
    "print(f\"\\n最终特征数: {len(feature_cols) - 1}\")  # minus FLAG\n",
    "print(\"=\"*80)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3402a578",
   "metadata": {},
   "source": [
    "## AutoGluon训练 - 阶段1: 快速基线模型"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c45bf36d",
   "metadata": {},
   "source": [
    "### 特征重要性计算优化说明\n",
    "\n",
    "**为什么特征重要性计算很慢?**\n",
    "\n",
    "AutoGluon使用 **排列特征重要性(Permutation Feature Importance)** 方法:\n",
    "1. 对每个特征进行随机打乱\n",
    "2. 观察模型性能下降程度\n",
    "3. 需要对1240个特征分别进行打乱和预测\n",
    "\n",
    "**优化参数说明:**\n",
    "\n",
    "| 参数 | 默认值 | 优化值(基线) | 优化值(主模型) | 说明 |\n",
    "|------|--------|-------------|---------------|------|\n",
    "| `subsample_size` | 全部样本(5975) | 1000 | 2000 | 减少计算样本数 |\n",
    "| `num_shuffle_sets` | 5或10 | 1 | 3 | 减少随机打乱次数 |\n",
    "| `time_limit` | None | 60秒 | 300秒 | 限制最大计算时间 |\n",
    "\n",
    "**预期加速效果:**\n",
    "- **原始**: ~3200秒 (53分钟)\n",
    "- **优化后**: ~60-300秒 (1-5分钟)\n",
    "- **加速比**: 10-50倍\n",
    "\n",
    "**注意**: 优化后的特征重要性仍然足够准确,适合快速迭代和特征筛选。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "9e5fcd51",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Warning: path already exists! This predictor may overwrite an existing predictor! path=\"./model/autogluon_baseline\"\n",
      "Verbosity: 2 (Standard Logging)\n",
      "=================== System Info ===================\n",
      "AutoGluon Version:  1.3.1\n",
      "Python Version:     3.10.18\n",
      "Operating System:   Windows\n",
      "Platform Machine:   AMD64\n",
      "Platform Version:   10.0.26100\n",
      "CPU Count:          32\n",
      "Memory Avail:       9.64 GB / 31.18 GB (30.9%)\n",
      "Disk Space Avail:   202.86 GB / 652.87 GB (31.1%)\n",
      "===================================================\n",
      "Presets specified: ['medium_quality']\n",
      "Beginning AutoGluon training ... Time limit = 600s\n",
      "AutoGluon will save models to \"d:\\DevProject\\5-Model\\star-cup2025\\Model\\model\\autogluon_baseline\"\n",
      "Train Data Rows:    5975\n",
      "Train Data Columns: 1240\n",
      "Label Column:       FLAG\n",
      "Problem Type:       multiclass\n",
      "Preprocessing data ...\n",
      "Train Data Class Count: 8\n",
      "Using Feature Generators to preprocess the data ...\n",
      "Fitting AutoMLPipelineFeatureGenerator...\n",
      "\tAvailable Memory:                    9870.82 MB\n",
      "\tTrain Data (Original)  Memory Usage: 56.39 MB (0.6% of available memory)\n",
      "\tInferring data type of each feature based on column values. Set feature_metadata_in to manually specify special dtypes of the features.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "================================================================================\n",
      "阶段1: 快速基线模型训练\n",
      "================================================================================\n",
      "\n",
      "模型输出目录: ./model/autogluon_baseline\n",
      "训练数据形状: (5975, 1241)\n",
      "特征数: 1240\n",
      "目标变量: FLAG (类别数: 8)\n",
      "\n",
      "训练配置:\n",
      "  - 预设质量: medium_quality\n",
      "  - 时间限制: 600秒 (10分钟)\n",
      "  - 评估指标: macro_f1\n",
      "  - 交叉验证: 5折\n",
      "\n",
      "开始训练...(预计 10 分钟)\n",
      "--------------------------------------------------------------------------------\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\tStage 1 Generators:\n",
      "\t\tFitting AsTypeFeatureGenerator...\n",
      "\t\t\tNote: Converting 9 features to boolean dtype as they only contain 2 unique values.\n",
      "\tStage 2 Generators:\n",
      "\t\tFitting FillNaFeatureGenerator...\n",
      "\tStage 3 Generators:\n",
      "\t\tFitting IdentityFeatureGenerator...\n",
      "\tStage 4 Generators:\n",
      "\t\tFitting DropUniqueFeatureGenerator...\n",
      "\tStage 5 Generators:\n",
      "\t\tFitting DropDuplicatesFeatureGenerator...\n",
      "\tUnused Original Features (Count: 38): ['AST_AUM_BAL_MAX', 'total_page_visits', 'total_model_visits', 'module_d70f9f4b8952b0a2ad8879c7eb3d8813_last_visit', 'module_f922b886e82201a198513c4f00408a4b_last_visit', 'module_64def4748e4131f437ea29acb56ad113_last_visit', 'total_visit_count', 'daily_visits_mean', 'daily_visits_max', 'daily_visits_min', 'daily_visits_std', 'first_visit_days', 'last_visit_days', 'm0_visit_count', 'm0_page_nunique', 'm0_module_nunique', 'm1_visit_count', 'm1_page_nunique', 'm1_module_nunique', 'm2_visit_count', 'm2_page_nunique', 'm2_module_nunique', 'daily_unique_pages_mean', 'daily_unique_pages_max', 'daily_unique_pages_std', 'mb_trnflw_transcode_nunique_last_90d', 'mb_trnflw_top19_transcode_amount_sum', 'mb_trnflw_top19_transcode_amount_mean', 'mb_trnflw_top20_transcode_amount_mean', 'NATURE_AGE_BY_SEX_STD', 'aps_txn_count_7d', 'aps_txn_count_15d', 'aps_txn_count_30d', 'aps_txn_count_60d', 'aps_txn_count_90d', 'aps_last_month_amt', 'TPAY_MOTH_NET_RATE', 'TPAY_SEAN_NET_RATE']\n",
      "\t\tThese features were not used to generate any of the output features. Add a feature generator compatible with these features to utilize them.\n",
      "\t\tFeatures can also be unused if they carry very little information, such as being categorical but having almost entirely unique values or being duplicates of other features.\n",
      "\t\tThese features do not need to be present at inference time.\n",
      "\t\t('float', []) : 38 | ['AST_AUM_BAL_MAX', 'total_page_visits', 'total_model_visits', 'module_d70f9f4b8952b0a2ad8879c7eb3d8813_last_visit', 'module_f922b886e82201a198513c4f00408a4b_last_visit', ...]\n",
      "\tTypes of features in original data (raw dtype, special dtypes):\n",
      "\t\t('float', []) : 1192 | ['aget_pay_count', 'aget_pay_unit_count', 'tr_amt_sum', 'tr_amt_mean', 'tr_amt_std', ...]\n",
      "\t\t('int', [])   :   10 | ['NATURE_SEX_CD', 'NATURE_RANK_CD', 'NATURE_SEAN_ACTV_IND', 'NATURE_AGE_GROUP', 'NATURE_SEX_RANK_INTERACT', ...]\n",
      "\tTypes of features in processed data (raw dtype, special dtypes):\n",
      "\t\t('float', [])     : 1191 | ['aget_pay_count', 'aget_pay_unit_count', 'tr_amt_sum', 'tr_amt_mean', 'tr_amt_std', ...]\n",
      "\t\t('int', [])       :    3 | ['NATURE_RANK_CD', 'NATURE_AGE_GROUP', 'NATURE_SEX_RANK_INTERACT']\n",
      "\t\t('int', ['bool']) :    8 | ['NATURE_SEX_CD', 'NATURE_SEAN_ACTV_IND', 'NATURE_AGE_BY_SEX_MEAN', 'NATURE_IS_YOUNG', 'NATURE_IS_MIDDLE', ...]\n",
      "\t4.8s = Fit runtime\n",
      "\t1202 features in original data used to generate 1202 features in processed data.\n",
      "\tTrain Data (Processed) Memory Usage: 54.45 MB (0.6% of available memory)\n",
      "Data preprocessing and feature engineering runtime = 4.92s ...\n",
      "AutoGluon will gauge predictive performance using evaluation metric: 'macro_f1'\n",
      "\tTo change this, specify the eval_metric parameter of Predictor()\n",
      "User-specified model hyperparameters to be fit:\n",
      "{\n",
      "\t'NN_TORCH': [{}],\n",
      "\t'GBM': [{'extra_trees': True, 'ag_args': {'name_suffix': 'XT'}}, {}, {'learning_rate': 0.03, 'num_leaves': 128, 'feature_fraction': 0.9, 'min_data_in_leaf': 3, 'ag_args': {'name_suffix': 'Large', 'priority': 0, 'hyperparameter_tune_kwargs': None}}],\n",
      "\t'CAT': [{}],\n",
      "\t'XGB': [{}],\n",
      "\t'FASTAI': [{}],\n",
      "\t'RF': [{'criterion': 'gini', 'ag_args': {'name_suffix': 'Gini', 'problem_types': ['binary', 'multiclass']}}, {'criterion': 'entropy', 'ag_args': {'name_suffix': 'Entr', 'problem_types': ['binary', 'multiclass']}}, {'criterion': 'squared_error', 'ag_args': {'name_suffix': 'MSE', 'problem_types': ['regression', 'quantile']}}],\n",
      "\t'XT': [{'criterion': 'gini', 'ag_args': {'name_suffix': 'Gini', 'problem_types': ['binary', 'multiclass']}}, {'criterion': 'entropy', 'ag_args': {'name_suffix': 'Entr', 'problem_types': ['binary', 'multiclass']}}, {'criterion': 'squared_error', 'ag_args': {'name_suffix': 'MSE', 'problem_types': ['regression', 'quantile']}}],\n",
      "\t'KNN': [{'weights': 'uniform', 'ag_args': {'name_suffix': 'Unif'}}, {'weights': 'distance', 'ag_args': {'name_suffix': 'Dist'}}],\n",
      "}\n",
      "Fitting 13 L1 models, fit_strategy=\"sequential\" ...\n",
      "Fitting model: KNeighborsUnif_BAG_L1 ... Training model for up to 595.08s of the 595.08s of remaining time.\n",
      "\t0.2808\t = Validation score   (macro_f1)\n",
      "\t0.15s\t = Training   runtime\n",
      "\t0.37s\t = Validation runtime\n",
      "Fitting model: KNeighborsDist_BAG_L1 ... Training model for up to 594.39s of the 594.39s of remaining time.\n",
      "\t0.2036\t = Validation score   (macro_f1)\n",
      "\t0.16s\t = Training   runtime\n",
      "\t0.36s\t = Validation runtime\n",
      "Fitting model: NeuralNetFastAI_BAG_L1 ... Training model for up to 593.71s of the 593.71s of remaining time.\n",
      "\tFitting 5 child models (S1F1 - S1F5) | Fitting with SequentialLocalFoldFittingStrategy\n",
      "\tWarning: Exception caused NeuralNetFastAI_BAG_L1 to fail during training (ImportError)... Skipping this model.\n",
      "\t\tImport fastai failed. A quick tip is to install via `pip install autogluon.tabular[fastai]==1.3.1`. \n",
      "Fitting model: LightGBMXT_BAG_L1 ... Training model for up to 593.14s of the 593.14s of remaining time.\n",
      "\tFitting 5 child models (S1F1 - S1F5) | Fitting with SequentialLocalFoldFittingStrategy\n",
      "\t0.547\t = Validation score   (macro_f1)\n",
      "\t129.86s\t = Training   runtime\n",
      "\t0.1s\t = Validation runtime\n",
      "Fitting model: LightGBM_BAG_L1 ... Training model for up to 462.56s of the 462.56s of remaining time.\n",
      "\tFitting 5 child models (S1F1 - S1F5) | Fitting with SequentialLocalFoldFittingStrategy\n",
      "\t0.5486\t = Validation score   (macro_f1)\n",
      "\t148.37s\t = Training   runtime\n",
      "\t0.1s\t = Validation runtime\n",
      "Fitting model: RandomForestGini_BAG_L1 ... Training model for up to 313.33s of the 313.33s of remaining time.\n",
      "\t0.5011\t = Validation score   (macro_f1)\n",
      "\t1.29s\t = Training   runtime\n",
      "\t1.45s\t = Validation runtime\n",
      "Fitting model: RandomForestEntr_BAG_L1 ... Training model for up to 310.35s of the 310.35s of remaining time.\n",
      "\t0.5126\t = Validation score   (macro_f1)\n",
      "\t1.62s\t = Training   runtime\n",
      "\t1.47s\t = Validation runtime\n",
      "Fitting model: CatBoost_BAG_L1 ... Training model for up to 307.08s of the 307.08s of remaining time.\n",
      "\tFitting 5 child models (S1F1 - S1F5) | Fitting with SequentialLocalFoldFittingStrategy\n",
      "\tMany features detected (1202), dynamically setting 'colsample_bylevel' to 0.831946755407654 to speed up training (Default = 1).\n",
      "\tTo disable this functionality, explicitly specify 'colsample_bylevel' in the model hyperparameters.\n",
      "\tRan out of time, early stopping on iteration 153.\n",
      "\tMany features detected (1202), dynamically setting 'colsample_bylevel' to 0.831946755407654 to speed up training (Default = 1).\n",
      "\tTo disable this functionality, explicitly specify 'colsample_bylevel' in the model hyperparameters.\n",
      "\tRan out of time, early stopping on iteration 161.\n",
      "\tMany features detected (1202), dynamically setting 'colsample_bylevel' to 0.831946755407654 to speed up training (Default = 1).\n",
      "\tTo disable this functionality, explicitly specify 'colsample_bylevel' in the model hyperparameters.\n",
      "\tRan out of time, early stopping on iteration 173.\n",
      "\tMany features detected (1202), dynamically setting 'colsample_bylevel' to 0.831946755407654 to speed up training (Default = 1).\n",
      "\tTo disable this functionality, explicitly specify 'colsample_bylevel' in the model hyperparameters.\n",
      "\tRan out of time, early stopping on iteration 197.\n",
      "\tMany features detected (1202), dynamically setting 'colsample_bylevel' to 0.831946755407654 to speed up training (Default = 1).\n",
      "\tTo disable this functionality, explicitly specify 'colsample_bylevel' in the model hyperparameters.\n",
      "\tRan out of time, early stopping on iteration 236.\n",
      "\t0.5219\t = Validation score   (macro_f1)\n",
      "\t287.98s\t = Training   runtime\n",
      "\t0.13s\t = Validation runtime\n",
      "Fitting model: ExtraTreesGini_BAG_L1 ... Training model for up to 18.61s of the 18.61s of remaining time.\n",
      "\t0.4615\t = Validation score   (macro_f1)\n",
      "\t1.0s\t = Training   runtime\n",
      "\t1.47s\t = Validation runtime\n",
      "Fitting model: ExtraTreesEntr_BAG_L1 ... Training model for up to 15.91s of the 15.91s of remaining time.\n",
      "\t0.4567\t = Validation score   (macro_f1)\n",
      "\t0.98s\t = Training   runtime\n",
      "\t1.47s\t = Validation runtime\n",
      "Fitting model: XGBoost_BAG_L1 ... Training model for up to 13.25s of the 13.25s of remaining time.\n",
      "\tFitting 5 child models (S1F1 - S1F5) | Fitting with SequentialLocalFoldFittingStrategy\n",
      "\t0.5474\t = Validation score   (macro_f1)\n",
      "\t12.23s\t = Training   runtime\n",
      "\t0.21s\t = Validation runtime\n",
      "Fitting model: WeightedEnsemble_L2 ... Training model for up to 360.00s of the 0.42s of remaining time.\n",
      "\tEnsemble Weights: {'LightGBM_BAG_L1': 0.944, 'RandomForestGini_BAG_L1': 0.056}\n",
      "\t0.5488\t = Validation score   (macro_f1)\n",
      "\t0.32s\t = Training   runtime\n",
      "\t0.0s\t = Validation runtime\n",
      "AutoGluon training complete, total runtime = 599.97s ... Best model: WeightedEnsemble_L2 | Estimated inference throughput: 3076.2 rows/s (1195 batch size)\n",
      "TabularPredictor saved. To load, use: predictor = TabularPredictor.load(\"d:\\DevProject\\5-Model\\star-cup2025\\Model\\model\\autogluon_baseline\")\n",
      "These features in provided data are not utilized by the predictor and will be ignored: ['AST_AUM_BAL_MAX', 'total_page_visits', 'total_model_visits', 'module_d70f9f4b8952b0a2ad8879c7eb3d8813_last_visit', 'module_f922b886e82201a198513c4f00408a4b_last_visit', 'module_64def4748e4131f437ea29acb56ad113_last_visit', 'total_visit_count', 'daily_visits_mean', 'daily_visits_max', 'daily_visits_min', 'daily_visits_std', 'first_visit_days', 'last_visit_days', 'm0_visit_count', 'm0_page_nunique', 'm0_module_nunique', 'm1_visit_count', 'm1_page_nunique', 'm1_module_nunique', 'm2_visit_count', 'm2_page_nunique', 'm2_module_nunique', 'daily_unique_pages_mean', 'daily_unique_pages_max', 'daily_unique_pages_std', 'mb_trnflw_transcode_nunique_last_90d', 'mb_trnflw_top19_transcode_amount_sum', 'mb_trnflw_top19_transcode_amount_mean', 'mb_trnflw_top20_transcode_amount_mean', 'NATURE_AGE_BY_SEX_STD', 'aps_txn_count_7d', 'aps_txn_count_15d', 'aps_txn_count_30d', 'aps_txn_count_60d', 'aps_txn_count_90d', 'aps_last_month_amt', 'TPAY_MOTH_NET_RATE', 'TPAY_SEAN_NET_RATE']\n",
      "Computing feature importance via permutation shuffling for 1202 features using 1000 rows with 1 shuffle sets... Time limit: 60s...\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "================================================================================\n",
      "基线模型训练完成!\n",
      "================================================================================\n",
      "\n",
      "模型排行榜 (Top 10):\n",
      "                     model  score_val eval_metric  pred_time_val    fit_time  \\\n",
      "0      WeightedEnsemble_L2   0.548812    macro_f1       1.551278  149.984557   \n",
      "1          LightGBM_BAG_L1   0.548577    macro_f1       0.097759  148.366512   \n",
      "2           XGBoost_BAG_L1   0.547363    macro_f1       0.211467   12.233132   \n",
      "3        LightGBMXT_BAG_L1   0.546999    macro_f1       0.095562  129.862004   \n",
      "4          CatBoost_BAG_L1   0.521944    macro_f1       0.126367  287.981558   \n",
      "5  RandomForestEntr_BAG_L1   0.512627    macro_f1       1.465784    1.620651   \n",
      "6  RandomForestGini_BAG_L1   0.501106    macro_f1       1.452518    1.293785   \n",
      "7    ExtraTreesGini_BAG_L1   0.461482    macro_f1       1.467251    0.998220   \n",
      "8    ExtraTreesEntr_BAG_L1   0.456659    macro_f1       1.471992    0.977473   \n",
      "9    KNeighborsUnif_BAG_L1   0.280833    macro_f1       0.369342    0.154567   \n",
      "\n",
      "   pred_time_val_marginal  fit_time_marginal  stack_level  can_infer  \\\n",
      "0                0.001001           0.324261            2       True   \n",
      "1                0.097759         148.366512            1       True   \n",
      "2                0.211467          12.233132            1       True   \n",
      "3                0.095562         129.862004            1       True   \n",
      "4                0.126367         287.981558            1       True   \n",
      "5                1.465784           1.620651            1       True   \n",
      "6                1.452518           1.293785            1       True   \n",
      "7                1.467251           0.998220            1       True   \n",
      "8                1.471992           0.977473            1       True   \n",
      "9                0.369342           0.154567            1       True   \n",
      "\n",
      "   fit_order  \n",
      "0         11  \n",
      "1          4  \n",
      "2         10  \n",
      "3          3  \n",
      "4          7  \n",
      "5          6  \n",
      "6          5  \n",
      "7          8  \n",
      "8          9  \n",
      "9          1  \n",
      "\n",
      "最佳模型: WeightedEnsemble_L2\n",
      "最佳分数 (Macro-F1): 0.548812\n",
      "\n",
      "计算特征重要性...\n",
      "提示: 使用优化参数加速计算 (subsample_size=1000, num_shuffle_sets=1)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\t393.04s\t= Expected runtime (393.04s per shuffle set)\n",
      "\t64.6s\t= Actual runtime (Completed 1 of 1 shuffle sets)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Top 20 重要特征:\n",
      "                          importance  stddev  p_value  n  p99_high  p99_low\n",
      "NTRL_CUST_AGE               0.173154     NaN      NaN  1       NaN      NaN\n",
      "AST_YAVER_AUM_BAL           0.099054     NaN      NaN  1       NaN      NaN\n",
      "DEBT_LOAN_BAL_YAVER         0.050940     NaN      NaN  1       NaN      NaN\n",
      "NATURE_AGE_BY_RANK_MEAN     0.016385     NaN      NaN  1       NaN      NaN\n",
      "NATURE_AGE_BY_SEX_DIFF      0.015211     NaN      NaN  1       NaN      NaN\n",
      "NATURE_AGE_SEX_INTERACT     0.010563     NaN      NaN  1       NaN      NaN\n",
      "ASSET_SAVER_FA_BAL          0.010156     NaN      NaN  1       NaN      NaN\n",
      "AST_SAVER_AUM_BAL           0.009518     NaN      NaN  1       NaN      NaN\n",
      "ASSET_YAVER_DPSA_DIV_TD     0.005056     NaN      NaN  1       NaN      NaN\n",
      "NATURE_AGE_RANK_INTERACT    0.004885     NaN      NaN  1       NaN      NaN\n",
      "ASSET_SAVER_TD_BAL          0.003232     NaN      NaN  1       NaN      NaN\n",
      "ASSET_SAVER_TD_DIV_AUM      0.003232     NaN      NaN  1       NaN      NaN\n",
      "ASSET_MAVER_TD_DIV_AUM      0.002765     NaN      NaN  1       NaN      NaN\n",
      "NATURE_AGE_BY_RANK_DIFF     0.002316     NaN      NaN  1       NaN      NaN\n",
      "main_unit_ratio             0.002168     NaN      NaN  1       NaN      NaN\n",
      "aps_interval_min_90d        0.002168     NaN      NaN  1       NaN      NaN\n",
      "aps_month_mom_change        0.002168     NaN      NaN  1       NaN      NaN\n",
      "ASSET_DP_CV                 0.001832     NaN      NaN  1       NaN      NaN\n",
      "aps_amt_ratio_60d_90d       0.001665     NaN      NaN  1       NaN      NaN\n",
      "ASSET_AUM_CV                0.001434     NaN      NaN  1       NaN      NaN\n"
     ]
    }
   ],
   "source": [
    "# Quick baseline model training\n",
    "# Fast sanity check of data quality and an initial performance baseline\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"阶段1: 快速基线模型训练\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "# Prepare the training frame (features + FLAG label)\n",
    "train_data_ag = train_data[feature_cols].copy()\n",
    "\n",
    "# Ensure FLAG is an integer label\n",
    "train_data_ag['FLAG'] = train_data_ag['FLAG'].astype(int)\n",
    "\n",
    "# Output directory for the baseline predictor\n",
    "baseline_output_dir = './model/autogluon_baseline'\n",
    "os.makedirs(baseline_output_dir, exist_ok=True)\n",
    "\n",
    "print(f\"\\n模型输出目录: {baseline_output_dir}\")\n",
    "print(f\"训练数据形状: {train_data_ag.shape}\")\n",
    "print(f\"特征数: {train_data_ag.shape[1] - 1}\")\n",
    "print(f\"目标变量: FLAG (类别数: {train_data_ag['FLAG'].nunique()})\")\n",
    "\n",
    "# Fast training configuration\n",
    "baseline_time_limit = 600  # 10-minute smoke test\n",
    "baseline_preset = 'medium_quality'\n",
    "\n",
    "print(f\"\\n训练配置:\")\n",
    "print(f\"  - 预设质量: {baseline_preset}\")\n",
    "print(f\"  - 时间限制: {baseline_time_limit}秒 ({baseline_time_limit//60}分钟)\")\n",
    "print(f\"  - 评估指标: macro_f1\")\n",
    "print(f\"  - 交叉验证: 5折\")\n",
    "\n",
    "print(f\"\\n开始训练...(预计 {baseline_time_limit//60} 分钟)\")\n",
    "print(\"-\"*80)\n",
    "\n",
    "# Train the baseline model\n",
    "try:\n",
    "    predictor_baseline = TabularPredictor(\n",
    "        label='FLAG',\n",
    "        problem_type='multiclass',\n",
    "        eval_metric=macro_f1_scorer,\n",
    "        path=baseline_output_dir,\n",
    "        verbosity=2\n",
    "    ).fit(\n",
    "        train_data=train_data_ag,\n",
    "        time_limit=baseline_time_limit,\n",
    "        presets=baseline_preset,\n",
    "        num_bag_folds=5,\n",
    "        num_bag_sets=1,\n",
    "        num_stack_levels=0,  # no stacking for the quick run\n",
    "        hyperparameters='default',\n",
    "        verbosity=2\n",
    "    )\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*80)\n",
    "    print(\"基线模型训练完成!\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    # Model leaderboard. leaderboard() returns a DataFrame and does not display\n",
    "    # by default in AutoGluon >= 1.0 (the old silent= kwarg is deprecated).\n",
    "    leaderboard_baseline = predictor_baseline.leaderboard()\n",
    "    print(\"\\n模型排行榜 (Top 10):\")\n",
    "    print(leaderboard_baseline.head(10))\n",
    "    \n",
    "    # Best model (the leaderboard is sorted by validation score)\n",
    "    best_model_name = leaderboard_baseline.iloc[0]['model']\n",
    "    best_score = leaderboard_baseline.iloc[0]['score_val']\n",
    "    print(f\"\\n最佳模型: {best_model_name}\")\n",
    "    print(f\"最佳分数 (Macro-F1): {best_score:.6f}\")\n",
    "    \n",
    "    # Feature importance via permutation shuffling (optimized for speed)\n",
    "    print(f\"\\n计算特征重要性...\")\n",
    "    print(f\"提示: 使用优化参数加速计算 (subsample_size=1000, num_shuffle_sets=1)\")\n",
    "    \n",
    "    feature_importance = predictor_baseline.feature_importance(\n",
    "        data=train_data_ag,\n",
    "        subsample_size=1000,  # use only 1000 rows (down from the full 5975)\n",
    "        num_shuffle_sets=1,   # shuffle once (down from 5)\n",
    "        time_limit=60,        # cap at 60 seconds\n",
    "        silent=False\n",
    "    )\n",
    "    print(f\"\\nTop 20 重要特征:\")\n",
    "    print(feature_importance.head(20))\n",
    "    \n",
    "except Exception as e:\n",
    "    print(f\"\\n训练出错: {str(e)}\")\n",
    "    import traceback\n",
    "    traceback.print_exc()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b4163b0c",
   "metadata": {},
   "source": [
    "## AutoGluon训练 - 阶段2: 高质量模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8671a25f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# High-quality model training - leverage AutoGluon's full capability\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"阶段2: 高质量模型训练 (针对Macro-F1优化)\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "# Output directory for the main predictor\n",
    "main_output_dir = './model/autogluon_main'\n",
    "os.makedirs(main_output_dir, exist_ok=True)\n",
    "\n",
    "print(f\"\\n模型输出目录: {main_output_dir}\")\n",
    "\n",
    "# High-quality training configuration\n",
    "main_time_limit = 3600 * 4  # 4 hours\n",
    "main_preset = 'best_quality'\n",
    "\n",
    "# Custom hyperparameters - tuned for multi-class, imbalanced data\n",
    "hyperparameters = {\n",
    "    'GBM': [\n",
    "        {'extra_trees': True, 'ag_args': {'name_suffix': 'XT'}},\n",
    "        {},\n",
    "        'GBMLarge',\n",
    "    ],\n",
    "    'CAT': {},\n",
    "    'XGB': {},\n",
    "    'FASTAI': {},\n",
    "    'NN_TORCH': {},\n",
    "    'LR': {},\n",
    "    'RF': [\n",
    "        {'criterion': 'gini', 'ag_args': {'name_suffix': 'Gini', 'problem_types': ['binary', 'multiclass']}},\n",
    "        {'criterion': 'entropy', 'ag_args': {'name_suffix': 'Entr', 'problem_types': ['binary', 'multiclass']}},\n",
    "    ],\n",
    "    'XT': [\n",
    "        {'criterion': 'gini', 'ag_args': {'name_suffix': 'Gini', 'problem_types': ['binary', 'multiclass']}},\n",
    "        {'criterion': 'entropy', 'ag_args': {'name_suffix': 'Entr', 'problem_types': ['binary', 'multiclass']}},\n",
    "    ],\n",
    "}\n",
    "\n",
    "# Sample weighting for class imbalance: compute balanced class weights\n",
    "from sklearn.utils.class_weight import compute_class_weight\n",
    "class_weights = compute_class_weight(\n",
    "    'balanced',\n",
    "    classes=np.unique(train_data_ag['FLAG']),\n",
    "    y=train_data_ag['FLAG']\n",
    ")\n",
    "class_weight_dict = dict(zip(np.unique(train_data_ag['FLAG']), class_weights))\n",
    "\n",
    "print(f\"\\n类别权重 (用于处理样本不平衡):\")\n",
    "for cls, weight in class_weight_dict.items():\n",
    "    print(f\"  类别 {int(cls)}: {weight:.4f}\")\n",
    "\n",
    "# Map class weights to per-row sample weights\n",
    "sample_weights = train_data_ag['FLAG'].map(class_weight_dict).values\n",
    "\n",
    "# AutoGluon expects sample weights as a COLUMN of the training data whose name\n",
    "# is passed to the TabularPredictor constructor; fit() has no sample_weight kwarg.\n",
    "train_data_weighted = train_data_ag.copy()\n",
    "train_data_weighted['SAMPLE_WEIGHT'] = sample_weights\n",
    "\n",
    "print(f\"\\n训练配置:\")\n",
    "print(f\"  - 预设质量: {main_preset}\")\n",
    "print(f\"  - 时间限制: {main_time_limit}秒 ({main_time_limit//3600}小时)\")\n",
    "print(f\"  - 评估指标: macro_f1\")\n",
    "print(f\"  - 交叉验证: 5折 Bagging\")\n",
    "print(f\"  - Stacking层数: 1层\")\n",
    "print(f\"  - 样本权重: 启用 (处理类别不平衡)\")\n",
    "print(f\"  - 自定义模型: LightGBM, CatBoost, XGBoost, RandomForest, ExtraTrees, NeuralNet, FastAI, LR\")\n",
    "\n",
    "print(f\"\\n开始训练...(预计 {main_time_limit//3600} 小时)\")\n",
    "print(\"提示: 这将需要较长时间,请耐心等待\")\n",
    "print(\"-\"*80)\n",
    "\n",
    "# Train the main model\n",
    "try:\n",
    "    predictor_main = TabularPredictor(\n",
    "        label='FLAG',\n",
    "        problem_type='multiclass',\n",
    "        eval_metric=macro_f1_scorer,\n",
    "        path=main_output_dir,\n",
    "        sample_weight='SAMPLE_WEIGHT',  # name of the weight column added above\n",
    "        verbosity=2\n",
    "    ).fit(\n",
    "        train_data=train_data_weighted,\n",
    "        time_limit=main_time_limit,\n",
    "        presets=main_preset,\n",
    "        hyperparameters=hyperparameters,\n",
    "        num_bag_folds=5,  # 5-fold bagging\n",
    "        num_bag_sets=1,\n",
    "        num_stack_levels=1,  # one stacking layer\n",
    "        ag_args_fit={\n",
    "            'num_cpus': 'auto',\n",
    "            'num_gpus': 'auto',\n",
    "        },\n",
    "        excluded_model_types=['KNN'],  # skip KNN (too slow)\n",
    "        verbosity=2\n",
    "    )\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*80)\n",
    "    print(\"主模型训练完成!\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    # Full leaderboard (returns a DataFrame; not displayed by default in AutoGluon >= 1.0)\n",
    "    leaderboard_main = predictor_main.leaderboard()\n",
    "    print(\"\\n完整模型排行榜:\")\n",
    "    print(leaderboard_main)\n",
    "    \n",
    "    # Persist the leaderboard\n",
    "    leaderboard_path = os.path.join(main_output_dir, 'leaderboard.csv')\n",
    "    leaderboard_main.to_csv(leaderboard_path, index=False)\n",
    "    print(f\"\\n排行榜已保存到: {leaderboard_path}\")\n",
    "    \n",
    "    # Best model info (get_model_best() was removed in AutoGluon 1.0 -> model_best)\n",
    "    best_model = predictor_main.model_best\n",
    "    best_score = leaderboard_main.iloc[0]['score_val']\n",
    "    print(f\"\\n最佳模型: {best_model}\")\n",
    "    print(f\"最佳验证分数 (Macro-F1): {best_score:.6f}\")\n",
    "    \n",
    "    # Feature importance analysis (optimized settings)\n",
    "    print(\"\\n计算特征重要性...\")\n",
    "    print(f\"提示: 使用优化参数加速计算\")\n",
    "    print(f\"  - subsample_size=2000 (从{len(train_data_ag)}减少)\")\n",
    "    print(f\"  - num_shuffle_sets=3 (平衡精度和速度)\")\n",
    "    print(f\"  - time_limit=300秒 (5分钟)\")\n",
    "    \n",
    "    feature_importance = predictor_main.feature_importance(\n",
    "        data=train_data_ag,\n",
    "        subsample_size=2000,  # more rows for the main model\n",
    "        num_shuffle_sets=3,   # 3 shuffles - balances accuracy and speed\n",
    "        time_limit=300,       # cap at 5 minutes\n",
    "        silent=False\n",
    "    )\n",
    "    \n",
    "    # Persist feature importance\n",
    "    fi_path = os.path.join(main_output_dir, 'feature_importance.csv')\n",
    "    feature_importance.to_csv(fi_path)\n",
    "    print(f\"\\n特征重要性已保存到: {fi_path}\")\n",
    "    \n",
    "    print(f\"\\nTop 30 重要特征:\")\n",
    "    print(feature_importance.head(30))\n",
    "    \n",
    "    # Plot Top-20 feature importance. The returned frame is INDEXED by feature\n",
    "    # name (it has no 'feature' column - see the baseline cell's output), so the\n",
    "    # tick labels come from .index.\n",
    "    plt.figure(figsize=(10, 8))\n",
    "    top_features = feature_importance.head(20)\n",
    "    plt.barh(range(len(top_features)), top_features['importance'])\n",
    "    plt.yticks(range(len(top_features)), top_features.index)\n",
    "    plt.xlabel('Importance')\n",
    "    plt.title('Top 20 Feature Importance')\n",
    "    plt.gca().invert_yaxis()\n",
    "    plt.tight_layout()\n",
    "    fi_plot_path = os.path.join(main_output_dir, 'feature_importance_plot.png')\n",
    "    plt.savefig(fi_plot_path, dpi=300, bbox_inches='tight')\n",
    "    print(f\"特征重要性图已保存到: {fi_plot_path}\")\n",
    "    plt.close()\n",
    "    \n",
    "    # Model summary\n",
    "    print(f\"\\n模型摘要:\")\n",
    "    print(f\"  - 总模型数: {len(leaderboard_main)}\")\n",
    "    print(f\"  - 最佳模型: {best_model}\")\n",
    "    print(f\"  - 验证集Macro-F1: {best_score:.6f}\")\n",
    "    print(f\"  - 训练时间: {leaderboard_main.iloc[0]['fit_time']:.2f}秒\")\n",
    "    print(f\"  - 预测时间: {leaderboard_main.iloc[0]['pred_time_val']:.2f}秒\")\n",
    "    \n",
    "except Exception as e:\n",
    "    print(f\"\\n训练出错: {str(e)}\")\n",
    "    import traceback\n",
    "    traceback.print_exc()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2d63bc21",
   "metadata": {},
   "source": [
    "## 模型预测与提交文件生成"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f6be5807",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predict with the trained model and build the submission file\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"模型预测\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "# Prepare the test frame (drop the label column if present)\n",
    "test_data_ag = test_data[feature_cols].copy()\n",
    "test_data_ag = test_data_ag.drop(columns=['FLAG'], errors='ignore')\n",
    "\n",
    "print(f\"\\n测试集形状: {test_data_ag.shape}\")\n",
    "print(f\"测试样本数: {len(test_data_ag)}\")\n",
    "\n",
    "# Fail fast with an explicit message if the test split is empty (the earlier\n",
    "# FLAG-based split reported 0 test rows) instead of a cryptic error further down\n",
    "assert len(test_data_ag) > 0, '测试集为空: 请检查FLAG缺失值划分是否正确'\n",
    "\n",
    "# Predict with the main predictor\n",
    "print(f\"\\n使用模型进行预测...\")\n",
    "predictions = predictor_main.predict(test_data_ag)\n",
    "\n",
    "print(f\"预测完成!\")\n",
    "print(f\"预测结果形状: {predictions.shape}\")\n",
    "print(f\"预测类别: {np.unique(predictions)}\")\n",
    "\n",
    "# Distribution of predicted classes\n",
    "pred_dist = pd.Series(predictions).value_counts().sort_index()\n",
    "print(f\"\\n预测类别分布:\")\n",
    "for flag, count in pred_dist.items():\n",
    "    rate = count / len(predictions) * 100\n",
    "    print(f\"  类别 {int(flag)}: {count} ({rate:.2f}%)\")\n",
    "\n",
    "# Build the submission frame\n",
    "print(f\"\\n生成提交文件...\")\n",
    "submission = pd.DataFrame({\n",
    "    'CUST_NO': test_cust_no,\n",
    "    'FLAG': predictions.astype(int)\n",
    "})\n",
    "\n",
    "# Save the submission file\n",
    "submission_dir = './model/submissions'\n",
    "os.makedirs(submission_dir, exist_ok=True)\n",
    "\n",
    "# File name carries a timestamp and the validation score\n",
    "from datetime import datetime\n",
    "timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')\n",
    "submission_filename = f'upload_{timestamp}_score_{best_score:.6f}.csv'\n",
    "submission_path = os.path.join(submission_dir, submission_filename)\n",
    "\n",
    "# Save without a header, as the competition requires\n",
    "submission.to_csv(submission_path, index=False, header=False)\n",
    "\n",
    "print(f\"提交文件已保存到: {submission_path}\")\n",
    "print(f\"\\n提交文件预览:\")\n",
    "print(submission.head(10))\n",
    "\n",
    "# Also save a copy with a header for easy inspection\n",
    "submission_with_header_path = submission_path.replace('.csv', '_with_header.csv')\n",
    "submission.to_csv(submission_with_header_path, index=False, header=True)\n",
    "print(f\"\\n带表头版本已保存到: {submission_with_header_path}\")\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"预测完成!\")\n",
    "print(\"=\"*80)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "86081d6f",
   "metadata": {},
   "source": [
    "## 模型分析与优化建议"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1d66fc36",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Deep-dive analysis of model performance\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"模型性能深度分析\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "# 1. Model ensembling information\n",
    "print(\"\\n1. 模型融合信息:\")\n",
    "try:\n",
    "    model_names = predictor_main.model_names()\n",
    "    print(f\"   总模型数: {len(model_names)}\")\n",
    "    print(f\"   模型列表: {model_names[:10]}...\")  # show the first 10 only\n",
    "    \n",
    "    # Bagging / stacking info: WeightedEnsemble models mark the ensemble layer\n",
    "    weighted_ensemble = [m for m in model_names if 'WeightedEnsemble' in m]\n",
    "    if weighted_ensemble:\n",
    "        print(f\"   集成模型: {weighted_ensemble}\")\n",
    "except Exception:\n",
    "    # Best-effort section. FIX: a bare 'except:' here would also swallow\n",
    "    # KeyboardInterrupt/SystemExit; catch Exception instead.\n",
    "    pass\n",
    "\n",
    "# 2. Cross-validation performance\n",
    "print(\"\\n2. 交叉验证性能分析:\")\n",
    "try:\n",
    "    # Fetch the validation score of every trained model\n",
    "    leaderboard_full = predictor_main.leaderboard(silent=True)\n",
    "    \n",
    "    print(f\"\\n   Top 5 模型:\")\n",
    "    for idx, row in leaderboard_full.head(5).iterrows():\n",
    "        print(f\"   {row['model']}: {row['score_val']:.6f}\")\n",
    "    \n",
    "    # Compare base models against ensemble/stacked models\n",
    "    # (assumes the leaderboard is sorted by score_val descending — AutoGluon default)\n",
    "    base_models = leaderboard_full[~leaderboard_full['model'].str.contains('Ensemble|Stack')]\n",
    "    if len(base_models) > 0:\n",
    "        print(f\"\\n   基础模型最佳: {base_models.iloc[0]['model']} - {base_models.iloc[0]['score_val']:.6f}\")\n",
    "    \n",
    "    ensemble_models = leaderboard_full[leaderboard_full['model'].str.contains('Ensemble|Stack')]\n",
    "    if len(ensemble_models) > 0:\n",
    "        print(f\"   集成模型最佳: {ensemble_models.iloc[0]['model']} - {ensemble_models.iloc[0]['score_val']:.6f}\")\n",
    "        \n",
    "except Exception as e:\n",
    "    print(f\"   分析失败: {str(e)}\")\n",
    "\n",
    "# 3. Feature importance analysis\n",
    "print(\"\\n3. 特征重要性分析:\")\n",
    "try:\n",
    "    # Top features (assumes feature_importance is sorted by importance descending)\n",
    "    top_10_features = feature_importance.head(10)['feature'].tolist()\n",
    "    print(f\"   Top 10 最重要特征:\")\n",
    "    for i, feat in enumerate(top_10_features, 1):\n",
    "        imp = feature_importance[feature_importance['feature'] == feat]['importance'].values[0]\n",
    "        print(f\"   {i}. {feat}: {imp:.4f}\")\n",
    "    \n",
    "    # Cumulative importance: how much of the total the top-k features explain\n",
    "    total_importance = feature_importance['importance'].sum()\n",
    "    top_20_importance = feature_importance.head(20)['importance'].sum()\n",
    "    top_50_importance = feature_importance.head(50)['importance'].sum()\n",
    "    \n",
    "    print(f\"\\n   重要性累积:\")\n",
    "    print(f\"   Top 20 特征贡献: {top_20_importance/total_importance*100:.2f}%\")\n",
    "    print(f\"   Top 50 特征贡献: {top_50_importance/total_importance*100:.2f}%\")\n",
    "    \n",
    "except Exception as e:\n",
    "    print(f\"   分析失败: {str(e)}\")\n",
    "\n",
    "# 4. Prediction-probability (confidence) analysis\n",
    "print(\"\\n4. 预测置信度分析:\")\n",
    "try:\n",
    "    # Per-class prediction probabilities (AutoGluon returns a DataFrame,\n",
    "    # one column per class label)\n",
    "    pred_proba = predictor_main.predict_proba(test_data_ag)\n",
    "    \n",
    "    # Confidence = highest class probability of each sample\n",
    "    max_proba = pred_proba.max(axis=1)\n",
    "    \n",
    "    print(f\"   预测置信度统计:\")\n",
    "    print(f\"   平均置信度: {max_proba.mean():.4f}\")\n",
    "    print(f\"   最小置信度: {max_proba.min():.4f}\")\n",
    "    print(f\"   最大置信度: {max_proba.max():.4f}\")\n",
    "    print(f\"   中位数置信度: {np.median(max_proba):.4f}\")\n",
    "    \n",
    "    # Low-confidence predictions (candidates for threshold tuning / review)\n",
    "    low_confidence = (max_proba < 0.5).sum()\n",
    "    print(f\"   低置信度样本数 (<0.5): {low_confidence} ({low_confidence/len(max_proba)*100:.2f}%)\")\n",
    "    \n",
    "    # Save the prediction probabilities.\n",
    "    # BUGFIX: pd.DataFrame(pred_proba, columns=[f'prob_class_{i}' ...]) REINDEXES\n",
    "    # an existing DataFrame to the given (nonexistent) column names, producing an\n",
    "    # all-NaN frame. Copy and rename the columns instead.\n",
    "    pred_proba_path = os.path.join(main_output_dir, 'prediction_probabilities.csv')\n",
    "    pred_proba_df = pred_proba.copy()\n",
    "    pred_proba_df.columns = [f'prob_class_{i}' for i in pred_proba.columns]\n",
    "    pred_proba_df['CUST_NO'] = test_cust_no.values\n",
    "    pred_proba_df['predicted_class'] = predictions\n",
    "    pred_proba_df['max_probability'] = max_proba\n",
    "    pred_proba_df.to_csv(pred_proba_path, index=False)\n",
    "    print(f\"   预测概率已保存到: {pred_proba_path}\")\n",
    "    \n",
    "except Exception as e:\n",
    "    print(f\"   分析失败: {str(e)}\")\n",
    "\n",
    "# 5. Optimization suggestions — static checklist printed for the reader\n",
    "# (the triple-quoted text below is a runtime string, not code)\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"5. 模型优化建议:\")\n",
    "print(\"=\"*80)\n",
    "print(\"\"\"\n",
    "基于当前模型性能,以下是优化建议:\n",
    "\n",
    "1. 特征工程优化:\n",
    "   - 根据特征重要性,重点优化Top 20特征\n",
    "   - 尝试特征交互 (组合重要特征)\n",
    "   - 添加时间序列特征 (趋势、周期性)\n",
    "   - 文本特征深度挖掘 (TF-IDF, Word2Vec)\n",
    "\n",
    "2. 数据增强:\n",
    "   - SMOTE过采样处理少数类\n",
    "   - 数据增强技术 (噪声注入、特征扰动)\n",
    "   - 半监督学习 (利用未标注数据)\n",
    "\n",
    "3. 模型优化:\n",
    "   - 增加训练时间 (time_limit)\n",
    "   - 调整hyperparameters (针对表现好的模型)\n",
    "   - 增加num_bag_folds (更多折数)\n",
    "   - 尝试不同的sample_weight策略\n",
    "\n",
    "4. 集成策略:\n",
    "   - 多个AutoGluon模型集成\n",
    "   - 与手工调参的LightGBM/CatBoost融合\n",
    "   - Blending不同seed的模型\n",
    "\n",
    "5. 后处理:\n",
    "   - 阈值优化 (针对每个类别)\n",
    "   - 概率校准 (Platt Scaling, Isotonic)\n",
    "   - 规则约束 (业务逻辑)\n",
    "\n",
    "6. 交叉验证策略:\n",
    "   - 使用StratifiedKFold确保类别平衡\n",
    "   - 时间序列数据使用TimeSeriesSplit\n",
    "   - GroupKFold防止数据泄露\n",
    "\"\"\")\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"分析完成!\")\n",
    "print(\"=\"*80)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "01558f9d",
   "metadata": {},
   "source": [
    "## 进阶优化 - 超参数调优 (可选)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d5587154",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Advanced optimization — hyperparameter tuning targeting the best models.\n",
    "# Optional cell: the whole snippet below is wrapped in a triple-quoted string\n",
    "# (i.e. disabled); remove the quotes to run it after the baseline looks good.\n",
    "\n",
    "\"\"\"\n",
    "本模块提供超参数调优功能\n",
    "如需运行,请取消注释以下代码\n",
    "\n",
    "# 超参数调优配置\n",
    "hyperparameter_tune_kwargs = {\n",
    "    'num_trials': 50,  # 调参次数\n",
    "    'scheduler': 'local',\n",
    "    'searcher': 'auto',  # 自动选择搜索策略\n",
    "}\n",
    "\n",
    "# 设置输出目录\n",
    "tuned_output_dir = './model/autogluon_tuned'\n",
    "os.makedirs(tuned_output_dir, exist_ok=True)\n",
    "\n",
    "print(\"\\\\n\" + \"=\"*80)\n",
    "print(\"阶段3: 超参数调优\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "try:\n",
    "    predictor_tuned = TabularPredictor(\n",
    "        label='FLAG',\n",
    "        problem_type='multiclass',\n",
    "        eval_metric=macro_f1_scorer,\n",
    "        path=tuned_output_dir,\n",
    "        verbosity=2\n",
    "    ).fit(\n",
    "        train_data=train_data_ag,\n",
    "        time_limit=3600 * 2,  # 2小时\n",
    "        presets='best_quality',\n",
    "        hyperparameters='default',\n",
    "        hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,\n",
    "        num_bag_folds=5,\n",
    "        num_stack_levels=1,\n",
    "        sample_weight=sample_weights,\n",
    "        verbosity=2\n",
    "    )\n",
    "    \n",
    "    print(\"\\\\n超参数调优完成!\")\n",
    "    leaderboard_tuned = predictor_tuned.leaderboard(silent=True)\n",
    "    print(\"\\\\n调优后模型排行榜:\")\n",
    "    print(leaderboard_tuned.head(10))\n",
    "    \n",
    "    best_tuned_score = leaderboard_tuned.iloc[0]['score_val']\n",
    "    print(f\"\\\\n调优后最佳分数: {best_tuned_score:.6f}\")\n",
    "    print(f\"原最佳分数: {best_score:.6f}\")\n",
    "    print(f\"提升: {(best_tuned_score - best_score):.6f}\")\n",
    "    \n",
    "except Exception as e:\n",
    "    print(f\"调优失败: {str(e)}\")\n",
    "    import traceback\n",
    "    traceback.print_exc()\n",
    "\"\"\"\n",
    "\n",
    "print(\"\\n超参数调优模块 (已注释)\")\n",
    "print(\"如需使用,请取消上方代码注释\")\n",
    "print(\"\\n提示: 超参数调优需要较长时间,建议在基线模型表现良好后再使用\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fdf4fce1",
   "metadata": {},
   "source": [
    "## 模型保存与加载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "befeed95",
   "metadata": {},
   "outputs": [],
   "source": [
    "# AutoGluon persists models automatically during fit(); nothing to save manually\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"模型保存与加载\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "print(f\"\\n模型已自动保存到: {main_output_dir}\")\n",
    "\n",
    "# How to reload the saved model later\n",
    "print(\"\\n如何加载已保存的模型:\")\n",
    "print(\"```python\")\n",
    "print(f\"from autogluon.tabular import TabularPredictor\")\n",
    "print(f\"predictor = TabularPredictor.load('{main_output_dir}')\")\n",
    "print(\"```\")\n",
    "\n",
    "# Layout of the model output directory\n",
    "print(\"\\n模型目录结构:\")\n",
    "print(f\"{main_output_dir}/\")\n",
    "print(\"  |- models/           # 所有训练的模型\")\n",
    "print(\"  |- utils/            # 工具文件\")\n",
    "print(\"  |- predictor.pkl     # 预测器对象\")\n",
    "print(\"  |- leaderboard.csv   # 模型排行榜\")\n",
    "print(\"  |- feature_importance.csv  # 特征重要性\")\n",
    "\n",
    "# Smoke-test: reload the predictor and verify it matches the in-memory one\n",
    "print(\"\\n测试加载模型...\")\n",
    "try:\n",
    "    loaded_predictor = TabularPredictor.load(main_output_dir)\n",
    "    print(\"模型加载成功!\")\n",
    "    # FIX: use the .model_best property — get_model_best() is the deprecated\n",
    "    # pre-1.x API, inconsistent with model_names() used earlier in this notebook.\n",
    "    print(f\"最佳模型: {loaded_predictor.model_best}\")\n",
    "    \n",
    "    # Verify prediction consistency on a small sample\n",
    "    test_sample = test_data_ag.head(5)\n",
    "    pred_original = predictor_main.predict(test_sample)\n",
    "    pred_loaded = loaded_predictor.predict(test_sample)\n",
    "    \n",
    "    if np.array_equal(pred_original, pred_loaded):\n",
    "        print(\"加载的模型预测结果与原模型一致!\")\n",
    "    else:\n",
    "        print(\"警告: 加载的模型预测结果与原模型不一致\")\n",
    "    \n",
    "except Exception as e:\n",
    "    print(f\"模型加载失败: {str(e)}\")\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1ee006b4",
   "metadata": {},
   "source": [
    "## 训练总结与下一步工作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "022a36fe",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Summary of the whole training pipeline\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"训练流程总结\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "# NOTE: the f-string below is runtime text (printed and written to markdown),\n",
    "# interpolating metrics/paths computed in earlier cells.\n",
    "summary = f\"\"\"\n",
    "## AutoGluon建模训练完整流程总结\n",
    "\n",
    "### 1. 数据准备\n",
    "- 加载的特征文件数: {len(features_dict)}\n",
    "- 合并后总样本数: {len(full_data)}\n",
    "- 训练集样本数: {len(train_data)}\n",
    "- 测试集样本数: {len(test_data)}\n",
    "- 最终特征数: {len(feature_cols) - 1}\n",
    "- 目标变量类别数: {train_data['FLAG'].nunique()}\n",
    "\n",
    "### 2. 模型训练\n",
    "- 训练策略: 两阶段训练 (快速baseline + 高质量主模型)\n",
    "- 评估指标: Macro-F1\n",
    "- 交叉验证: 5折 StratifiedKFold\n",
    "- 样本权重: 启用 (处理类别不平衡)\n",
    "- 集成策略: Bagging + Stacking\n",
    "- 模型类型: LightGBM, CatBoost, XGBoost, RandomForest, ExtraTrees, NeuralNet, FastAI, LR\n",
    "\n",
    "### 3. 模型性能\n",
    "- 最佳模型: {best_model}\n",
    "- 验证集Macro-F1: {best_score:.6f}\n",
    "- 总训练模型数: {len(leaderboard_main)}\n",
    "\n",
    "### 4. 输出文件\n",
    "- 模型目录: {main_output_dir}\n",
    "- 提交文件: {submission_path}\n",
    "- 排行榜: {os.path.join(main_output_dir, 'leaderboard.csv')}\n",
    "- 特征重要性: {os.path.join(main_output_dir, 'feature_importance.csv')}\n",
    "\n",
    "### 5. 下一步工作建议\n",
    "\n",
    "#### 5.1 特征优化方向\n",
    "- 基于特征重要性,深化Top特征的特征工程\n",
    "- 构建特征交互项 (重要特征之间的组合)\n",
    "- 时间窗口特征优化 (滑动窗口、趋势特征)\n",
    "- 文本特征深度挖掘 (Word2Vec, BERT embedding)\n",
    "\n",
    "#### 5.2 模型优化方向\n",
    "- 增加训练时间获得更好的模型\n",
    "- 尝试不同的ensemble权重\n",
    "- 多种AutoGluon模型投票融合\n",
    "- 与手工调参模型融合\n",
    "\n",
    "#### 5.3 样本优化方向\n",
    "- SMOTE/ADASYN过采样\n",
    "- 数据增强技术\n",
    "- 困难样本挖掘\n",
    "- 半监督学习\n",
    "\n",
    "#### 5.4 后处理优化\n",
    "- 阈值优化 (每个类别独立阈值)\n",
    "- 概率校准\n",
    "- 业务规则约束\n",
    "\n",
    "#### 5.5 验证策略\n",
    "- 时间序列交叉验证\n",
    "- 分层采样验证\n",
    "- 留出验证集\n",
    "- A/B榜一致性验证\n",
    "\n",
    "### 6. 关键参考资料\n",
    "- AutoGluon官方文档: https://auto.gluon.ai/\n",
    "- 往年赛题经验 (已分析)\n",
    "- 建模方案文档: 建模方案_v3.md\n",
    "\n",
    "### 7. 注意事项\n",
    "- 防止过拟合: 严格的交叉验证,不要在测试集上调参\n",
    "- 数据泄露检测: 检查特征是否包含未来信息\n",
    "- 类别不平衡: 持续关注各类别的F1 score\n",
    "- 时间一致性: 确保A/B榜数据处理一致\n",
    "- 可复现性: 固定随机种子,记录所有参数\n",
    "\n",
    "### 8. 提交前检查清单\n",
    "- [ ] 提交文件格式正确 (无表头,逗号分隔)\n",
    "- [ ] 客户号与预测结果一一对应\n",
    "- [ ] 预测类别在合法范围内 (1-10)\n",
    "- [ ] 没有缺失值\n",
    "- [ ] 文件编码为UTF-8\n",
    "- [ ] 样本数与测试集一致\n",
    "\n",
    "---\n",
    "生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n",
    "\"\"\"\n",
    "\n",
    "print(summary)\n",
    "\n",
    "# Save the summary as a markdown file next to the model artifacts\n",
    "summary_path = os.path.join(main_output_dir, 'training_summary.md')\n",
    "with open(summary_path, 'w', encoding='utf-8') as f:\n",
    "    f.write(summary)\n",
    "\n",
    "print(f\"\\n训练总结已保存到: {summary_path}\")\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"全部完成! 祝比赛取得好成绩!\")\n",
    "print(\"=\"*80)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "starcup",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
