{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "283025d5",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Silence all warnings to keep the notebook output readable.\n",
     "# NOTE(review): this hides deprecation/runtime warnings notebook-wide;\n",
     "# consider filtering specific warning categories instead.\n",
     "import warnings\n",
     "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "73bd3553",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import gc\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import re\n",
    "import time\n",
    "from scipy import stats\n",
    "import matplotlib.pyplot as plt\n",
    "import category_encoders as ce\n",
    "import networkx as nx\n",
    "import pickle\n",
    "import lightgbm as lgb\n",
    "import catboost as cat\n",
    "import xgboost as xgb\n",
    "from datetime import timedelta\n",
    "from gensim.models import Word2Vec\n",
    "from io import StringIO\n",
    "from tqdm import tqdm\n",
    "from lightgbm import LGBMClassifier\n",
    "from lightgbm import log_evaluation, early_stopping\n",
    "from sklearn.metrics import roc_curve\n",
    "from scipy.stats import chi2_contingency, pearsonr\n",
    "from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n",
    "from sklearn.feature_extraction import FeatureHasher\n",
    "from sklearn.model_selection import StratifiedKFold, KFold, train_test_split, GridSearchCV\n",
    "from category_encoders import TargetEncoder\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from autogluon.tabular import TabularDataset, TabularPredictor, FeatureMetadata\n",
    "from autogluon.features.generators import AsTypeFeatureGenerator, BulkFeatureGenerator, DropUniqueFeatureGenerator, FillNaFeatureGenerator, PipelineFeatureGenerator\n",
    "from autogluon.features.generators import CategoryFeatureGenerator, IdentityFeatureGenerator, AutoMLPipelineFeatureGenerator\n",
    "from autogluon.common.features.types import R_INT, R_FLOAT\n",
    "from autogluon.core.metrics import make_scorer"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8440031f",
   "metadata": {},
   "source": [
    "# 数据加载"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cf7f31e8",
   "metadata": {},
   "source": [
    "## 训练集目标客户加载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "ca3bad1e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>DATA_DAT</th>\n",
       "      <th>CUST_NO</th>\n",
       "      <th>FLAG</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>20250630</td>\n",
       "      <td>2faac6549cf552d128f8bec626c99240</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>20250630</td>\n",
       "      <td>42767ba2f4c8963e17fbf91e5007b626</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>20250630</td>\n",
       "      <td>af02816658b8211ca6128b2239feb8ac</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>20250630</td>\n",
       "      <td>cb9e2a9ace6608fb51cb68632780362d</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>20250630</td>\n",
       "      <td>2c0feec8fa0ffb75831f566d0797a8b4</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   DATA_DAT                           CUST_NO  FLAG\n",
       "0  20250630  2faac6549cf552d128f8bec626c99240     1\n",
       "1  20250630  42767ba2f4c8963e17fbf91e5007b626     1\n",
       "2  20250630  af02816658b8211ca6128b2239feb8ac     1\n",
       "3  20250630  cb9e2a9ace6608fb51cb68632780362d     1\n",
       "4  20250630  2c0feec8fa0ffb75831f566d0797a8b4     1"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Load training-set target customers with labels (columns: DATA_DAT, CUST_NO, FLAG).\n",
     "train_target_cust = pd.read_csv('../Train_Data/TRAIN_TARGET_INFO.csv')\n",
     "train_target_cust.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9450f7e7",
   "metadata": {},
   "source": [
    "## 测试集目标客户加载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "d0e83452",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>DATA_DAT</th>\n",
       "      <th>CUST_NO</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>20250731</td>\n",
       "      <td>13ef4241a1959ccbcf8d8a30f0ed9d50</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>20250731</td>\n",
       "      <td>029dede087234ee034590abefc4731a9</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>20250731</td>\n",
       "      <td>929838a9271aa18da0ad8cb5154ce591</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>20250731</td>\n",
       "      <td>51b04b6d47643e0f5303c38a429557d5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>20250731</td>\n",
       "      <td>a54db8a4f36e43e9390cf3e43d45f308</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   DATA_DAT                           CUST_NO\n",
       "0  20250731  13ef4241a1959ccbcf8d8a30f0ed9d50\n",
       "1  20250731  029dede087234ee034590abefc4731a9\n",
       "2  20250731  929838a9271aa18da0ad8cb5154ce591\n",
       "3  20250731  51b04b6d47643e0f5303c38a429557d5\n",
       "4  20250731  a54db8a4f36e43e9390cf3e43d45f308"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Load test-set target customers (columns: DATA_DAT, CUST_NO; no FLAG label).\n",
     "test_target_cust = pd.read_csv('../DATA/A_TARGET.csv')\n",
     "test_target_cust.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "850cb671",
   "metadata": {},
   "source": [
    "## 特征文件加载"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "eef572c1",
   "metadata": {},
   "source": [
    "### 通用pkl加载函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "1e262d1e",
   "metadata": {},
   "outputs": [],
   "source": [
    "class FeatureLoader:\n",
    "    \"\"\"\n",
    "    特征文件加载器\n",
    "    支持批量加载feature目录下的所有pkl文件\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, feature_dir='./feature'):\n",
    "        \"\"\"\n",
    "        初始化加载器\n",
    "        \n",
    "        参数:\n",
    "        - feature_dir: 特征文件目录路径\n",
    "        \"\"\"\n",
    "        self.feature_dir = feature_dir\n",
    "        self.features_dict = {}\n",
    "        self.feature_info = {}\n",
    "        \n",
    "    def load_single_feature(self, file_path):\n",
    "        \"\"\"\n",
    "        加载单个pkl特征文件\n",
    "        \n",
    "        参数:\n",
    "        - file_path: pkl文件路径\n",
    "        \n",
    "        返回:\n",
    "        - DataFrame: 特征数据\n",
    "        \"\"\"\n",
    "        try:\n",
    "            with open(file_path, 'rb') as f:\n",
    "                data = pickle.load(f)\n",
    "            \n",
    "            if not isinstance(data, pd.DataFrame):\n",
    "                raise ValueError(f\"文件 {file_path} 不是DataFrame格式\")\n",
    "            \n",
    "            return data\n",
    "        except Exception as e:\n",
    "            print(f\"加载文件 {file_path} 失败: {str(e)}\")\n",
    "            return None\n",
    "    \n",
    "    def load_all_features(self, pattern='*.pkl'):\n",
    "        \"\"\"\n",
    "        批量加载所有特征文件\n",
    "        \n",
    "        参数:\n",
    "        - pattern: 文件匹配模式\n",
    "        \n",
    "        返回:\n",
    "        - dict: {文件名: DataFrame}\n",
    "        \"\"\"\n",
    "        if not os.path.exists(self.feature_dir):\n",
    "            print(f\"目录不存在: {self.feature_dir}\")\n",
    "            return {}\n",
    "        \n",
    "        pkl_files = [f for f in os.listdir(self.feature_dir) if f.endswith('.pkl')]\n",
    "        \n",
    "        if not pkl_files:\n",
    "            print(f\"未找到pkl文件在目录: {self.feature_dir}\")\n",
    "            return {}\n",
    "        \n",
    "        print(f\"发现 {len(pkl_files)} 个特征文件\")\n",
    "        print(\"=\"*80)\n",
    "        \n",
    "        for pkl_file in pkl_files:\n",
    "            file_path = os.path.join(self.feature_dir, pkl_file)\n",
    "            file_name = os.path.splitext(pkl_file)[0]\n",
    "            \n",
    "            print(f\"\\n正在加载: {pkl_file}\")\n",
    "            data = self.load_single_feature(file_path)\n",
    "            \n",
    "            if data is not None:\n",
    "                self.features_dict[file_name] = data\n",
    "                \n",
    "                # 记录文件信息\n",
    "                self.feature_info[file_name] = {\n",
    "                    'file_path': file_path,\n",
    "                    'file_size_mb': os.path.getsize(file_path) / 1024 / 1024,\n",
    "                    'shape': data.shape,\n",
    "                    'memory_mb': data.memory_usage(deep=True).sum() / 1024 / 1024,\n",
    "                    'columns': data.columns.tolist()\n",
    "                }\n",
    "                \n",
    "                print(f\"  - 形状: {data.shape}\")\n",
    "                print(f\"  - 文件大小: {self.feature_info[file_name]['file_size_mb']:.2f} MB\")\n",
    "                print(f\"  - 内存占用: {self.feature_info[file_name]['memory_mb']:.2f} MB\")\n",
    "        \n",
    "        print(\"\\n\" + \"=\"*80)\n",
    "        print(f\"成功加载 {len(self.features_dict)} 个特征文件\")\n",
    "        \n",
    "        return self.features_dict\n",
    "    \n",
    "    def get_feature_summary(self):\n",
    "        \"\"\"\n",
    "        获取所有特征文件的汇总信息\n",
    "        \n",
    "        返回:\n",
    "        - DataFrame: 汇总表\n",
    "        \"\"\"\n",
    "        if not self.feature_info:\n",
    "            print(\"请先加载特征文件\")\n",
    "            return None\n",
    "        \n",
    "        summary_data = []\n",
    "        for name, info in self.feature_info.items():\n",
    "            summary_data.append({\n",
    "                '特征文件': name,\n",
    "                '样本数': info['shape'][0],\n",
    "                '特征数': info['shape'][1] - 1 if 'CUST_NO' in info['columns'] else info['shape'][1],\n",
    "                '文件大小(MB)': round(info['file_size_mb'], 2),\n",
    "                '内存占用(MB)': round(info['memory_mb'], 2)\n",
    "            })\n",
    "        \n",
    "        summary_df = pd.DataFrame(summary_data)\n",
    "        summary_df = summary_df.sort_values('特征数', ascending=False).reset_index(drop=True)\n",
    "        \n",
    "        # 添加汇总行\n",
    "        total_row = pd.DataFrame([{\n",
    "            '特征文件': '总计',\n",
    "            '样本数': '-',\n",
    "            '特征数': summary_df['特征数'].sum(),\n",
    "            '文件大小(MB)': round(summary_df['文件大小(MB)'].sum(), 2),\n",
    "            '内存占用(MB)': round(summary_df['内存占用(MB)'].sum(), 2)\n",
    "        }])\n",
    "        \n",
    "        summary_df = pd.concat([summary_df, total_row], ignore_index=True)\n",
    "        \n",
    "        return summary_df"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0ab626ec",
   "metadata": {},
   "source": [
    "### 加载训练集特征文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "c530b6ca",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "特征加载器已初始化\n",
      "发现 10 个特征文件\n",
      "================================================================================\n",
      "\n",
      "正在加载: TRAIN_AGET_PAY_features.pkl\n",
      "  - 形状: (4400, 47)\n",
      "  - 文件大小: 1.69 MB\n",
      "  - 内存占用: 1.92 MB\n",
      "\n",
      "正在加载: TRAIN_ASSET_features.pkl\n",
      "  - 形状: (48417, 139)\n",
      "  - 文件大小: 51.50 MB\n",
      "  - 内存占用: 53.98 MB\n",
      "\n",
      "正在加载: TRAIN_CCD_TR_DTL_features.pkl\n",
      "  - 形状: (169, 48)\n",
      "  - 文件大小: 0.07 MB\n",
      "  - 内存占用: 0.08 MB\n",
      "\n",
      "正在加载: TRAIN_MB_PAGEVIEW_DTL_features.pkl\n",
      "  - 形状: (24754, 503)\n",
      "  - 文件大小: 95.09 MB\n",
      "  - 内存占用: 96.34 MB\n",
      "\n",
      "正在加载: TRAIN_MB_TRNFLW_QRYTRNFLW_features.pkl\n",
      "  - 形状: (30002, 239)\n",
      "  - 文件大小: 55.26 MB\n",
      "  - 内存占用: 56.80 MB\n",
      "\n",
      "正在加载: TRAIN_NATURE_features.pkl\n",
      "  - 形状: (51397, 26)\n",
      "  - 文件大小: 10.35 MB\n",
      "  - 内存占用: 12.99 MB\n",
      "\n",
      "正在加载: TRAIN_PROD_HOLD_features.pkl\n",
      "  - 形状: (49448, 41)\n",
      "  - 文件大小: 14.29 MB\n",
      "  - 内存占用: 16.84 MB\n",
      "\n",
      "正在加载: TRAIN_TR_APS_DTL_features.pkl\n",
      "  - 形状: (48608, 309)\n",
      "  - 文件大小: 115.11 MB\n",
      "  - 内存占用: 117.61 MB\n",
      "\n",
      "正在加载: TRAIN_TR_IBTF_features.pkl\n",
      "  - 形状: (26375, 86)\n",
      "  - 文件大小: 17.29 MB\n",
      "  - 内存占用: 18.64 MB\n",
      "\n",
      "正在加载: TRAIN_TR_TPAY_features.pkl\n",
      "  - 形状: (30926, 65)\n",
      "  - 文件大小: 15.08 MB\n",
      "  - 内存占用: 16.66 MB\n",
      "\n",
      "================================================================================\n",
      "成功加载 10 个特征文件\n",
      "\n",
      "特征文件汇总:\n",
      "                                  特征文件    样本数   特征数  文件大小(MB)  内存占用(MB)\n",
      "0       TRAIN_MB_PAGEVIEW_DTL_features  24754   502     95.09     96.34\n",
      "1            TRAIN_TR_APS_DTL_features  48608   308    115.11    117.61\n",
      "2   TRAIN_MB_TRNFLW_QRYTRNFLW_features  30002   238     55.26     56.80\n",
      "3                 TRAIN_ASSET_features  48417   138     51.50     53.98\n",
      "4               TRAIN_TR_IBTF_features  26375    85     17.29     18.64\n",
      "5               TRAIN_TR_TPAY_features  30926    64     15.08     16.66\n",
      "6            TRAIN_CCD_TR_DTL_features    169    47      0.07      0.08\n",
      "7              TRAIN_AGET_PAY_features   4400    46      1.69      1.92\n",
      "8             TRAIN_PROD_HOLD_features  49448    40     14.29     16.84\n",
      "9                TRAIN_NATURE_features  51397    25     10.35     12.99\n",
      "10                                  总计      -  1493    375.73    391.86\n",
      "  - 形状: (48608, 309)\n",
      "  - 文件大小: 115.11 MB\n",
      "  - 内存占用: 117.61 MB\n",
      "\n",
      "正在加载: TRAIN_TR_IBTF_features.pkl\n",
      "  - 形状: (26375, 86)\n",
      "  - 文件大小: 17.29 MB\n",
      "  - 内存占用: 18.64 MB\n",
      "\n",
      "正在加载: TRAIN_TR_TPAY_features.pkl\n",
      "  - 形状: (30926, 65)\n",
      "  - 文件大小: 15.08 MB\n",
      "  - 内存占用: 16.66 MB\n",
      "\n",
      "================================================================================\n",
      "成功加载 10 个特征文件\n",
      "\n",
      "特征文件汇总:\n",
      "                                  特征文件    样本数   特征数  文件大小(MB)  内存占用(MB)\n",
      "0       TRAIN_MB_PAGEVIEW_DTL_features  24754   502     95.09     96.34\n",
      "1            TRAIN_TR_APS_DTL_features  48608   308    115.11    117.61\n",
      "2   TRAIN_MB_TRNFLW_QRYTRNFLW_features  30002   238     55.26     56.80\n",
      "3                 TRAIN_ASSET_features  48417   138     51.50     53.98\n",
      "4               TRAIN_TR_IBTF_features  26375    85     17.29     18.64\n",
      "5               TRAIN_TR_TPAY_features  30926    64     15.08     16.66\n",
      "6            TRAIN_CCD_TR_DTL_features    169    47      0.07      0.08\n",
      "7              TRAIN_AGET_PAY_features   4400    46      1.69      1.92\n",
      "8             TRAIN_PROD_HOLD_features  49448    40     14.29     16.84\n",
      "9                TRAIN_NATURE_features  51397    25     10.35     12.99\n",
      "10                                  总计      -  1493    375.73    391.86\n"
     ]
    }
   ],
   "source": [
     "# Create the loader instance for the training-set feature directory.\n",
     "train_loader = FeatureLoader(feature_dir='./feature/Train')\n",
     "print(\"特征加载器已初始化\")\n",
     "\n",
     "# Load every pkl feature file found in the directory.\n",
     "train_features_dict = train_loader.load_all_features()\n",
     "\n",
     "# Display a per-file summary table of the loaded features.\n",
     "train_summary_df = train_loader.get_feature_summary()\n",
     "print(\"\\n特征文件汇总:\")\n",
     "print(train_summary_df)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "333e5a95",
   "metadata": {},
   "source": [
    "### 加载测试集特征文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "ed5acaf8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "特征加载器已初始化\n",
      "发现 10 个特征文件\n",
      "================================================================================\n",
      "\n",
      "正在加载: A_AGET_PAY_features.pkl\n",
      "  - 形状: (530, 47)\n",
      "  - 文件大小: 0.21 MB\n",
      "  - 内存占用: 0.23 MB\n",
      "\n",
      "正在加载: A_ASSET_features.pkl\n",
      "  - 形状: (5624, 139)\n",
      "  - 文件大小: 5.99 MB\n",
      "  - 内存占用: 6.27 MB\n",
      "\n",
      "正在加载: A_CCD_TR_DTL_features.pkl\n",
      "  - 形状: (18, 48)\n",
      "  - 文件大小: 0.01 MB\n",
      "  - 内存占用: 0.01 MB\n",
      "\n",
      "正在加载: A_MB_PAGEVIEW_DTL_features.pkl\n",
      "  - 形状: (2753, 503)\n",
      "  - 文件大小: 10.60 MB\n",
      "  - 内存占用: 10.71 MB\n",
      "\n",
      "正在加载: A_MB_TRNFLW_QRYTRNFLW_features.pkl\n",
      "  - 形状: (3128, 239)\n",
      "  - 文件大小: 5.77 MB\n",
      "  - 内存占用: 5.92 MB\n",
      "\n",
      "正在加载: A_NATURE_features.pkl\n",
      "  - 形状: (5975, 26)\n",
      "  - 文件大小: 1.21 MB\n",
      "  - 内存占用: 1.51 MB\n",
      "\n",
      "正在加载: A_PROD_HOLD_features.pkl\n",
      "  - 形状: (5741, 41)\n",
      "  - 文件大小: 1.66 MB\n",
      "  - 内存占用: 1.95 MB\n",
      "\n",
      "正在加载: A_TR_APS_DTL_features.pkl\n",
      "  - 形状: (5616, 309)\n",
      "  - 文件大小: 13.29 MB\n",
      "  - 内存占用: 13.57 MB\n",
      "\n",
      "正在加载: A_TR_IBTF_features.pkl\n",
      "  - 形状: (2981, 86)\n",
      "  - 文件大小: 1.96 MB\n",
      "  - 内存占用: 2.11 MB\n",
      "\n",
      "正在加载: A_TR_TPAY_features.pkl\n",
      "  - 形状: (3595, 65)\n",
      "  - 文件大小: 1.76 MB\n",
      "  - 内存占用: 1.94 MB\n",
      "\n",
      "================================================================================\n",
      "成功加载 10 个特征文件\n",
      "\n",
      "特征文件汇总:\n",
      "                              特征文件   样本数   特征数  文件大小(MB)  内存占用(MB)\n",
      "0       A_MB_PAGEVIEW_DTL_features  2753   502     10.60     10.71\n",
      "1            A_TR_APS_DTL_features  5616   308     13.29     13.57\n",
      "2   A_MB_TRNFLW_QRYTRNFLW_features  3128   238      5.77      5.92\n",
      "3                 A_ASSET_features  5624   138      5.99      6.27\n",
      "4               A_TR_IBTF_features  2981    85      1.96      2.11\n",
      "5               A_TR_TPAY_features  3595    64      1.76      1.94\n",
      "6            A_CCD_TR_DTL_features    18    47      0.01      0.01\n",
      "7              A_AGET_PAY_features   530    46      0.21      0.23\n",
      "8             A_PROD_HOLD_features  5741    40      1.66      1.95\n",
      "9                A_NATURE_features  5975    25      1.21      1.51\n",
      "10                              总计     -  1493     42.46     44.22\n"
     ]
    }
   ],
   "source": [
     "# Create the loader instance for the test-set (A) feature directory.\n",
     "test_loader = FeatureLoader(feature_dir='./feature/A')\n",
     "print(\"特征加载器已初始化\")\n",
     "\n",
     "# Load every pkl feature file found in the directory.\n",
     "test_features_dict = test_loader.load_all_features()\n",
     "\n",
     "# Display a per-file summary table of the loaded features.\n",
     "test_summary_df = test_loader.get_feature_summary()\n",
     "print(\"\\n特征文件汇总:\")\n",
     "print(test_summary_df)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "62051560",
   "metadata": {},
   "source": [
    "# 建模训练"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8827853b",
   "metadata": {},
   "source": [
    "## 数据合并与预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "ec3d20d6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "====================================================================================================\n",
      "步骤1: 数据合并\n",
      "====================================================================================================\n",
      "\n",
      "开始合并所有特征...\n",
      "====================================================================================================\n",
      "\n",
      "====================================================================================================\n",
      "第1步: 合并训练集特征\n",
      "====================================================================================================\n",
      "\n",
      "正在合并训练集...\n",
      "----------------------------------------------------------------------------------------------------\n",
      "初始形状: (51397, 3)\n",
      "特征文件数量: 10\n",
      "\n",
      "  [1/10] 合并特征: TRAIN_AGET_PAY_features\n",
      "    特征表形状: (4400, 47)\n",
      "    新增特征数: 46\n",
      "    当前形状: (51397, 49)\n",
      "\n",
      "  [2/10] 合并特征: TRAIN_ASSET_features\n",
      "    特征表形状: (48417, 139)\n",
      "    新增特征数: 138\n",
      "    当前形状: (51397, 187)\n",
      "\n",
      "  [3/10] 合并特征: TRAIN_CCD_TR_DTL_features\n",
      "    特征表形状: (169, 48)\n",
      "    新增特征数: 47\n",
      "    当前形状: (51397, 234)\n",
      "\n",
      "  [4/10] 合并特征: TRAIN_MB_PAGEVIEW_DTL_features\n",
      "    特征表形状: (24754, 503)\n",
      "    新增特征数: 502\n",
      "    当前形状: (51397, 736)\n",
      "\n",
      "  [5/10] 合并特征: TRAIN_MB_TRNFLW_QRYTRNFLW_features\n",
      "    特征表形状: (30002, 239)\n",
      "    新增特征数: 238\n",
      "    当前形状: (51397, 974)\n",
      "\n",
      "  [6/10] 合并特征: TRAIN_NATURE_features\n",
      "    特征表形状: (51397, 26)\n",
      "    新增特征数: 25\n",
      "    当前形状: (51397, 999)\n",
      "\n",
      "  [7/10] 合并特征: TRAIN_PROD_HOLD_features\n",
      "    特征表形状: (49448, 41)\n",
      "    新增特征数: 40\n",
      "    当前形状: (51397, 1039)\n",
      "\n",
      "  [8/10] 合并特征: TRAIN_TR_APS_DTL_features\n",
      "    特征表形状: (48608, 309)\n",
      "    新增特征数: 308\n",
      "    当前形状: (51397, 1347)\n",
      "\n",
      "  [9/10] 合并特征: TRAIN_TR_IBTF_features\n",
      "    特征表形状: (26375, 86)\n",
      "    新增特征数: 85\n",
      "    当前形状: (51397, 1432)\n",
      "\n",
      "  [10/10] 合并特征: TRAIN_TR_TPAY_features\n",
      "    特征表形状: (30926, 65)\n",
      "    新增特征数: 64\n",
      "    当前形状: (51397, 1496)\n",
      "\n",
      "训练集合并完成!\n",
      "最终形状: (51397, 1496)\n",
      "最终特征数: 1494\n",
      "\n",
      "====================================================================================================\n",
      "第2步: 合并测试集特征\n",
      "====================================================================================================\n",
      "\n",
      "正在合并测试集...\n",
      "----------------------------------------------------------------------------------------------------\n",
      "初始形状: (5975, 2)\n",
      "特征文件数量: 10\n",
      "\n",
      "  [1/10] 合并特征: A_AGET_PAY_features\n",
      "    特征表形状: (530, 47)\n",
      "    新增特征数: 46\n",
      "    当前形状: (5975, 48)\n",
      "\n",
      "  [2/10] 合并特征: A_ASSET_features\n",
      "    特征表形状: (5624, 139)\n",
      "    新增特征数: 138\n",
      "    当前形状: (5975, 186)\n",
      "\n",
      "  [3/10] 合并特征: A_CCD_TR_DTL_features\n",
      "    特征表形状: (18, 48)\n",
      "    新增特征数: 47\n",
      "    当前形状: (5975, 233)\n",
      "\n",
      "  [4/10] 合并特征: A_MB_PAGEVIEW_DTL_features\n",
      "    特征表形状: (2753, 503)\n",
      "    新增特征数: 502\n",
      "    当前形状: (5975, 735)\n",
      "\n",
      "  [5/10] 合并特征: A_MB_TRNFLW_QRYTRNFLW_features\n",
      "    特征表形状: (3128, 239)\n",
      "    新增特征数: 238\n",
      "    当前形状: (5975, 973)\n",
      "\n",
      "  [6/10] 合并特征: A_NATURE_features\n",
      "    特征表形状: (5975, 26)\n",
      "    新增特征数: 25\n",
      "    当前形状: (5975, 998)\n",
      "\n",
      "  [7/10] 合并特征: A_PROD_HOLD_features\n",
      "    特征表形状: (5741, 41)\n",
      "    新增特征数: 40\n",
      "    当前形状: (5975, 1038)\n",
      "\n",
      "  [8/10] 合并特征: A_TR_APS_DTL_features\n",
      "    特征表形状: (5616, 309)\n",
      "    新增特征数: 308\n",
      "    当前形状: (5975, 1346)\n",
      "\n",
      "  [9/10] 合并特征: A_TR_IBTF_features\n",
      "    特征表形状: (2981, 86)\n",
      "    新增特征数: 85\n",
      "    当前形状: (5975, 1431)\n",
      "\n",
      "  [10/10] 合并特征: A_TR_TPAY_features\n",
      "    特征表形状: (3595, 65)\n",
      "    新增特征数: 64\n",
      "    当前形状: (5975, 1495)\n",
      "\n",
      "测试集合并完成!\n",
      "最终形状: (5975, 1495)\n",
      "最终特征数: 1494\n",
      "\n",
      "====================================================================================================\n",
      "第3步: 检查训练集和测试集列一致性\n",
      "====================================================================================================\n",
      "\n",
      "列对比结果:\n",
      "  训练集总列数: 1496 (含CUST_NO和FLAG)\n",
      "  测试集总列数: 1495 (含CUST_NO)\n",
      "  共同特征列数: 1455\n",
      "\n",
      "  仅在训练集的列 (39个):\n",
      "    - module_6d748a11308d9e5cfb265d92df56f6e2_unique_pages\n",
      "    - active_months_dup_TRAIN_CCD_TR_DTL_features\n",
      "    - month_amt_min_dup_TRAIN_CCD_TR_DTL_features\n",
      "    - module_6d748a11308d9e5cfb265d92df56f6e2_count\n",
      "    - module_6d748a11308d9e5cfb265d92df56f6e2_last_visit\n",
      "    - page_cb9849b851be39ccd19b336550b5f20c_last_visit\n",
      "    - page_cb9849b851be39ccd19b336550b5f20c_visit_days\n",
      "    - page_14d5118976b7e53c1c2bdbbbb155dfe8_last_visit\n",
      "    - page_path_count_0e5c9561153e8b3fd936b94a5641c8e1_1f02209bfbd0fbd6e68330b02c75a2ee\n",
      "    - avg_month_count_dup_TRAIN_CCD_TR_DTL_features\n",
      "    ... 还有 29 个\n",
      "\n",
      "  仅在测试集的列 (39个):\n",
      "    - month_amt_cv_dup_A_CCD_TR_DTL_features\n",
      "    - page_be2de5e033a826ad66a90bab39dddc22_last_visit\n",
      "    - model_path_count_b989cf3952250c20ce3f5ce391638fbc_878ac3a435a9e30b63c6b4c8c1806171\n",
      "    - page_path_count_c5b386b7a6348a2f1ba70f2259fb827e_1f02209bfbd0fbd6e68330b02c75a2ee\n",
      "    - page_path_count_a3efea933884689e89b46cadd9aa989e_8d9046268f61d8dd1fc7b48e36677e1e\n",
      "    - page_464e4262d3173674b011f312fa50aa0d_last_visit\n",
      "    - module_1993162004f0f20fe0bcda9337e150b4_last_visit\n",
      "    - page_6aaf0b36ffc10b6e08130dfe69770d5d_count\n",
      "    - page_be2de5e033a826ad66a90bab39dddc22_count\n",
      "    - module_1993162004f0f20fe0bcda9337e150b4_count\n",
      "    ... 还有 29 个\n",
      "\n",
      "警告: 训练集和测试集存在不一致的列!\n",
      "建议: 删除不一致的列以确保模型训练和预测的一致性\n",
      "\n",
      "正在删除训练集中的不一致列...\n",
      "  已删除 39 列\n",
      "\n",
      "正在删除测试集中的不一致列...\n",
      "  已删除 39 列\n",
      "\n",
      "处理后:\n",
      "  训练集形状: (51397, 1457)\n",
      "  测试集形状: (5975, 1456)\n",
      "\n",
      "====================================================================================================\n",
      "所有特征合并完成!\n",
      "====================================================================================================\n",
      "\n",
      "最终结果:\n",
      "  训练集形状: (51397, 1457)\n",
      "  测试集形状: (5975, 1456)\n",
      "  训练集特征数: 1455  (不含CUST_NO和FLAG)\n",
      "  测试集特征数: 1455  (不含CUST_NO)\n",
      "\n",
      "\n",
      "\n",
      "训练集质量检查...\n",
      "====================================================================================================\n",
      "\n",
      "1. 重复记录数: 0\n",
      "\n",
      "2. 缺失值统计:\n",
      "   有缺失的列数: 1429 / 1457\n",
      "   总缺失值数: 42540954\n",
      "   总缺失率: 56.81%\n",
      "\n",
      "   缺失最严重的前15列:\n",
      "     PROD_CARD_COUNT: 51397 (100.00%)\n",
      "     MB_IND: 51397 (100.00%)\n",
      "     TDPT_PAY_ALI_IND: 51397 (100.00%)\n",
      "     TDPT_PAY_WCHT_IND: 51397 (100.00%)\n",
      "     PROD_DEPOSIT_TYPE: 51397 (100.00%)\n",
      "     PROD_LOAN_TYPE: 51397 (100.00%)\n",
      "     EBNK_IND: 51397 (100.00%)\n",
      "     PROD_WEALTH_COUNT: 51397 (100.00%)\n",
      "     PROD_ECHANNEL_COUNT: 51397 (100.00%)\n",
      "     PROD_THIRDPAY_COUNT: 51397 (100.00%)\n",
      "     PROD_HAS_PAY: 51397 (100.00%)\n",
      "     PROD_VALUE_SCORE: 51397 (100.00%)\n",
      "     PROD_ECHANNEL_ACTIVE: 51397 (100.00%)\n",
      "     PROD_INVEST_ACTIVE: 51397 (100.00%)\n",
      "     PAY_IND: 51397 (100.00%)\n",
      "\n",
      "3. 常量列数: 44\n",
      "\n",
      "4. 数据类型分布:\n",
      "   float64: 1444\n",
      "   int64: 6\n",
      "   int32: 6\n",
      "   object: 1\n",
      "\n",
      "5. 目标变量分布 (FLAG):\n",
      "   类别 1:   1459 ( 2.84%)\n",
      "   类别 2:    309 ( 0.60%)\n",
      "   类别 3:   5586 (10.87%)\n",
      "   类别 4:  15303 (29.77%)\n",
      "   类别 5:   8193 (15.94%)\n",
      "   类别 6:  10261 (19.96%)\n",
      "   类别 7:   1276 ( 2.48%)\n",
      "   类别 8:   2498 ( 4.86%)\n",
      "   类别 9:   6476 (12.60%)\n",
      "   类别 10:     36 ( 0.07%)\n",
      "\n",
      "   类别不平衡比: 425.08:1 (最大类/最小类)\n",
      "   警告: 存在严重类别不平衡! 建议使用类别权重或采样策略\n",
      "====================================================================================================\n",
      "\n",
      "\n",
      "\n",
      "测试集质量检查...\n",
      "====================================================================================================\n",
      "\n",
      "1. 重复记录数: 0\n",
      "\n",
      "2. 缺失值统计:\n",
      "   有缺失的列数: 1430 / 1456\n",
      "   总缺失值数: 4612895\n",
      "   总缺失率: 53.02%\n",
      "\n",
      "   缺失最严重的前15列:\n",
      "     PAY_IND: 5975 (100.00%)\n",
      "     BOND_IND: 5975 (100.00%)\n",
      "     EBNK_IND: 5975 (100.00%)\n",
      "     PROD_ECHANNEL_COUNT: 5975 (100.00%)\n",
      "     MB_IND: 5975 (100.00%)\n",
      "     PROD_THIRDPAY_COUNT: 5975 (100.00%)\n",
      "     PROD_INVEST_ACTIVE: 5975 (100.00%)\n",
      "     PROD_ECHANNEL_ACTIVE: 5975 (100.00%)\n",
      "     PROD_VALUE_SCORE: 5975 (100.00%)\n",
      "     MS_IND: 5975 (100.00%)\n",
      "     METAL_IND: 5975 (100.00%)\n",
      "     INSUR_IND: 5975 (100.00%)\n",
      "     FUND_IND: 5975 (100.00%)\n",
      "     PROD_WEALTH_COUNT: 5975 (100.00%)\n",
      "     PROD_HAS_PAY: 5975 (100.00%)\n",
      "\n",
      "3. 常量列数: 46\n",
      "\n",
      "4. 数据类型分布:\n",
      "   float64: 1444\n",
      "   int32: 6\n",
      "   int64: 5\n",
      "   object: 1\n",
      "====================================================================================================\n"
     ]
    }
   ],
   "source": [
    "class DataMerger:\n",
    "    \"\"\"\n",
    "    Data merger - joins every feature table onto the target table.\n",
    "    Train and test sets are processed independently (their feature\n",
    "    files may use different naming prefixes).\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, train_features_dict, test_features_dict, train_target_df, test_target_df, key='CUST_NO'):\n",
    "        \"\"\"\n",
    "        Initialize the merger.\n",
    "        \n",
    "        Parameters:\n",
    "        - train_features_dict: dict of training feature tables (TRAIN_ prefix)\n",
    "        - test_features_dict: dict of test feature tables (A_ prefix)\n",
    "        - train_target_df: training target-customer table\n",
    "        - test_target_df: test target-customer table\n",
    "        - key: primary-key column name used for all joins\n",
    "        \"\"\"\n",
    "        self.train_features_dict = train_features_dict\n",
    "        self.test_features_dict = test_features_dict\n",
    "        # copy() so later drops do not mutate the caller's frames\n",
    "        self.train_target_df = train_target_df.copy()\n",
    "        self.test_target_df = test_target_df.copy()\n",
    "        self.key = key\n",
    "        self.train_merged = None  # set by merge_all_features()\n",
    "        self.test_merged = None  # set by merge_all_features()\n",
    "        \n",
    "    def merge_features_for_dataset(self, target_df, features_dict, dataset_name='数据集'):\n",
    "        \"\"\"\n",
    "        Left-join every feature table onto one dataset (train or test).\n",
    "        \n",
    "        Parameters:\n",
    "        - target_df: target-customer table (one row per customer)\n",
    "        - features_dict: dict mapping feature-table name -> DataFrame\n",
    "        - dataset_name: label used only in progress messages\n",
    "        \n",
    "        Returns:\n",
    "        - merged_df: target table with all feature columns attached\n",
    "        \"\"\"\n",
    "        print(f\"\\n正在合并{dataset_name}...\")\n",
    "        print(\"-\" * 100)\n",
    "        \n",
    "        merged_df = target_df.copy()\n",
    "        print(f\"初始形状: {merged_df.shape}\")\n",
    "        print(f\"特征文件数量: {len(features_dict)}\")\n",
    "        \n",
    "        # Merge tables one by one; sorted() makes column order deterministic\n",
    "        for idx, (feature_name, feature_df) in enumerate(sorted(features_dict.items()), 1):\n",
    "            print(f\"\\n  [{idx}/{len(features_dict)}] 合并特征: {feature_name}\")\n",
    "            print(f\"    特征表形状: {feature_df.shape}\")\n",
    "            \n",
    "            # Skip tables that do not carry the join key\n",
    "            if self.key not in feature_df.columns:\n",
    "                print(f\"    警告: {feature_name} 缺少主键 {self.key}, 跳过\")\n",
    "                continue\n",
    "            \n",
    "            before_cols = len(merged_df.columns)\n",
    "            \n",
    "            # Left join keeps every target row; name clashes get a suffix\n",
    "            merged_df = merged_df.merge(\n",
    "                feature_df, \n",
    "                on=self.key, \n",
    "                how='left',\n",
    "                suffixes=('', f'_dup_{feature_name}')\n",
    "            )\n",
    "            \n",
    "            after_cols = len(merged_df.columns)\n",
    "            added_cols = after_cols - before_cols\n",
    "            \n",
    "            print(f\"    新增特征数: {added_cols}\")\n",
    "            print(f\"    当前形状: {merged_df.shape}\")\n",
    "            \n",
    "            # BUGFIX: the merge suffix is APPENDED, so duplicated columns END\n",
    "            # with '_dup_<name>'. The original startswith() check could never\n",
    "            # match, so duplicate columns were silently missed.\n",
    "            dup_cols = [col for col in merged_df.columns if col.endswith(f'_dup_{feature_name}')]\n",
    "            if dup_cols:\n",
    "                print(f\"    警告: 发现重复列 {dup_cols}, 已添加后缀\")\n",
    "        \n",
    "        print(f\"\\n{dataset_name}合并完成!\")\n",
    "        print(f\"最终形状: {merged_df.shape}\")\n",
    "        # feature count = all columns minus the key and (if present) FLAG\n",
    "        print(f\"最终特征数: {merged_df.shape[1] - 1 - ('FLAG' in merged_df.columns)}\")\n",
    "        \n",
    "        return merged_df\n",
    "    \n",
    "    def merge_all_features(self):\n",
    "        \"\"\"\n",
    "        Merge all features for the train and test sets separately, then\n",
    "        drop any columns that are not present on both sides.\n",
    "        \n",
    "        Returns:\n",
    "        - train_merged: fully merged training data\n",
    "        - test_merged: fully merged test data\n",
    "        \"\"\"\n",
    "        print(\"\\n开始合并所有特征...\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        # 1. Merge training-set features\n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(\"第1步: 合并训练集特征\")\n",
    "        print(\"=\"*100)\n",
    "        self.train_merged = self.merge_features_for_dataset(\n",
    "            self.train_target_df, \n",
    "            self.train_features_dict, \n",
    "            '训练集'\n",
    "        )\n",
    "        \n",
    "        # 2. Merge test-set features\n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(\"第2步: 合并测试集特征\")\n",
    "        print(\"=\"*100)\n",
    "        self.test_merged = self.merge_features_for_dataset(\n",
    "            self.test_target_df, \n",
    "            self.test_features_dict, \n",
    "            '测试集'\n",
    "        )\n",
    "        \n",
    "        # 3. Check train/test column consistency\n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(\"第3步: 检查训练集和测试集列一致性\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        train_cols = set(self.train_merged.columns) - {'FLAG'}  # train carries FLAG\n",
    "        test_cols = set(self.test_merged.columns)  # test has no FLAG\n",
    "        \n",
    "        # Columns present on only one side\n",
    "        only_in_train = train_cols - test_cols - {self.key}\n",
    "        only_in_test = test_cols - train_cols - {self.key}\n",
    "        common_cols = (train_cols & test_cols) - {self.key}\n",
    "        \n",
    "        print(f\"\\n列对比结果:\")\n",
    "        print(f\"  训练集总列数: {len(self.train_merged.columns)} (含CUST_NO和FLAG)\")\n",
    "        print(f\"  测试集总列数: {len(self.test_merged.columns)} (含CUST_NO)\")\n",
    "        print(f\"  共同特征列数: {len(common_cols)}\")\n",
    "        \n",
    "        if only_in_train:\n",
    "            print(f\"\\n  仅在训练集的列 ({len(only_in_train)}个):\")\n",
    "            for col in list(only_in_train)[:10]:\n",
    "                print(f\"    - {col}\")\n",
    "            if len(only_in_train) > 10:\n",
    "                print(f\"    ... 还有 {len(only_in_train) - 10} 个\")\n",
    "        else:\n",
    "            print(f\"  仅在训练集的列: 无 ✓\")\n",
    "        \n",
    "        if only_in_test:\n",
    "            print(f\"\\n  仅在测试集的列 ({len(only_in_test)}个):\")\n",
    "            for col in list(only_in_test)[:10]:\n",
    "                print(f\"    - {col}\")\n",
    "            if len(only_in_test) > 10:\n",
    "                print(f\"    ... 还有 {len(only_in_test) - 10} 个\")\n",
    "        else:\n",
    "            print(f\"  仅在测试集的列: 无 ✓\")\n",
    "        \n",
    "        # 4. Drop mismatched columns so train/test schemas stay aligned\n",
    "        if only_in_train or only_in_test:\n",
    "            print(f\"\\n警告: 训练集和测试集存在不一致的列!\")\n",
    "            print(f\"建议: 删除不一致的列以确保模型训练和预测的一致性\")\n",
    "            \n",
    "            # Automatically remove the inconsistent columns from each side\n",
    "            if only_in_train:\n",
    "                print(f\"\\n正在删除训练集中的不一致列...\")\n",
    "                self.train_merged = self.train_merged.drop(columns=list(only_in_train))\n",
    "                print(f\"  已删除 {len(only_in_train)} 列\")\n",
    "            \n",
    "            if only_in_test:\n",
    "                print(f\"\\n正在删除测试集中的不一致列...\")\n",
    "                self.test_merged = self.test_merged.drop(columns=list(only_in_test))\n",
    "                print(f\"  已删除 {len(only_in_test)} 列\")\n",
    "            \n",
    "            print(f\"\\n处理后:\")\n",
    "            print(f\"  训练集形状: {self.train_merged.shape}\")\n",
    "            print(f\"  测试集形状: {self.test_merged.shape}\")\n",
    "        else:\n",
    "            print(f\"\\n列一致性检查通过 ✓\")\n",
    "        \n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(f\"所有特征合并完成!\")\n",
    "        print(\"=\"*100)\n",
    "        print(f\"\\n最终结果:\")\n",
    "        print(f\"  训练集形状: {self.train_merged.shape}\")\n",
    "        print(f\"  测试集形状: {self.test_merged.shape}\")\n",
    "        print(f\"  训练集特征数: {self.train_merged.shape[1] - 2}  (不含CUST_NO和FLAG)\")\n",
    "        print(f\"  测试集特征数: {self.test_merged.shape[1] - 1}  (不含CUST_NO)\")\n",
    "        \n",
    "        return self.train_merged, self.test_merged\n",
    "    \n",
    "    def check_data_quality(self, data, data_name='数据'):\n",
    "        \"\"\"\n",
    "        Print a data-quality report: duplicate keys, missing values,\n",
    "        constant columns, dtype counts and (train only) the FLAG\n",
    "        target distribution with an imbalance warning.\n",
    "        \n",
    "        Parameters:\n",
    "        - data: DataFrame to inspect\n",
    "        - data_name: label used in printed headings\n",
    "        \n",
    "        Returns:\n",
    "        - dict: summary of the quality report\n",
    "        \"\"\"\n",
    "        print(f\"\\n{data_name}质量检查...\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        # 1. Duplicate records (by primary key)\n",
    "        duplicates = data.duplicated(subset=[self.key]).sum()\n",
    "        print(f\"\\n1. 重复记录数: {duplicates}\")\n",
    "        \n",
    "        # 2. Missing values\n",
    "        missing_stats = data.isnull().sum().sort_values(ascending=False)\n",
    "        missing_cols = missing_stats[missing_stats > 0]\n",
    "        print(f\"\\n2. 缺失值统计:\")\n",
    "        print(f\"   有缺失的列数: {len(missing_cols)} / {len(data.columns)}\")\n",
    "        print(f\"   总缺失值数: {missing_stats.sum()}\")\n",
    "        print(f\"   总缺失率: {missing_stats.sum() / (data.shape[0] * data.shape[1]) * 100:.2f}%\")\n",
    "        \n",
    "        if len(missing_cols) > 0:\n",
    "            print(f\"\\n   缺失最严重的前15列:\")\n",
    "            for col, count in missing_cols.head(15).items():\n",
    "                rate = count / len(data) * 100\n",
    "                print(f\"     {col}: {count} ({rate:.2f}%)\")\n",
    "        \n",
    "        # 3. Constant columns (single unique value)\n",
    "        constant_cols = []\n",
    "        for col in data.columns:\n",
    "            if col not in [self.key, 'FLAG']:\n",
    "                if data[col].nunique() <= 1:\n",
    "                    constant_cols.append(col)\n",
    "        print(f\"\\n3. 常量列数: {len(constant_cols)}\")\n",
    "        if len(constant_cols) > 0 and len(constant_cols) <= 20:\n",
    "            print(f\"   列名: {constant_cols}\")\n",
    "        \n",
    "        # 4. dtype distribution\n",
    "        dtype_counts = data.dtypes.value_counts()\n",
    "        print(f\"\\n4. 数据类型分布:\")\n",
    "        for dtype, count in dtype_counts.items():\n",
    "            print(f\"   {dtype}: {count}\")\n",
    "        \n",
    "        # 5. Target distribution (training set only; test has no FLAG)\n",
    "        if 'FLAG' in data.columns:\n",
    "            flag_dist = data['FLAG'].value_counts().sort_index()\n",
    "            print(f\"\\n5. 目标变量分布 (FLAG):\")\n",
    "            total = len(data)\n",
    "            for flag, count in flag_dist.items():\n",
    "                rate = count / total * 100\n",
    "                print(f\"   类别 {flag}: {count:6d} ({rate:5.2f}%)\")\n",
    "            \n",
    "            # Quantify imbalance as largest-class / smallest-class count\n",
    "            max_rate = flag_dist.max() / total * 100\n",
    "            min_rate = flag_dist.min() / total * 100\n",
    "            imbalance_ratio = flag_dist.max() / flag_dist.min()\n",
    "            print(f\"\\n   类别不平衡比: {imbalance_ratio:.2f}:1 (最大类/最小类)\")\n",
    "            if imbalance_ratio > 5:\n",
    "                print(f\"   警告: 存在严重类别不平衡! 建议使用类别权重或采样策略\")\n",
    "        \n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        quality_report = {\n",
    "            'duplicates': duplicates,\n",
    "            'missing_cols': len(missing_cols),\n",
    "            'constant_cols': constant_cols,\n",
    "            'dtype_counts': dtype_counts.to_dict()\n",
    "        }\n",
    "        \n",
    "        return quality_report\n",
    "\n",
    "# Build the merger and run the full merge\n",
    "print(\"=\"*100)\n",
    "print(\"步骤1: 数据合并\")\n",
    "print(\"=\"*100)\n",
    "\n",
    "# NOTE(review): train_features_dict / test_features_dict and the two\n",
    "# target frames are assumed to be defined in earlier cells - confirm.\n",
    "merger = DataMerger(\n",
    "    train_features_dict=train_features_dict,\n",
    "    test_features_dict=test_features_dict,\n",
    "    train_target_df=train_target_cust,\n",
    "    test_target_df=test_target_cust\n",
    ")\n",
    "\n",
    "# Run the merge for both datasets\n",
    "train_data, test_data = merger.merge_all_features()\n",
    "\n",
    "# Data-quality reports for both merged frames\n",
    "print(\"\\n\")\n",
    "train_quality = merger.check_data_quality(train_data, '训练集')\n",
    "print(\"\\n\")\n",
    "test_quality = merger.check_data_quality(test_data, '测试集')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a2df69c2",
   "metadata": {},
   "source": [
    "## 数据预处理与特征优化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "f0970ecb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "====================================================================================================\n",
      "步骤2: 数据预处理与特征优化\n",
      "====================================================================================================\n",
      "\n",
      "删除常量列...\n",
      "发现 44 个常量列\n",
      "已删除常量列\n",
      "\n",
      "删除缺失率>95.0%的列...\n",
      "发现 167 个高缺失率列\n",
      "已删除高缺失率列\n",
      "\n",
      "处理无穷值...\n",
      "未发现无穷值\n",
      "\n",
      "识别特征类型...\n",
      "类别型特征数: 10\n",
      "数值型特征数: 1234\n",
      "\n",
      "优化数据类型...\n",
      "训练集内存: 491.39MB -> 247.19MB (减少 49.7%)\n",
      "测试集内存: 57.08MB -> 28.69MB (减少 49.7%)\n",
      "\n",
      "预处理完成!\n",
      "训练集最终形状: (51397, 1246)\n",
      "测试集最终形状: (5975, 1245)\n",
      "删除的列数: 211\n",
      "\n",
      "====================================================================================================\n",
      "特征类型详情:\n",
      "====================================================================================================\n",
      "\n",
      "类别型特征 (10个):\n",
      "  1. NATURE_SEX_CD (唯一值: 2)\n",
      "  2. NATURE_RANK_CD (唯一值: 8)\n",
      "  3. NATURE_SEAN_ACTV_IND (唯一值: 2)\n",
      "  4. NATURE_AGE_GROUP (唯一值: 6)\n",
      "  5. NATURE_SEX_RANK_INTERACT (唯一值: 12)\n",
      "  6. NATURE_IS_YOUNG (唯一值: 2)\n",
      "  7. NATURE_IS_MIDDLE (唯一值: 2)\n",
      "  8. NATURE_IS_OLD (唯一值: 2)\n",
      "  9. NATURE_IS_HIGH_RANK (唯一值: 2)\n",
      "  10. NATURE_IS_LOW_RANK (唯一值: 2)\n",
      "\n",
      "数值型特征 (1234个):\n",
      "  1. aget_pay_count\n",
      "  2. aget_pay_unit_count\n",
      "  3. tr_amt_sum\n",
      "  4. tr_amt_mean\n",
      "  5. tr_amt_std\n",
      "  6. tr_amt_median\n",
      "  7. tr_amt_max\n",
      "  8. tr_amt_min\n",
      "  9. tr_amt_skew\n",
      "  10. tr_amt_transformed_sum\n",
      "  11. tr_amt_transformed_mean\n",
      "  12. tr_amt_transformed_std\n",
      "  13. prov_cd_mean\n",
      "  14. prov_cd_nunique\n",
      "  15. unit_typ_cd_mean\n",
      "  16. unit_typ_cd_nunique\n",
      "  17. tr_amt_range\n",
      "  18. tr_amt_cv\n",
      "  19. avg_amt_per_unit\n",
      "  20. aget_day_diff_max\n",
      "  ... 还有 1214 个数值型特征\n"
     ]
    }
   ],
   "source": [
    "class DataPreprocessor:\n",
    "    \"\"\"\n",
    "    Data preprocessor: drops constant and high-missing-rate columns,\n",
    "    replaces infinities, classifies feature types and downcasts dtypes.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, train_data, test_data, target_col='FLAG', id_col='CUST_NO'):\n",
    "        \"\"\"\n",
    "        Initialize the preprocessor.\n",
    "        \n",
    "        Parameters:\n",
    "        - train_data: training DataFrame\n",
    "        - test_data: test DataFrame\n",
    "        - target_col: target column name\n",
    "        - id_col: ID column name\n",
    "        \"\"\"\n",
    "        # copies keep the caller's frames untouched\n",
    "        self.train_data = train_data.copy()\n",
    "        self.test_data = test_data.copy()\n",
    "        self.target_col = target_col\n",
    "        self.id_col = id_col\n",
    "        self.dropped_cols = []  # every column removed by any step\n",
    "        self.feature_types = {}  # filled by identify_feature_types()\n",
    "        \n",
    "        \n",
    "    def remove_constant_columns(self):\n",
    "        \"\"\"\n",
    "        Drop feature columns that hold a single unique value.\n",
    "        Returns the list of dropped column names.\n",
    "        \"\"\"\n",
    "        print(\"\\n删除常量列...\")\n",
    "        protected = {self.id_col, self.target_col}\n",
    "        constant_cols = [\n",
    "            col for col in self.train_data.columns\n",
    "            if col not in protected and self.train_data[col].nunique() <= 1\n",
    "        ]\n",
    "        \n",
    "        if constant_cols:\n",
    "            print(f\"发现 {len(constant_cols)} 个常量列\")\n",
    "            # drop from both frames to keep schemas aligned\n",
    "            self.train_data = self.train_data.drop(columns=constant_cols)\n",
    "            self.test_data = self.test_data.drop(columns=constant_cols)\n",
    "            self.dropped_cols.extend(constant_cols)\n",
    "            print(f\"已删除常量列\")\n",
    "        else:\n",
    "            print(\"未发现常量列\")\n",
    "        \n",
    "        return constant_cols\n",
    "    \n",
    "    def remove_high_missing_columns(self, threshold=0.95):\n",
    "        \"\"\"\n",
    "        Drop feature columns whose missing-value rate exceeds ``threshold``.\n",
    "        \n",
    "        Parameters:\n",
    "        - threshold: missing-rate cutoff (default 95%)\n",
    "        \"\"\"\n",
    "        print(f\"\\n删除缺失率>{threshold*100}%的列...\")\n",
    "        n_rows = len(self.train_data)\n",
    "        protected = {self.id_col, self.target_col}\n",
    "        high_missing_cols = [\n",
    "            col for col in self.train_data.columns\n",
    "            if col not in protected\n",
    "            and self.train_data[col].isnull().sum() / n_rows > threshold\n",
    "        ]\n",
    "        \n",
    "        if high_missing_cols:\n",
    "            print(f\"发现 {len(high_missing_cols)} 个高缺失率列\")\n",
    "            # drop from both frames to keep schemas aligned\n",
    "            self.train_data = self.train_data.drop(columns=high_missing_cols)\n",
    "            self.test_data = self.test_data.drop(columns=high_missing_cols)\n",
    "            self.dropped_cols.extend(high_missing_cols)\n",
    "            print(f\"已删除高缺失率列\")\n",
    "        else:\n",
    "            print(\"未发现高缺失率列\")\n",
    "        \n",
    "        return high_missing_cols\n",
    "    \n",
    "    def handle_inf_values(self):\n",
    "        \"\"\"\n",
    "        Replace +/-inf with NaN in every numeric feature column of\n",
    "        both the train and test frames.\n",
    "        \"\"\"\n",
    "        print(\"\\n处理无穷值...\")\n",
    "        \n",
    "        # numeric feature columns, taken from train (test shares the schema)\n",
    "        numeric_cols = self.train_data.select_dtypes(include=[np.number]).columns\n",
    "        numeric_cols = [col for col in numeric_cols if col not in [self.id_col, self.target_col]]\n",
    "        \n",
    "        inf_count = 0\n",
    "        for col in numeric_cols:\n",
    "            # training set\n",
    "            train_inf_mask = np.isinf(self.train_data[col])\n",
    "            if train_inf_mask.any():\n",
    "                inf_count += train_inf_mask.sum()\n",
    "                self.train_data.loc[train_inf_mask, col] = np.nan\n",
    "            \n",
    "            # test set\n",
    "            test_inf_mask = np.isinf(self.test_data[col])\n",
    "            if test_inf_mask.any():\n",
    "                inf_count += test_inf_mask.sum()\n",
    "                self.test_data.loc[test_inf_mask, col] = np.nan\n",
    "        \n",
    "        if inf_count > 0:\n",
    "            print(f\"发现并处理 {inf_count} 个无穷值\")\n",
    "        else:\n",
    "            print(\"未发现无穷值\")\n",
    "    \n",
    "    def identify_feature_types(self):\n",
    "        \"\"\"\n",
    "        Classify feature columns as categorical or numerical.\n",
    "        \n",
    "        object dtype -> categorical; int64/int32 with <= 20 unique\n",
    "        values -> categorical; everything else -> numerical.\n",
    "        \"\"\"\n",
    "        print(\"\\n识别特征类型...\")\n",
    "        \n",
    "        categorical_features = []\n",
    "        numerical_features = []\n",
    "        \n",
    "        for col in self.train_data.columns:\n",
    "            # skip the key and the target\n",
    "            if col in (self.id_col, self.target_col):\n",
    "                continue\n",
    "            dtype = self.train_data[col].dtype\n",
    "            if dtype == 'object':\n",
    "                categorical_features.append(col)\n",
    "            elif dtype in ['int64', 'int32'] and self.train_data[col].nunique() <= 20:\n",
    "                # low-cardinality integers are treated as categorical\n",
    "                categorical_features.append(col)\n",
    "            else:\n",
    "                numerical_features.append(col)\n",
    "        \n",
    "        self.feature_types = {\n",
    "            'categorical': categorical_features,\n",
    "            'numerical': numerical_features\n",
    "        }\n",
    "        \n",
    "        print(f\"类别型特征数: {len(categorical_features)}\")\n",
    "        print(f\"数值型特征数: {len(numerical_features)}\")\n",
    "        \n",
    "        return self.feature_types\n",
    "    \n",
    "    def convert_data_types(self):\n",
    "        \"\"\"\n",
    "        Downcast numeric columns to shrink memory usage.\n",
    "        \n",
    "        Integers go to the smallest int type that holds the observed\n",
    "        range; float64 goes to float32 when all values fit.\n",
    "        FIX: the range is now computed over train AND test - the\n",
    "        original used the training range only, so test-set values\n",
    "        outside it could silently overflow during astype().\n",
    "        \"\"\"\n",
    "        print(\"\\n优化数据类型...\")\n",
    "        \n",
    "        # memory footprint before optimization (MB)\n",
    "        train_mem_before = self.train_data.memory_usage(deep=True).sum() / 1024 / 1024\n",
    "        test_mem_before = self.test_data.memory_usage(deep=True).sum() / 1024 / 1024\n",
    "        \n",
    "        for col in self.train_data.columns:\n",
    "            if col in [self.id_col, self.target_col]:\n",
    "                continue\n",
    "            \n",
    "            col_type = self.train_data[col].dtype\n",
    "            \n",
    "            # Integer downcast\n",
    "            if col_type in ['int64', 'int32']:\n",
    "                c_min = self.train_data[col].min()\n",
    "                c_max = self.train_data[col].max()\n",
    "                # widen the range with the test set so the cast is safe there too\n",
    "                if col in self.test_data.columns:\n",
    "                    c_min = min(c_min, self.test_data[col].min())\n",
    "                    c_max = max(c_max, self.test_data[col].max())\n",
    "                \n",
    "                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n",
    "                    self.train_data[col] = self.train_data[col].astype(np.int8)\n",
    "                    self.test_data[col] = self.test_data[col].astype(np.int8)\n",
    "                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n",
    "                    self.train_data[col] = self.train_data[col].astype(np.int16)\n",
    "                    self.test_data[col] = self.test_data[col].astype(np.int16)\n",
    "                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n",
    "                    self.train_data[col] = self.train_data[col].astype(np.int32)\n",
    "                    self.test_data[col] = self.test_data[col].astype(np.int32)\n",
    "            \n",
    "            # Float downcast\n",
    "            elif col_type in ['float64', 'float32']:\n",
    "                c_min = self.train_data[col].min()\n",
    "                c_max = self.train_data[col].max()\n",
    "                # widen the range with the test set so the cast is safe there too\n",
    "                if col in self.test_data.columns:\n",
    "                    c_min = min(c_min, self.test_data[col].min())\n",
    "                    c_max = max(c_max, self.test_data[col].max())\n",
    "                \n",
    "                if c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\n",
    "                    self.train_data[col] = self.train_data[col].astype(np.float32)\n",
    "                    self.test_data[col] = self.test_data[col].astype(np.float32)\n",
    "        \n",
    "        # memory footprint after optimization (MB)\n",
    "        train_mem_after = self.train_data.memory_usage(deep=True).sum() / 1024 / 1024\n",
    "        test_mem_after = self.test_data.memory_usage(deep=True).sum() / 1024 / 1024\n",
    "        \n",
    "        print(f\"训练集内存: {train_mem_before:.2f}MB -> {train_mem_after:.2f}MB \"\n",
    "              f\"(减少 {(train_mem_before - train_mem_after) / train_mem_before * 100:.1f}%)\")\n",
    "        print(f\"测试集内存: {test_mem_before:.2f}MB -> {test_mem_after:.2f}MB \"\n",
    "              f\"(减少 {(test_mem_before - test_mem_after) / test_mem_before * 100:.1f}%)\")\n",
    "    \n",
    "    def process(self, remove_constant=True, remove_high_missing=True, \n",
    "                missing_threshold=0.95, handle_inf=True, optimize_dtype=True):\n",
    "        \"\"\"\n",
    "        Run the full preprocessing pipeline in order: constant-column\n",
    "        drop, high-missing drop, inf handling, type identification,\n",
    "        dtype downcasting.\n",
    "        \n",
    "        Parameters:\n",
    "        - remove_constant: drop constant columns\n",
    "        - remove_high_missing: drop columns with too many missing values\n",
    "        - missing_threshold: missing-rate cutoff\n",
    "        - handle_inf: replace infinities with NaN\n",
    "        - optimize_dtype: downcast numeric dtypes\n",
    "        \n",
    "        Returns:\n",
    "        - train_data: processed training set\n",
    "        - test_data: processed test set\n",
    "        - feature_types: dict with 'categorical'/'numerical' lists\n",
    "        \"\"\"\n",
    "        print(\"=\"*100)\n",
    "        print(\"步骤2: 数据预处理与特征优化\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        if remove_constant:\n",
    "            self.remove_constant_columns()\n",
    "        \n",
    "        if remove_high_missing:\n",
    "            self.remove_high_missing_columns(threshold=missing_threshold)\n",
    "        \n",
    "        if handle_inf:\n",
    "            self.handle_inf_values()\n",
    "        \n",
    "        self.identify_feature_types()\n",
    "        \n",
    "        if optimize_dtype:\n",
    "            self.convert_data_types()\n",
    "        \n",
    "        print(f\"\\n预处理完成!\")\n",
    "        print(f\"训练集最终形状: {self.train_data.shape}\")\n",
    "        print(f\"测试集最终形状: {self.test_data.shape}\")\n",
    "        print(f\"删除的列数: {len(self.dropped_cols)}\")\n",
    "        \n",
    "        return self.train_data, self.test_data, self.feature_types\n",
    "\n",
    "# Run the preprocessing pipeline on the merged frames\n",
    "preprocessor = DataPreprocessor(train_data, test_data)\n",
    "train_data_processed, test_data_processed, feature_types = preprocessor.process()\n",
    "\n",
    "# Show feature-type details (first 20 of each kind)\n",
    "print(\"\\n\" + \"=\"*100)\n",
    "print(\"特征类型详情:\")\n",
    "print(\"=\"*100)\n",
    "print(f\"\\n类别型特征 ({len(feature_types['categorical'])}个):\")\n",
    "if len(feature_types['categorical']) > 0:\n",
    "    for i, col in enumerate(feature_types['categorical'][:20], 1):\n",
    "        nunique_train = train_data_processed[col].nunique()\n",
    "        print(f\"  {i}. {col} (唯一值: {nunique_train})\")\n",
    "    if len(feature_types['categorical']) > 20:\n",
    "        print(f\"  ... 还有 {len(feature_types['categorical']) - 20} 个类别型特征\")\n",
    "\n",
    "print(f\"\\n数值型特征 ({len(feature_types['numerical'])}个):\")\n",
    "if len(feature_types['numerical']) > 0:\n",
    "    for i, col in enumerate(feature_types['numerical'][:20], 1):\n",
    "        print(f\"  {i}. {col}\")\n",
    "    if len(feature_types['numerical']) > 20:\n",
    "        print(f\"  ... 还有 {len(feature_types['numerical']) - 20} 个数值型特征\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4fa5edc7",
   "metadata": {},
   "source": [
    "## 自定义Macro-F1评分函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "793473f4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "自定义Macro-F1评分器已创建\n",
      "评分器名称: macro_f1\n",
      "最优值: 1.0\n",
      "越大越好: True\n"
     ]
    }
   ],
   "source": [
    "from sklearn.metrics import f1_score\n",
    "# FIX: make_scorer(name=..., optimum=..., needs_threshold=...) is\n",
    "# AutoGluon's scorer factory, not sklearn's, and it was never imported -\n",
    "# this cell raised NameError on a fresh kernel.\n",
    "from autogluon.core.metrics import make_scorer\n",
    "\n",
    "def macro_f1_score(y_true, y_pred):\n",
    "    \"\"\"\n",
    "    Compute the Macro-F1 score (unweighted mean of per-class F1).\n",
    "    \n",
    "    Parameters:\n",
    "    - y_true: ground-truth labels\n",
    "    - y_pred: predicted labels\n",
    "    \n",
    "    Returns:\n",
    "    - macro_f1: Macro-F1 score\n",
    "    \"\"\"\n",
    "    return f1_score(y_true, y_pred, average='macro')\n",
    "\n",
    "# AutoGluon scorer object usable as an eval_metric\n",
    "macro_f1_scorer = make_scorer(\n",
    "    name='macro_f1',\n",
    "    score_func=macro_f1_score,\n",
    "    optimum=1.0,\n",
    "    greater_is_better=True,\n",
    "    needs_proba=False,\n",
    "    needs_threshold=False\n",
    ")\n",
    "\n",
    "print(\"自定义Macro-F1评分器已创建\")\n",
    "print(f\"评分器名称: {macro_f1_scorer.name}\")\n",
    "print(f\"最优值: {macro_f1_scorer.optimum}\")\n",
    "print(f\"越大越好: {macro_f1_scorer.greater_is_better}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fc0e390a",
   "metadata": {},
   "source": [
    "## 准备训练数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "5900f962",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "====================================================================================================\n",
      "步骤3: 准备训练数据\n",
      "====================================================================================================\n",
      "\n",
      "训练集特征形状: (51397, 1244)\n",
      "训练集目标形状: (51397,)\n",
      "测试集特征形状: (5975, 1244)\n",
      "\n",
      "原始目标变量分布 (1-10):\n",
      "  类别 1:   1459 ( 2.84%)\n",
      "  类别 2:    309 ( 0.60%)\n",
      "  类别 3:   5586 (10.87%)\n",
      "  类别 4:  15303 (29.77%)\n",
      "  类别 5:   8193 (15.94%)\n",
      "  类别 6:  10261 (19.96%)\n",
      "  类别 7:   1276 ( 2.48%)\n",
      "  类别 8:   2498 ( 4.86%)\n",
      "  类别 9:   6476 (12.60%)\n",
      "  类别 10:     36 ( 0.07%)\n",
      "\n",
      "转换后目标变量分布 (0-9,用于模型训练):\n",
      "  类别 0:   1459 ( 2.84%)\n",
      "  类别 1:    309 ( 0.60%)\n",
      "  类别 2:   5586 (10.87%)\n",
      "  类别 3:  15303 (29.77%)\n",
      "  类别 4:   8193 (15.94%)\n",
      "  类别 5:  10261 (19.96%)\n",
      "  类别 6:   1276 ( 2.48%)\n",
      "  类别 7:   2498 ( 4.86%)\n",
      "  类别 8:   6476 (12.60%)\n",
      "  类别 9:     36 ( 0.07%)\n",
      "\n",
      "总特征数: 1244\n",
      "\n",
      "数据质量检查:\n",
      "训练集缺失值总数: 32719392\n",
      "\n",
      "训练集特征形状: (51397, 1244)\n",
      "训练集目标形状: (51397,)\n",
      "测试集特征形状: (5975, 1244)\n",
      "\n",
      "原始目标变量分布 (1-10):\n",
      "  类别 1:   1459 ( 2.84%)\n",
      "  类别 2:    309 ( 0.60%)\n",
      "  类别 3:   5586 (10.87%)\n",
      "  类别 4:  15303 (29.77%)\n",
      "  类别 5:   8193 (15.94%)\n",
      "  类别 6:  10261 (19.96%)\n",
      "  类别 7:   1276 ( 2.48%)\n",
      "  类别 8:   2498 ( 4.86%)\n",
      "  类别 9:   6476 (12.60%)\n",
      "  类别 10:     36 ( 0.07%)\n",
      "\n",
      "转换后目标变量分布 (0-9,用于模型训练):\n",
      "  类别 0:   1459 ( 2.84%)\n",
      "  类别 1:    309 ( 0.60%)\n",
      "  类别 2:   5586 (10.87%)\n",
      "  类别 3:  15303 (29.77%)\n",
      "  类别 4:   8193 (15.94%)\n",
      "  类别 5:  10261 (19.96%)\n",
      "  类别 6:   1276 ( 2.48%)\n",
      "  类别 7:   2498 ( 4.86%)\n",
      "  类别 8:   6476 (12.60%)\n",
      "  类别 9:     36 ( 0.07%)\n",
      "\n",
      "总特征数: 1244\n",
      "\n",
      "数据质量检查:\n",
      "训练集缺失值总数: 32719392\n",
      "测试集缺失值总数: 3471619\n",
      "\n",
      "数据准备完成! 可以开始模型训练...\n",
      "测试集缺失值总数: 3471619\n",
      "\n",
      "数据准备完成! 可以开始模型训练...\n"
     ]
    }
   ],
   "source": [
    "# Prepare the training data\n",
    "print(\"=\"*100)\n",
    "print(\"步骤3: 准备训练数据\")\n",
    "print(\"=\"*100)\n",
    "\n",
    "# Split features from target; errors='ignore' tolerates absent columns\n",
    "X_train = train_data_processed.drop(columns=['CUST_NO', 'FLAG', 'DATA_DAT'], errors='ignore')\n",
    "y_train_original = train_data_processed['FLAG'].copy()  # keep original 1-10 labels for final output\n",
    "y_train = train_data_processed['FLAG'] - 1  # shift to 0-9 for model training\n",
    "X_test = test_data_processed.drop(columns=['CUST_NO', 'DATA_DAT'], errors='ignore')\n",
    "\n",
    "print(f\"\\n训练集特征形状: {X_train.shape}\")\n",
    "print(f\"训练集目标形状: {y_train.shape}\")\n",
    "print(f\"测试集特征形状: {X_test.shape}\")\n",
    "\n",
    "# Inspect the target distribution before and after the label shift\n",
    "print(f\"\\n原始目标变量分布 (1-10):\")\n",
    "flag_counts = y_train_original.value_counts().sort_index()\n",
    "for flag, count in flag_counts.items():\n",
    "    print(f\"  类别 {flag}: {count:6d} ({count/len(y_train)*100:5.2f}%)\")\n",
    "\n",
    "print(f\"\\n转换后目标变量分布 (0-9,用于模型训练):\")\n",
    "flag_counts_model = y_train.value_counts().sort_index()\n",
    "for flag, count in flag_counts_model.items():\n",
    "    print(f\"  类别 {flag}: {count:6d} ({count/len(y_train)*100:5.2f}%)\")\n",
    "\n",
    "# Feature list used by the trainers below\n",
    "features = X_train.columns.tolist()\n",
    "print(f\"\\n总特征数: {len(features)}\")\n",
    "\n",
    "# Sanity check: remaining missing values (GBM models handle NaN natively)\n",
    "print(f\"\\n数据质量检查:\")\n",
    "print(f\"训练集缺失值总数: {X_train.isnull().sum().sum()}\")\n",
    "print(f\"测试集缺失值总数: {X_test.isnull().sum().sum()}\")\n",
    "\n",
    "print(f\"\\n数据准备完成! 可以开始模型训练...\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "98efaf15",
   "metadata": {},
   "source": [
    "## 模型训练工具类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "05716ac6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "多模型训练器已定义!\n"
     ]
    }
   ],
   "source": [
    "class MultiModelTrainer:\n",
    "    \"\"\"\n",
    "    多模型训练器 - 支持LightGBM、CatBoost、XGBoost\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, X_train, y_train, X_test, n_splits=5, random_state=2024):\n",
    "        \"\"\"\n",
    "        初始化训练器\n",
    "        \n",
    "        参数:\n",
    "        - X_train: 训练特征\n",
    "        - y_train: 训练标签\n",
    "        - X_test: 测试特征\n",
    "        - n_splits: K折交叉验证折数\n",
    "        - random_state: 随机种子\n",
    "        \"\"\"\n",
    "        self.X_train = X_train\n",
    "        self.y_train = y_train\n",
    "        self.X_test = X_test\n",
    "        self.n_splits = n_splits\n",
    "        self.random_state = random_state\n",
    "        \n",
    "        # 存储结果\n",
    "        self.oof_predictions = {}  # OOF预测\n",
    "        self.test_predictions = {}  # 测试集预测\n",
    "        self.cv_scores = {}  # 交叉验证分数\n",
    "        self.models = {}  # 训练好的模型\n",
    "        self.feature_importance = {}  # 特征重要性\n",
    "        \n",
    "    def find_best_threshold(self, y_true, y_pred_proba):\n",
    "        \"\"\"\n",
    "        找到最佳阈值以最大化Macro-F1\n",
    "        \n",
    "        参数:\n",
    "        - y_true: 真实标签\n",
    "        - y_pred_proba: 预测概率\n",
    "        \n",
    "        返回:\n",
    "        - best_threshold: 最佳阈值字典 {类别: 阈值}\n",
    "        - best_score: 最佳分数\n",
    "        \"\"\"\n",
    "        # 获取所有类别\n",
    "        classes = np.unique(y_true)\n",
    "        n_classes = len(classes)\n",
    "        \n",
    "        # 为每个类别独立寻找最佳阈值\n",
    "        best_thresholds = {}\n",
    "        \n",
    "        # 简单策略: 对每个类别使用相同的阈值范围\n",
    "        thresholds = np.arange(0.1, 0.9, 0.02)\n",
    "        best_score = 0\n",
    "        best_threshold = 0.5\n",
    "        \n",
    "        for thresh in thresholds:\n",
    "            # 将概率转换为类别\n",
    "            y_pred = []\n",
    "            for i in range(len(y_pred_proba)):\n",
    "                # 选择概率最大的类别\n",
    "                pred_class = classes[np.argmax(y_pred_proba[i])]\n",
    "                # 如果最大概率小于阈值，则预测为最频繁的类别\n",
    "                if np.max(y_pred_proba[i]) < thresh:\n",
    "                    pred_class = y_true.mode()[0]\n",
    "                y_pred.append(pred_class)\n",
    "            \n",
    "            score = f1_score(y_true, y_pred, average='macro')\n",
    "            if score > best_score:\n",
    "                best_score = score\n",
    "                best_threshold = thresh\n",
    "        \n",
    "        return best_threshold, best_score\n",
    "    \n",
    "    def train_lightgbm(self, params=None):\n",
    "        \"\"\"\n",
    "        训练LightGBM模型\n",
    "        \n",
    "        参数:\n",
    "        - params: 模型参数\n",
    "        \n",
    "        返回:\n",
    "        - oof_pred: OOF预测\n",
    "        - test_pred: 测试集预测\n",
    "        - cv_score: 交叉验证平均分数\n",
    "        \"\"\"\n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(\"训练 LightGBM 模型\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        if params is None:\n",
    "            params = {\n",
    "                'objective': 'multiclass',\n",
    "                'num_class': len(np.unique(self.y_train)),\n",
    "                'metric': 'multi_logloss',\n",
    "                'boosting_type': 'gbdt',\n",
    "                'num_leaves': 31,\n",
    "                'max_depth': -1,\n",
    "                'learning_rate': 0.05,\n",
    "                'feature_fraction': 0.8,\n",
    "                'bagging_fraction': 0.8,\n",
    "                'bagging_freq': 5,\n",
    "                'min_child_samples': 20,\n",
    "                'min_child_weight': 0.001,\n",
    "                'lambda_l1': 0.1,\n",
    "                'lambda_l2': 0.1,\n",
    "                'verbosity': -1,\n",
    "                'seed': self.random_state,\n",
    "                'n_jobs': -1\n",
    "            }\n",
    "        \n",
    "        print(\"\\n模型参数:\")\n",
    "        for key, value in params.items():\n",
    "            print(f\"  {key}: {value}\")\n",
    "        \n",
    "        # 初始化\n",
    "        n_classes = len(np.unique(self.y_train))\n",
    "        oof_pred = np.zeros((len(self.X_train), n_classes))\n",
    "        test_pred = np.zeros((len(self.X_test), n_classes))\n",
    "        cv_scores = []\n",
    "        models = []\n",
    "        feature_imp = pd.DataFrame()\n",
    "        \n",
    "        # K折交叉验证\n",
    "        kfold = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=self.random_state)\n",
    "        \n",
    "        for fold, (train_idx, valid_idx) in enumerate(kfold.split(self.X_train, self.y_train), 1):\n",
    "            print(f\"\\n{'='*50}\")\n",
    "            print(f\"Fold {fold}/{self.n_splits}\")\n",
    "            print(f\"{'='*50}\")\n",
    "            \n",
    "            X_tr, X_val = self.X_train.iloc[train_idx], self.X_train.iloc[valid_idx]\n",
    "            y_tr, y_val = self.y_train.iloc[train_idx], self.y_train.iloc[valid_idx]\n",
    "            \n",
    "            # 创建数据集\n",
    "            train_data = lgb.Dataset(X_tr, label=y_tr)\n",
    "            valid_data = lgb.Dataset(X_val, label=y_val, reference=train_data)\n",
    "            \n",
    "            # 训练模型\n",
    "            model = lgb.train(\n",
    "                params,\n",
    "                train_data,\n",
    "                num_boost_round=10000,\n",
    "                valid_sets=[train_data, valid_data],\n",
    "                valid_names=['train', 'valid'],\n",
    "                callbacks=[\n",
    "                    lgb.early_stopping(stopping_rounds=100, verbose=False),\n",
    "                    lgb.log_evaluation(period=500)\n",
    "                ]\n",
    "            )\n",
    "            \n",
    "            # 预测\n",
    "            oof_pred[valid_idx] = model.predict(X_val, num_iteration=model.best_iteration)\n",
    "            test_pred += model.predict(self.X_test, num_iteration=model.best_iteration) / self.n_splits\n",
    "            \n",
    "            # 计算Macro-F1\n",
    "            oof_pred_labels = np.argmax(oof_pred[valid_idx], axis=1)\n",
    "            fold_score = f1_score(y_val, oof_pred_labels, average='macro')\n",
    "            cv_scores.append(fold_score)\n",
    "            \n",
    "            print(f\"\\nFold {fold} Macro-F1: {fold_score:.6f}\")\n",
    "            print(f\"Best iteration: {model.best_iteration}\")\n",
    "            \n",
    "            # 保存模型\n",
    "            models.append(model)\n",
    "            \n",
    "            # 特征重要性\n",
    "            fold_importance = pd.DataFrame({\n",
    "                'feature': self.X_train.columns,\n",
    "                'importance': model.feature_importance(importance_type='gain'),\n",
    "                'fold': fold\n",
    "            })\n",
    "            feature_imp = pd.concat([feature_imp, fold_importance], axis=0)\n",
    "        \n",
    "        # 计算最终OOF分数\n",
    "        oof_pred_labels = np.argmax(oof_pred, axis=1)\n",
    "        oof_score = f1_score(self.y_train, oof_pred_labels, average='macro')\n",
    "        \n",
    "        print(f\"\\n{'='*100}\")\n",
    "        print(f\"LightGBM 训练完成!\")\n",
    "        print(f\"{'='*100}\")\n",
    "        print(f\"OOF Macro-F1: {oof_score:.6f}\")\n",
    "        print(f\"CV Macro-F1: {np.mean(cv_scores):.6f} (+/- {np.std(cv_scores):.6f})\")\n",
    "        \n",
    "        # 保存结果\n",
    "        self.oof_predictions['lgb'] = oof_pred\n",
    "        self.test_predictions['lgb'] = test_pred\n",
    "        self.cv_scores['lgb'] = np.mean(cv_scores)\n",
    "        self.models['lgb'] = models\n",
    "        self.feature_importance['lgb'] = feature_imp\n",
    "        \n",
    "        return oof_pred, test_pred, np.mean(cv_scores)\n",
    "    \n",
    "    def train_catboost(self, params=None):\n",
    "        \"\"\"\n",
    "        训练CatBoost模型\n",
    "        \n",
    "        参数:\n",
    "        - params: 模型参数\n",
    "        \n",
    "        返回:\n",
    "        - oof_pred: OOF预测\n",
    "        - test_pred: 测试集预测\n",
    "        - cv_score: 交叉验证平均分数\n",
    "        \"\"\"\n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(\"训练 CatBoost 模型\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        if params is None:\n",
    "            params = {\n",
    "                'loss_function': 'MultiClass',\n",
    "                'eval_metric': 'TotalF1:average=Macro',\n",
    "                'iterations': 10000,\n",
    "                'learning_rate': 0.05,\n",
    "                'depth': 6,\n",
    "                'l2_leaf_reg': 3,\n",
    "                'bootstrap_type': 'Bernoulli',\n",
    "                'subsample': 0.8,\n",
    "                'random_strength': 1,\n",
    "                'bagging_temperature': 1,\n",
    "                'od_type': 'Iter',\n",
    "                'od_wait': 100,\n",
    "                'random_seed': self.random_state,\n",
    "                'verbose': 500,\n",
    "                'thread_count': -1\n",
    "            }\n",
    "        \n",
    "        print(\"\\n模型参数:\")\n",
    "        for key, value in params.items():\n",
    "            print(f\"  {key}: {value}\")\n",
    "        \n",
    "        # 初始化\n",
    "        n_classes = len(np.unique(self.y_train))\n",
    "        oof_pred = np.zeros((len(self.X_train), n_classes))\n",
    "        test_pred = np.zeros((len(self.X_test), n_classes))\n",
    "        cv_scores = []\n",
    "        models = []\n",
    "        feature_imp = pd.DataFrame()\n",
    "        \n",
    "        # K折交叉验证\n",
    "        kfold = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=self.random_state)\n",
    "        \n",
    "        for fold, (train_idx, valid_idx) in enumerate(kfold.split(self.X_train, self.y_train), 1):\n",
    "            print(f\"\\n{'='*50}\")\n",
    "            print(f\"Fold {fold}/{self.n_splits}\")\n",
    "            print(f\"{'='*50}\")\n",
    "            \n",
    "            X_tr, X_val = self.X_train.iloc[train_idx], self.X_train.iloc[valid_idx]\n",
    "            y_tr, y_val = self.y_train.iloc[train_idx], self.y_train.iloc[valid_idx]\n",
    "            \n",
    "            # 创建数据集\n",
    "            train_pool = cat.Pool(X_tr, y_tr)\n",
    "            valid_pool = cat.Pool(X_val, y_val)\n",
    "            \n",
    "            # 训练模型\n",
    "            model = cat.CatBoost(params)\n",
    "            model.fit(\n",
    "                train_pool,\n",
    "                eval_set=valid_pool,\n",
    "                use_best_model=True,\n",
    "                verbose_eval=500\n",
    "            )\n",
    "            \n",
    "            # 预测\n",
    "            oof_pred[valid_idx] = model.predict(X_val, prediction_type='Probability')\n",
    "            test_pred += model.predict(self.X_test, prediction_type='Probability') / self.n_splits\n",
    "            \n",
    "            # 计算Macro-F1\n",
    "            oof_pred_labels = np.argmax(oof_pred[valid_idx], axis=1)\n",
    "            fold_score = f1_score(y_val, oof_pred_labels, average='macro')\n",
    "            cv_scores.append(fold_score)\n",
    "            \n",
    "            print(f\"\\nFold {fold} Macro-F1: {fold_score:.6f}\")\n",
    "            print(f\"Best iteration: {model.best_iteration_}\")\n",
    "            \n",
    "            # 保存模型\n",
    "            models.append(model)\n",
    "            \n",
    "            # 特征重要性\n",
    "            fold_importance = pd.DataFrame({\n",
    "                'feature': self.X_train.columns,\n",
    "                'importance': model.get_feature_importance(),\n",
    "                'fold': fold\n",
    "            })\n",
    "            feature_imp = pd.concat([feature_imp, fold_importance], axis=0)\n",
    "        \n",
    "        # 计算最终OOF分数\n",
    "        oof_pred_labels = np.argmax(oof_pred, axis=1)\n",
    "        oof_score = f1_score(self.y_train, oof_pred_labels, average='macro')\n",
    "        \n",
    "        print(f\"\\n{'='*100}\")\n",
    "        print(f\"CatBoost 训练完成!\")\n",
    "        print(f\"{'='*100}\")\n",
    "        print(f\"OOF Macro-F1: {oof_score:.6f}\")\n",
    "        print(f\"CV Macro-F1: {np.mean(cv_scores):.6f} (+/- {np.std(cv_scores):.6f})\")\n",
    "        \n",
    "        # 保存结果\n",
    "        self.oof_predictions['cat'] = oof_pred\n",
    "        self.test_predictions['cat'] = test_pred\n",
    "        self.cv_scores['cat'] = np.mean(cv_scores)\n",
    "        self.models['cat'] = models\n",
    "        self.feature_importance['cat'] = feature_imp\n",
    "        \n",
    "        return oof_pred, test_pred, np.mean(cv_scores)\n",
    "    \n",
    "    def train_xgboost(self, params=None):\n",
    "        \"\"\"\n",
    "        训练XGBoost模型\n",
    "        \n",
    "        参数:\n",
    "        - params: 模型参数\n",
    "        \n",
    "        返回:\n",
    "        - oof_pred: OOF预测\n",
    "        - test_pred: 测试集预测\n",
    "        - cv_score: 交叉验证平均分数\n",
    "        \"\"\"\n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(\"训练 XGBoost 模型\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        if params is None:\n",
    "            params = {\n",
    "                'objective': 'multi:softprob',\n",
    "                'num_class': len(np.unique(self.y_train)),\n",
    "                'eval_metric': 'mlogloss',\n",
    "                'tree_method': 'hist',\n",
    "                'max_depth': 6,\n",
    "                'learning_rate': 0.05,\n",
    "                'subsample': 0.8,\n",
    "                'colsample_bytree': 0.8,\n",
    "                'min_child_weight': 1,\n",
    "                'gamma': 0,\n",
    "                'reg_alpha': 0.1,\n",
    "                'reg_lambda': 1,\n",
    "                'seed': self.random_state,\n",
    "                'n_jobs': -1,\n",
    "                'verbosity': 1\n",
    "            }\n",
    "        \n",
    "        print(\"\\n模型参数:\")\n",
    "        for key, value in params.items():\n",
    "            print(f\"  {key}: {value}\")\n",
    "        \n",
    "        # 初始化\n",
    "        n_classes = len(np.unique(self.y_train))\n",
    "        oof_pred = np.zeros((len(self.X_train), n_classes))\n",
    "        test_pred = np.zeros((len(self.X_test), n_classes))\n",
    "        cv_scores = []\n",
    "        models = []\n",
    "        feature_imp = pd.DataFrame()\n",
    "        \n",
    "        # K折交叉验证\n",
    "        kfold = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=self.random_state)\n",
    "        \n",
    "        for fold, (train_idx, valid_idx) in enumerate(kfold.split(self.X_train, self.y_train), 1):\n",
    "            print(f\"\\n{'='*50}\")\n",
    "            print(f\"Fold {fold}/{self.n_splits}\")\n",
    "            print(f\"{'='*50}\")\n",
    "            \n",
    "            X_tr, X_val = self.X_train.iloc[train_idx], self.X_train.iloc[valid_idx]\n",
    "            y_tr, y_val = self.y_train.iloc[train_idx], self.y_train.iloc[valid_idx]\n",
    "            \n",
    "            # 创建数据集\n",
    "            dtrain = xgb.DMatrix(X_tr, label=y_tr)\n",
    "            dvalid = xgb.DMatrix(X_val, label=y_val)\n",
    "            dtest = xgb.DMatrix(self.X_test)\n",
    "            \n",
    "            # 训练模型\n",
    "            evals = [(dtrain, 'train'), (dvalid, 'valid')]\n",
    "            model = xgb.train(\n",
    "                params,\n",
    "                dtrain,\n",
    "                num_boost_round=10000,\n",
    "                evals=evals,\n",
    "                early_stopping_rounds=100,\n",
    "                verbose_eval=500\n",
    "            )\n",
    "            \n",
    "            # 预测\n",
    "            oof_pred[valid_idx] = model.predict(dvalid, iteration_range=(0, model.best_iteration + 1))\n",
    "            test_pred += model.predict(dtest, iteration_range=(0, model.best_iteration + 1)) / self.n_splits\n",
    "            \n",
    "            # 计算Macro-F1\n",
    "            oof_pred_labels = np.argmax(oof_pred[valid_idx], axis=1)\n",
    "            fold_score = f1_score(y_val, oof_pred_labels, average='macro')\n",
    "            cv_scores.append(fold_score)\n",
    "            \n",
    "            print(f\"\\nFold {fold} Macro-F1: {fold_score:.6f}\")\n",
    "            print(f\"Best iteration: {model.best_iteration}\")\n",
    "            \n",
    "            # 保存模型\n",
    "            models.append(model)\n",
    "            \n",
    "            # 特征重要性\n",
    "            importance_dict = model.get_score(importance_type='gain')\n",
    "            fold_importance = pd.DataFrame({\n",
    "                'feature': list(importance_dict.keys()),\n",
    "                'importance': list(importance_dict.values()),\n",
    "                'fold': fold\n",
    "            })\n",
    "            feature_imp = pd.concat([feature_imp, fold_importance], axis=0)\n",
    "        \n",
    "        # 计算最终OOF分数\n",
    "        oof_pred_labels = np.argmax(oof_pred, axis=1)\n",
    "        oof_score = f1_score(self.y_train, oof_pred_labels, average='macro')\n",
    "        \n",
    "        print(f\"\\n{'='*100}\")\n",
    "        print(f\"XGBoost 训练完成!\")\n",
    "        print(f\"{'='*100}\")\n",
    "        print(f\"OOF Macro-F1: {oof_score:.6f}\")\n",
    "        print(f\"CV Macro-F1: {np.mean(cv_scores):.6f} (+/- {np.std(cv_scores):.6f})\")\n",
    "        \n",
    "        # 保存结果\n",
    "        self.oof_predictions['xgb'] = oof_pred\n",
    "        self.test_predictions['xgb'] = test_pred\n",
    "        self.cv_scores['xgb'] = np.mean(cv_scores)\n",
    "        self.models['xgb'] = models\n",
    "        self.feature_importance['xgb'] = feature_imp\n",
    "        \n",
    "        return oof_pred, test_pred, np.mean(cv_scores)\n",
    "    \n",
    "    def get_model_summary(self):\n",
    "        \"\"\"\n",
    "        获取所有模型的汇总信息\n",
    "        \n",
    "        返回:\n",
    "        - summary_df: 汇总DataFrame\n",
    "        \"\"\"\n",
    "        if not self.cv_scores:\n",
    "            print(\"还没有训练任何模型!\")\n",
    "            return None\n",
    "        \n",
    "        summary_data = []\n",
    "        for model_name, score in self.cv_scores.items():\n",
    "            oof_pred_labels = np.argmax(self.oof_predictions[model_name], axis=1)\n",
    "            oof_score = f1_score(self.y_train, oof_pred_labels, average='macro')\n",
    "            \n",
    "            summary_data.append({\n",
    "                '模型': model_name.upper(),\n",
    "                'CV分数': f\"{score:.6f}\",\n",
    "                'OOF分数': f\"{oof_score:.6f}\"\n",
    "            })\n",
    "        \n",
    "        summary_df = pd.DataFrame(summary_data)\n",
    "        return summary_df\n",
    "\n",
    "print(\"多模型训练器已定义!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "801fb1d3",
   "metadata": {},
   "source": [
    "## 开始模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "b1405f0c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "====================================================================================================\n",
      "步骤4: 多模型训练\n",
      "====================================================================================================\n",
      "\n",
      "训练器已创建!\n",
      "交叉验证折数: 5\n",
      "随机种子: 2024\n",
      "训练样本数: 51397\n",
      "测试样本数: 5975\n",
      "特征数: 1244\n"
     ]
    }
   ],
   "source": [
    "# 创建训练器\n",
    "print(\"=\"*100)\n",
    "print(\"步骤4: 多模型训练\")\n",
    "print(\"=\"*100)\n",
    "\n",
    "trainer = MultiModelTrainer(\n",
    "    X_train=X_train,\n",
    "    y_train=y_train,\n",
    "    X_test=X_test,\n",
    "    n_splits=5,\n",
    "    random_state=2024\n",
    ")\n",
    "\n",
    "print(f\"\\n训练器已创建!\")\n",
    "print(f\"交叉验证折数: {trainer.n_splits}\")\n",
    "print(f\"随机种子: {trainer.random_state}\")\n",
    "print(f\"训练样本数: {len(X_train)}\")\n",
    "print(f\"测试样本数: {len(X_test)}\")\n",
    "print(f\"特征数: {X_train.shape[1]}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7fe42f53",
   "metadata": {},
   "source": [
    "### 训练LightGBM模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "1f060955",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "====================================================================================================\n",
      "训练 LightGBM 模型\n",
      "====================================================================================================\n",
      "\n",
      "模型参数:\n",
      "  objective: multiclass\n",
      "  num_class: 10\n",
      "  metric: multi_logloss\n",
      "  boosting_type: gbdt\n",
      "  num_leaves: 63\n",
      "  max_depth: 8\n",
      "  learning_rate: 0.03\n",
      "  feature_fraction: 0.8\n",
      "  bagging_fraction: 0.8\n",
      "  bagging_freq: 5\n",
      "  min_child_samples: 30\n",
      "  min_child_weight: 0.001\n",
      "  lambda_l1: 0.2\n",
      "  lambda_l2: 0.2\n",
      "  min_split_gain: 0.01\n",
      "  verbosity: -1\n",
      "  seed: 2024\n",
      "  n_jobs: -1\n",
      "\n",
      "==================================================\n",
      "Fold 1/5\n",
      "==================================================\n",
      "\n",
      "Fold 1 Macro-F1: 0.589617\n",
      "Best iteration: 180\n",
      "\n",
      "==================================================\n",
      "Fold 2/5\n",
      "==================================================\n",
      "\n",
      "Fold 1 Macro-F1: 0.589617\n",
      "Best iteration: 180\n",
      "\n",
      "==================================================\n",
      "Fold 2/5\n",
      "==================================================\n",
      "\n",
      "Fold 2 Macro-F1: 0.595502\n",
      "Best iteration: 169\n",
      "\n",
      "==================================================\n",
      "Fold 3/5\n",
      "==================================================\n",
      "\n",
      "Fold 2 Macro-F1: 0.595502\n",
      "Best iteration: 169\n",
      "\n",
      "==================================================\n",
      "Fold 3/5\n",
      "==================================================\n",
      "\n",
      "Fold 3 Macro-F1: 0.599316\n",
      "Best iteration: 184\n",
      "\n",
      "==================================================\n",
      "Fold 4/5\n",
      "==================================================\n",
      "\n",
      "Fold 3 Macro-F1: 0.599316\n",
      "Best iteration: 184\n",
      "\n",
      "==================================================\n",
      "Fold 4/5\n",
      "==================================================\n",
      "\n",
      "Fold 4 Macro-F1: 0.585885\n",
      "Best iteration: 174\n",
      "\n",
      "==================================================\n",
      "Fold 5/5\n",
      "==================================================\n",
      "\n",
      "Fold 4 Macro-F1: 0.585885\n",
      "Best iteration: 174\n",
      "\n",
      "==================================================\n",
      "Fold 5/5\n",
      "==================================================\n",
      "\n",
      "Fold 5 Macro-F1: 0.590631\n",
      "Best iteration: 176\n",
      "\n",
      "====================================================================================================\n",
      "LightGBM 训练完成!\n",
      "====================================================================================================\n",
      "OOF Macro-F1: 0.592268\n",
      "CV Macro-F1: 0.592190 (+/- 0.004702)\n",
      "\n",
      "Fold 5 Macro-F1: 0.590631\n",
      "Best iteration: 176\n",
      "\n",
      "====================================================================================================\n",
      "LightGBM 训练完成!\n",
      "====================================================================================================\n",
      "OOF Macro-F1: 0.592268\n",
      "CV Macro-F1: 0.592190 (+/- 0.004702)\n"
     ]
    }
   ],
   "source": [
    "# LightGBM 参数配置\n",
    "lgb_params = {\n",
    "    'objective': 'multiclass',\n",
    "    'num_class': len(np.unique(y_train)),\n",
    "    'metric': 'multi_logloss',\n",
    "    'boosting_type': 'gbdt',\n",
    "    'num_leaves': 63,\n",
    "    'max_depth': 8,\n",
    "    'learning_rate': 0.03,\n",
    "    'feature_fraction': 0.8,\n",
    "    'bagging_fraction': 0.8,\n",
    "    'bagging_freq': 5,\n",
    "    'min_child_samples': 30,\n",
    "    'min_child_weight': 0.001,\n",
    "    'lambda_l1': 0.2,\n",
    "    'lambda_l2': 0.2,\n",
    "    'min_split_gain': 0.01,\n",
    "    'verbosity': -1,\n",
    "    'seed': 2024,\n",
    "    'n_jobs': -1\n",
    "}\n",
    "\n",
    "# 训练LightGBM\n",
    "lgb_oof, lgb_test, lgb_score = trainer.train_lightgbm(params=lgb_params)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1cb7e3b7",
   "metadata": {},
   "source": [
    "### 训练CatBoost模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "0513b114",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "====================================================================================================\n",
      "训练 CatBoost 模型\n",
      "====================================================================================================\n",
      "\n",
      "模型参数:\n",
      "  loss_function: MultiClass\n",
      "  eval_metric: TotalF1:average=Macro\n",
      "  iterations: 500\n",
      "  learning_rate: 0.03\n",
      "  depth: 8\n",
      "  l2_leaf_reg: 3\n",
      "  bootstrap_type: Bernoulli\n",
      "  subsample: 0.8\n",
      "  random_strength: 1\n",
      "  od_type: Iter\n",
      "  od_wait: 100\n",
      "  random_seed: 2024\n",
      "  verbose: 500\n",
      "  thread_count: -1\n",
      "\n",
      "==================================================\n",
      "Fold 1/5\n",
      "==================================================\n",
      "0:\tlearn: 0.4766601\ttest: 0.4704391\tbest: 0.4704391 (0)\ttotal: 2.09s\tremaining: 17m 24s\n",
      "0:\tlearn: 0.4766601\ttest: 0.4704391\tbest: 0.4704391 (0)\ttotal: 2.09s\tremaining: 17m 24s\n",
      "499:\tlearn: 0.6096902\ttest: 0.5668391\tbest: 0.5671096 (490)\ttotal: 16m 7s\tremaining: 0us\n",
      "\n",
      "bestTest = 0.5671095892\n",
      "bestIteration = 490\n",
      "\n",
      "Shrink model to first 491 iterations.\n",
      "\n",
      "Fold 1 Macro-F1: 0.567110\n",
      "Best iteration: 490\n",
      "\n",
      "==================================================\n",
      "Fold 2/5\n",
      "==================================================\n",
      "499:\tlearn: 0.6096902\ttest: 0.5668391\tbest: 0.5671096 (490)\ttotal: 16m 7s\tremaining: 0us\n",
      "\n",
      "bestTest = 0.5671095892\n",
      "bestIteration = 490\n",
      "\n",
      "Shrink model to first 491 iterations.\n",
      "\n",
      "Fold 1 Macro-F1: 0.567110\n",
      "Best iteration: 490\n",
      "\n",
      "==================================================\n",
      "Fold 2/5\n",
      "==================================================\n",
      "0:\tlearn: 0.4758458\ttest: 0.4741995\tbest: 0.4741995 (0)\ttotal: 1.4s\tremaining: 11m 36s\n",
      "0:\tlearn: 0.4758458\ttest: 0.4741995\tbest: 0.4741995 (0)\ttotal: 1.4s\tremaining: 11m 36s\n",
      "499:\tlearn: 0.6107227\ttest: 0.5678441\tbest: 0.5678768 (497)\ttotal: 11m 56s\tremaining: 0us\n",
      "\n",
      "bestTest = 0.5678767815\n",
      "bestIteration = 497\n",
      "\n",
      "Shrink model to first 498 iterations.\n",
      "\n",
      "Fold 2 Macro-F1: 0.567877\n",
      "Best iteration: 497\n",
      "\n",
      "==================================================\n",
      "Fold 3/5\n",
      "==================================================\n",
      "0:\tlearn: 0.4051419\ttest: 0.3979031\tbest: 0.3979031 (0)\ttotal: 1.64s\tremaining: 13m 39s\n",
      "499:\tlearn: 0.6100197\ttest: 0.5737019\tbest: 0.5737434 (498)\ttotal: 12m 27s\tremaining: 0us\n",
      "\n",
      "bestTest = 0.5737433772\n",
      "bestIteration = 498\n",
      "\n",
      "Shrink model to first 499 iterations.\n",
      "\n",
      "Fold 3 Macro-F1: 0.573743\n",
      "Best iteration: 498\n",
      "\n",
      "==================================================\n",
      "Fold 4/5\n",
      "==================================================\n",
      "0:\tlearn: 0.4056743\ttest: 0.4048858\tbest: 0.4048858 (0)\ttotal: 1.41s\tremaining: 11m 41s\n",
      "499:\tlearn: 0.6097631\ttest: 0.5659175\tbest: 0.5659175 (499)\ttotal: 12m 12s\tremaining: 0us\n",
      "\n",
      "bestTest = 0.5659175163\n",
      "bestIteration = 499\n",
      "\n",
      "\n",
      "Fold 4 Macro-F1: 0.565918\n",
      "Best iteration: 499\n",
      "\n",
      "==================================================\n",
      "Fold 5/5\n",
      "==================================================\n",
      "0:\tlearn: 0.4104113\ttest: 0.4049723\tbest: 0.4049723 (0)\ttotal: 1.4s\tremaining: 11m 38s\n",
      "499:\tlearn: 0.6108498\ttest: 0.5717266\tbest: 0.5718486 (492)\ttotal: 12m 14s\tremaining: 0us\n",
      "\n",
      "bestTest = 0.5718486278\n",
      "bestIteration = 492\n",
      "\n",
      "Shrink model to first 493 iterations.\n",
      "\n",
      "Fold 5 Macro-F1: 0.571849\n",
      "Best iteration: 492\n",
      "\n",
      "====================================================================================================\n",
      "CatBoost 训练完成!\n",
      "====================================================================================================\n",
      "OOF Macro-F1: 0.569255\n",
      "CV Macro-F1: 0.569299 (+/- 0.002983)\n"
     ]
    }
   ],
   "source": [
    "# CatBoost 参数配置\n",
    "cat_params = {\n",
    "    'loss_function': 'MultiClass',\n",
    "    'eval_metric': 'TotalF1:average=Macro',\n",
    "    'iterations': 500,\n",
    "    'learning_rate': 0.03,\n",
    "    'depth': 8,\n",
    "    'l2_leaf_reg': 3,\n",
    "    'bootstrap_type': 'Bernoulli',\n",
    "    'subsample': 0.8,\n",
    "    'random_strength': 1,\n",
    "    'od_type': 'Iter',\n",
    "    'od_wait': 100,\n",
    "    'random_seed': 2024,\n",
    "    'verbose': 500,\n",
    "    'thread_count': -1\n",
    "}\n",
    "\n",
    "# 训练CatBoost\n",
    "cat_oof, cat_test, cat_score = trainer.train_catboost(params=cat_params)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6af2f7b4",
   "metadata": {},
   "source": [
    "### 训练XGBoost模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "67c9da79",
   "metadata": {},
   "outputs": [],
   "source": [
    "# XGBoost 参数配置\n",
    "xgb_params = {\n",
    "    'objective': 'multi:softprob',\n",
    "    'num_class': len(np.unique(y_train)),\n",
    "    'eval_metric': 'mlogloss',\n",
    "    'tree_method': 'hist',\n",
    "    'max_depth': 8,\n",
    "    'learning_rate': 0.03,\n",
    "    'subsample': 0.8,\n",
    "    'colsample_bytree': 0.8,\n",
    "    'colsample_bylevel': 0.8,\n",
    "    'min_child_weight': 3,\n",
    "    'gamma': 0.01,\n",
    "    'reg_alpha': 0.2,\n",
    "    'reg_lambda': 1,\n",
    "    'seed': 2024,\n",
    "    'n_jobs': -1,\n",
    "    'verbosity': 1\n",
    "}\n",
    "\n",
    "# 训练XGBoost\n",
    "xgb_oof, xgb_test, xgb_score = trainer.train_xgboost(params=xgb_params)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6d5f25e7",
   "metadata": {},
   "source": [
    "### 查看单模型表现"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7a03f188",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Collect per-model CV results from the trainer and display them as a table\n",
    "model_summary = trainer.get_model_summary()\n",
    "print(\"\\n\" + \"=\"*100)\n",
    "print(\"单模型表现汇总\")\n",
    "print(\"=\"*100)\n",
    "print(model_summary.to_string(index=False))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6d469914",
   "metadata": {},
   "source": [
    "## 模型融合与集成"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2569a0f0",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ModelEnsemble:\n",
    "    \"\"\"\n",
    "    Model blender supporting several ensembling strategies over the\n",
    "    per-model OOF / test probability matrices stored on a trained\n",
    "    MultiModelTrainer.\n",
    "\n",
    "    NOTE(review): relies on ``f1_score`` being available at module level;\n",
    "    it is not in the visible import cell — confirm\n",
    "    ``from sklearn.metrics import f1_score`` runs earlier in the notebook.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, trainer):\n",
    "        \"\"\"\n",
    "        Initialize the blender.\n",
    "        \n",
    "        Parameters:\n",
    "        - trainer: MultiModelTrainer instance providing oof_predictions,\n",
    "          test_predictions, cv_scores and y_train (as used below)\n",
    "        \"\"\"\n",
    "        self.trainer = trainer\n",
    "        self.best_weights = None  # set by optimize_weights()\n",
    "        self.best_score = 0  # best OOF macro-F1 found by optimize_weights()\n",
    "        \n",
    "    def simple_average(self):\n",
    "        \"\"\"\n",
    "        Strategy 1: equal-weight average of all model probabilities.\n",
    "        \n",
    "        Returns:\n",
    "        - ensemble_oof: blended OOF probability matrix\n",
    "        - ensemble_test: blended test-set probability matrix\n",
    "        - score: OOF macro-F1 of the blend\n",
    "        \"\"\"\n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(\"策略1: 简单平均融合\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        n_models = len(self.trainer.oof_predictions)\n",
    "        \n",
    "        # Blend OOF predictions (each model contributes 1/n_models)\n",
    "        ensemble_oof = np.zeros_like(list(self.trainer.oof_predictions.values())[0])\n",
    "        for model_name, oof_pred in self.trainer.oof_predictions.items():\n",
    "            ensemble_oof += oof_pred / n_models\n",
    "            print(f\"模型 {model_name.upper()}: 权重 = {1/n_models:.4f}\")\n",
    "        \n",
    "        # Blend test-set predictions the same way\n",
    "        ensemble_test = np.zeros_like(list(self.trainer.test_predictions.values())[0])\n",
    "        for model_name, test_pred in self.trainer.test_predictions.items():\n",
    "            ensemble_test += test_pred / n_models\n",
    "        \n",
    "        # Score the blend against the training labels\n",
    "        oof_pred_labels = np.argmax(ensemble_oof, axis=1)\n",
    "        score = f1_score(self.trainer.y_train, oof_pred_labels, average='macro')\n",
    "        \n",
    "        print(f\"\\n简单平均 OOF Macro-F1: {score:.6f}\")\n",
    "        \n",
    "        return ensemble_oof, ensemble_test, score\n",
    "    \n",
    "    def weighted_average(self, weights=None):\n",
    "        \"\"\"\n",
    "        Strategy 2: weighted average of model probabilities.\n",
    "        \n",
    "        Parameters:\n",
    "        - weights: dict {model_name: weight}; if None, each model's CV\n",
    "          score is used as its (unnormalized) weight\n",
    "        \n",
    "        Returns:\n",
    "        - ensemble_oof: blended OOF probability matrix\n",
    "        - ensemble_test: blended test-set probability matrix\n",
    "        - score: OOF macro-F1 of the blend\n",
    "        \"\"\"\n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(\"策略2: 加权平均融合\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        if weights is None:\n",
    "            # Default: weight each model by its CV score\n",
    "            total_score = sum(self.trainer.cv_scores.values())\n",
    "            weights = {name: score/total_score for name, score in self.trainer.cv_scores.items()}\n",
    "            print(\"使用CV分数作为权重:\")\n",
    "        else:\n",
    "            print(\"使用自定义权重:\")\n",
    "        \n",
    "        # Normalize so the weights sum to 1\n",
    "        weight_sum = sum(weights.values())\n",
    "        weights = {name: w/weight_sum for name, w in weights.items()}\n",
    "        \n",
    "        for model_name, weight in weights.items():\n",
    "            print(f\"模型 {model_name.upper()}: 权重 = {weight:.4f}\")\n",
    "        \n",
    "        # Blend OOF predictions\n",
    "        ensemble_oof = np.zeros_like(list(self.trainer.oof_predictions.values())[0])\n",
    "        for model_name, oof_pred in self.trainer.oof_predictions.items():\n",
    "            ensemble_oof += oof_pred * weights[model_name]\n",
    "        \n",
    "        # Blend test-set predictions\n",
    "        ensemble_test = np.zeros_like(list(self.trainer.test_predictions.values())[0])\n",
    "        for model_name, test_pred in self.trainer.test_predictions.items():\n",
    "            ensemble_test += test_pred * weights[model_name]\n",
    "        \n",
    "        # Score the blend\n",
    "        oof_pred_labels = np.argmax(ensemble_oof, axis=1)\n",
    "        score = f1_score(self.trainer.y_train, oof_pred_labels, average='macro')\n",
    "        \n",
    "        print(f\"\\n加权平均 OOF Macro-F1: {score:.6f}\")\n",
    "        \n",
    "        return ensemble_oof, ensemble_test, score\n",
    "    \n",
    "    def optimize_weights(self, n_trials=1000):\n",
    "        \"\"\"\n",
    "        Strategy 3: random search over the weight simplex (Dirichlet\n",
    "        samples) to maximize the OOF macro-F1.\n",
    "        \n",
    "        Parameters:\n",
    "        - n_trials: number of random weight vectors to try\n",
    "        \n",
    "        Returns:\n",
    "        - ensemble_oof: OOF blend under the best weights found\n",
    "        - ensemble_test: test-set blend under the best weights found\n",
    "        - best_score: best OOF macro-F1 found\n",
    "        \n",
    "        Side effects: stores the winner in self.best_weights /\n",
    "        self.best_score.\n",
    "        \"\"\"\n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(f\"策略3: 权重优化融合 (尝试 {n_trials} 次)\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        model_names = list(self.trainer.oof_predictions.keys())\n",
    "        n_models = len(model_names)\n",
    "        \n",
    "        best_weights = None\n",
    "        best_score = 0\n",
    "        \n",
    "        np.random.seed(2024)  # reproducible search\n",
    "        \n",
    "        for trial in range(n_trials):\n",
    "            # Sample a random point on the probability simplex\n",
    "            weights = np.random.dirichlet(np.ones(n_models))\n",
    "            weights_dict = {name: w for name, w in zip(model_names, weights)}\n",
    "            \n",
    "            # Blend OOF predictions under this weight vector\n",
    "            ensemble_oof = np.zeros_like(list(self.trainer.oof_predictions.values())[0])\n",
    "            for model_name, oof_pred in self.trainer.oof_predictions.items():\n",
    "                ensemble_oof += oof_pred * weights_dict[model_name]\n",
    "            \n",
    "            # Score it\n",
    "            oof_pred_labels = np.argmax(ensemble_oof, axis=1)\n",
    "            score = f1_score(self.trainer.y_train, oof_pred_labels, average='macro')\n",
    "            \n",
    "            # Keep the best weight vector seen so far\n",
    "            if score > best_score:\n",
    "                best_score = score\n",
    "                best_weights = weights_dict.copy()\n",
    "            \n",
    "            # Progress every 100 trials\n",
    "            if (trial + 1) % 100 == 0:\n",
    "                print(f\"尝试 {trial+1}/{n_trials}, 当前最优分数: {best_score:.6f}\")\n",
    "        \n",
    "        # NOTE(review): if n_trials == 0 (or every trial scores 0),\n",
    "        # best_weights stays None and the loop below raises.\n",
    "        print(f\"\\n优化完成! 最优权重:\")\n",
    "        for model_name, weight in best_weights.items():\n",
    "            print(f\"模型 {model_name.upper()}: 权重 = {weight:.4f}\")\n",
    "        \n",
    "        # Re-blend OOF and test predictions with the winning weights\n",
    "        ensemble_oof = np.zeros_like(list(self.trainer.oof_predictions.values())[0])\n",
    "        for model_name, oof_pred in self.trainer.oof_predictions.items():\n",
    "            ensemble_oof += oof_pred * best_weights[model_name]\n",
    "        \n",
    "        ensemble_test = np.zeros_like(list(self.trainer.test_predictions.values())[0])\n",
    "        for model_name, test_pred in self.trainer.test_predictions.items():\n",
    "            ensemble_test += test_pred * best_weights[model_name]\n",
    "        \n",
    "        print(f\"\\n优化融合 OOF Macro-F1: {best_score:.6f}\")\n",
    "        \n",
    "        # Cache for later reuse (e.g. persisting to disk)\n",
    "        self.best_weights = best_weights\n",
    "        self.best_score = best_score\n",
    "        \n",
    "        return ensemble_oof, ensemble_test, best_score\n",
    "    \n",
    "    def rank_average(self):\n",
    "        \"\"\"\n",
    "        Strategy 4: rank-average blend. Probabilities are replaced by\n",
    "        their within-column ranks before averaging, so the blend is\n",
    "        insensitive to each model's probability scale.\n",
    "        \n",
    "        Returns:\n",
    "        - ensemble_oof: blended OOF pseudo-probability matrix\n",
    "        - ensemble_test: blended test-set pseudo-probability matrix\n",
    "        - score: OOF macro-F1 of the blend\n",
    "        \"\"\"\n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(\"策略4: Rank平均融合\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        # Per column: rank 0 = highest probability (double-argsort trick)\n",
    "        def prob_to_rank(probs):\n",
    "            ranks = np.zeros_like(probs)\n",
    "            for i in range(probs.shape[1]):\n",
    "                ranks[:, i] = np.argsort(np.argsort(-probs[:, i]))\n",
    "            return ranks\n",
    "        \n",
    "        # Average the rank matrices over all models (OOF)\n",
    "        ensemble_oof_rank = np.zeros_like(list(self.trainer.oof_predictions.values())[0])\n",
    "        for model_name, oof_pred in self.trainer.oof_predictions.items():\n",
    "            oof_rank = prob_to_rank(oof_pred)\n",
    "            ensemble_oof_rank += oof_rank / len(self.trainer.oof_predictions)\n",
    "        \n",
    "        # Map averaged ranks back to pseudo-probabilities:\n",
    "        # lower rank (better) -> larger value, then row-normalize\n",
    "        ensemble_oof = np.exp(-ensemble_oof_rank / ensemble_oof_rank.max())\n",
    "        ensemble_oof = ensemble_oof / ensemble_oof.sum(axis=1, keepdims=True)\n",
    "        \n",
    "        # Same procedure for the test set\n",
    "        ensemble_test_rank = np.zeros_like(list(self.trainer.test_predictions.values())[0])\n",
    "        for model_name, test_pred in self.trainer.test_predictions.items():\n",
    "            test_rank = prob_to_rank(test_pred)\n",
    "            ensemble_test_rank += test_rank / len(self.trainer.test_predictions)\n",
    "        \n",
    "        ensemble_test = np.exp(-ensemble_test_rank / ensemble_test_rank.max())\n",
    "        ensemble_test = ensemble_test / ensemble_test.sum(axis=1, keepdims=True)\n",
    "        \n",
    "        # Score the blend\n",
    "        oof_pred_labels = np.argmax(ensemble_oof, axis=1)\n",
    "        score = f1_score(self.trainer.y_train, oof_pred_labels, average='macro')\n",
    "        \n",
    "        print(f\"\\nRank平均 OOF Macro-F1: {score:.6f}\")\n",
    "        \n",
    "        return ensemble_oof, ensemble_test, score\n",
    "    \n",
    "    def compare_strategies(self, custom_weights=None):\n",
    "        \"\"\"\n",
    "        Run all four blending strategies and compare their OOF scores.\n",
    "        \n",
    "        Parameters:\n",
    "        - custom_weights: optional weight dict forwarded to\n",
    "          weighted_average()\n",
    "        \n",
    "        Returns:\n",
    "        - results: dict mapping strategy key -> {'oof', 'test', 'score'}\n",
    "        - comparison: DataFrame of strategies sorted by OOF macro-F1,\n",
    "          best first\n",
    "        \"\"\"\n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(\"步骤5: 模型融合策略对比\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        results = {}\n",
    "        \n",
    "        # 1. Simple average\n",
    "        oof1, test1, score1 = self.simple_average()\n",
    "        results['simple'] = {'oof': oof1, 'test': test1, 'score': score1}\n",
    "        \n",
    "        # 2. Weighted average\n",
    "        oof2, test2, score2 = self.weighted_average(weights=custom_weights)\n",
    "        results['weighted'] = {'oof': oof2, 'test': test2, 'score': score2}\n",
    "        \n",
    "        # 3. Random-search weight optimization\n",
    "        oof3, test3, score3 = self.optimize_weights(n_trials=1000)\n",
    "        results['optimized'] = {'oof': oof3, 'test': test3, 'score': score3}\n",
    "        \n",
    "        # 4. Rank average\n",
    "        oof4, test4, score4 = self.rank_average()\n",
    "        results['rank'] = {'oof': oof4, 'test': test4, 'score': score4}\n",
    "        \n",
    "        # Summary table, best strategy first\n",
    "        print(\"\\n\" + \"=\"*100)\n",
    "        print(\"融合策略对比汇总\")\n",
    "        print(\"=\"*100)\n",
    "        \n",
    "        comparison = pd.DataFrame({\n",
    "            '融合策略': ['简单平均', '加权平均', '权重优化', 'Rank平均'],\n",
    "            'OOF Macro-F1': [score1, score2, score3, score4]\n",
    "        })\n",
    "        comparison = comparison.sort_values('OOF Macro-F1', ascending=False).reset_index(drop=True)\n",
    "        print(comparison.to_string(index=False))\n",
    "        \n",
    "        # Report the winner\n",
    "        best_strategy = comparison.iloc[0]['融合策略']\n",
    "        best_score = comparison.iloc[0]['OOF Macro-F1']\n",
    "        print(f\"\\n最优融合策略: {best_strategy} (Macro-F1: {best_score:.6f})\")\n",
    "        \n",
    "        return results, comparison\n",
    "\n",
    "print(\"模型融合器已定义!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "31da357d",
   "metadata": {},
   "source": [
    "### 执行模型融合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f720ebb9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the blender over the trained models\n",
    "ensemble = ModelEnsemble(trainer)\n",
    "\n",
    "# Run and compare all blending strategies\n",
    "ensemble_results, ensemble_comparison = ensemble.compare_strategies()\n",
    "\n",
    "# Select the test predictions of the winning strategy, matched by a\n",
    "# substring of the strategy label ('加权' is checked before '优化', so\n",
    "# '加权平均' and '权重优化' cannot be confused)\n",
    "best_strategy_name = ensemble_comparison.iloc[0]['融合策略']\n",
    "if '简单' in best_strategy_name:\n",
    "    best_ensemble_test = ensemble_results['simple']['test']\n",
    "elif '加权' in best_strategy_name:\n",
    "    best_ensemble_test = ensemble_results['weighted']['test']\n",
    "elif '优化' in best_strategy_name:\n",
    "    best_ensemble_test = ensemble_results['optimized']['test']\n",
    "else:\n",
    "    best_ensemble_test = ensemble_results['rank']['test']\n",
    "\n",
    "print(f\"\\n使用最优融合策略的预测结果\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "82f12209",
   "metadata": {},
   "source": [
    "## 生成提交文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "84edbf1f",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"=\"*100)\n",
    "print(\"步骤6: 生成提交文件\")\n",
    "print(\"=\"*100)\n",
    "\n",
    "# Convert blended probabilities to class indices (0-9)\n",
    "test_pred_labels = np.argmax(best_ensemble_test, axis=1)\n",
    "\n",
    "# Shift back to the original label space (1-10); assumes training labels\n",
    "# were encoded as y - 1 — TODO confirm against the label-encoding cell\n",
    "test_pred_labels = test_pred_labels + 1\n",
    "\n",
    "# Assemble the submission frame: one row per target customer\n",
    "submission = pd.DataFrame({\n",
    "    'CUST_NO': test_target_cust['CUST_NO'],\n",
    "    'FLAG': test_pred_labels\n",
    "})\n",
    "\n",
    "print(f\"\\n提交文件形状: {submission.shape}\")\n",
    "print(f\"\\n预测类别分布 (1-10):\")\n",
    "flag_dist = submission['FLAG'].value_counts().sort_index()\n",
    "for flag, count in flag_dist.items():\n",
    "    print(f\"  类别 {flag}: {count:6d} ({count/len(submission)*100:5.2f}%)\")\n",
    "\n",
    "# Output directory for submissions and models\n",
    "output_dir = './model'\n",
    "os.makedirs(output_dir, exist_ok=True)\n",
    "\n",
    "# File name embeds the best OOF score for easy submission tracking\n",
    "best_score = ensemble_comparison.iloc[0]['OOF Macro-F1']\n",
    "output_filename = f'upload_ensemble_MacroF1_{best_score:.6f}.csv'\n",
    "output_path = os.path.join(output_dir, output_filename)\n",
    "\n",
    "# Save without header/index (expected upload format)\n",
    "submission.to_csv(output_path, index=False, header=False)\n",
    "\n",
    "print(f\"\\n提交文件已保存: {output_path}\")\n",
    "\n",
    "# Also save a copy with headers for human inspection\n",
    "output_path_with_header = os.path.join(output_dir, f'upload_ensemble_MacroF1_{best_score:.6f}_with_header.csv')\n",
    "submission.to_csv(output_path_with_header, index=False, header=True)\n",
    "print(f\"带列名版本已保存: {output_path_with_header}\")\n",
    "\n",
    "# Preview the first rows\n",
    "print(f\"\\n提交文件预览:\")\n",
    "print(submission.head(10))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7142d287",
   "metadata": {},
   "source": [
    "## 保存模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a2af251e",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"=\"*100)\n",
    "print(\"步骤7: 保存训练好的模型\")\n",
    "print(\"=\"*100)\n",
    "\n",
    "import joblib\n",
    "\n",
    "model_dir = './model'\n",
    "os.makedirs(model_dir, exist_ok=True)\n",
    "\n",
    "# Persist every fold of every model, preferring each library's native\n",
    "# format (portable across library versions, unlike pickles).\n",
    "# Fix: removed the unused 'model_path' local, and added a joblib\n",
    "# fallback so unknown model types are no longer silently skipped.\n",
    "for model_name, models_list in trainer.models.items():\n",
    "    for fold_idx, model in enumerate(models_list, 1):\n",
    "        if model_name == 'lgb':\n",
    "            # LightGBM native text format\n",
    "            model.save_model(os.path.join(model_dir, f'{model_name}_fold{fold_idx}.txt'))\n",
    "            print(f\"已保存: {model_name}_fold{fold_idx}.txt\")\n",
    "        elif model_name == 'cat':\n",
    "            # CatBoost native binary format\n",
    "            model.save_model(os.path.join(model_dir, f'{model_name}_fold{fold_idx}.cbm'))\n",
    "            print(f\"已保存: {model_name}_fold{fold_idx}.cbm\")\n",
    "        elif model_name == 'xgb':\n",
    "            # XGBoost native JSON format\n",
    "            model.save_model(os.path.join(model_dir, f'{model_name}_fold{fold_idx}.json'))\n",
    "            print(f\"已保存: {model_name}_fold{fold_idx}.json\")\n",
    "        else:\n",
    "            # Generic fallback: joblib pickle\n",
    "            joblib.dump(model, os.path.join(model_dir, f'{model_name}_fold{fold_idx}.pkl'))\n",
    "            print(f\"已保存: {model_name}_fold{fold_idx}.pkl\")\n",
    "\n",
    "# Persist the optimized blend weights, if optimize_weights() found any\n",
    "if ensemble.best_weights:\n",
    "    weights_path = os.path.join(model_dir, 'ensemble_weights.pkl')\n",
    "    with open(weights_path, 'wb') as f:\n",
    "        pickle.dump(ensemble.best_weights, f)\n",
    "    print(f\"\\n已保存融合权重: ensemble_weights.pkl\")\n",
    "\n",
    "# Persist the feature list so inference can rebuild the same input columns\n",
    "features_path = os.path.join(model_dir, 'features.pkl')\n",
    "with open(features_path, 'wb') as f:\n",
    "    pickle.dump(features, f)\n",
    "print(f\"已保存特征列表: features.pkl\")\n",
    "\n",
    "print(f\"\\n所有模型已保存到: {model_dir}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d51fdfab",
   "metadata": {},
   "source": [
    "## 特征重要性分析"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6bc10a2f",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"=\"*100)\n",
    "print(\"步骤8: 特征重要性分析\")\n",
    "print(\"=\"*100)\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "\n",
    "# Plot styling (style name is matplotlib>=3.6 specific)\n",
    "plt.style.use('seaborn-v0_8-darkgrid')\n",
    "sns.set_palette(\"husl\")\n",
    "\n",
    "# One panel per model. Fix: the panel count was hard-coded to 3, which\n",
    "# breaks (or leaves empty panels) when the trainer holds a different\n",
    "# number of models; squeeze=False keeps axes indexable even for 1 model.\n",
    "n_models = len(trainer.feature_importance)\n",
    "fig, axes = plt.subplots(1, n_models, figsize=(8 * n_models, 8), squeeze=False)\n",
    "axes = axes[0]\n",
    "fig.suptitle('各模型特征重要性对比 (Top 30)', fontsize=16, y=1.02)\n",
    "\n",
    "for idx, (model_name, feature_imp) in enumerate(trainer.feature_importance.items()):\n",
    "    # Mean importance across folds\n",
    "    importance_avg = feature_imp.groupby('feature')['importance'].mean().sort_values(ascending=False)\n",
    "    \n",
    "    # Top 30 features only, to keep the chart readable\n",
    "    top_features = importance_avg.head(30)\n",
    "    \n",
    "    # Horizontal bar chart, most important feature on top\n",
    "    ax = axes[idx]\n",
    "    top_features.plot(kind='barh', ax=ax, color=f'C{idx}')\n",
    "    ax.set_title(f'{model_name.upper()} 模型', fontsize=14, fontweight='bold')\n",
    "    ax.set_xlabel('重要性分数', fontsize=12)\n",
    "    ax.set_ylabel('特征', fontsize=12)\n",
    "    ax.invert_yaxis()\n",
    "    \n",
    "    # Annotate each bar with its value\n",
    "    for i, v in enumerate(top_features.values):\n",
    "        ax.text(v, i, f' {v:.0f}', va='center', fontsize=9)\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('./model/feature_importance_comparison.png', dpi=300, bbox_inches='tight')\n",
    "print(\"特征重要性对比图已保存: ./model/feature_importance_comparison.png\")\n",
    "plt.show()\n",
    "\n",
    "# Cross-model summary: merge the per-model mean importances\n",
    "print(\"\\n汇总特征重要性 (平均所有模型):\")\n",
    "all_importance = pd.DataFrame()\n",
    "for model_name, feature_imp in trainer.feature_importance.items():\n",
    "    model_avg = feature_imp.groupby('feature')['importance'].mean().reset_index()\n",
    "    model_avg.columns = ['feature', f'{model_name}_importance']\n",
    "    if all_importance.empty:\n",
    "        all_importance = model_avg\n",
    "    else:\n",
    "        all_importance = all_importance.merge(model_avg, on='feature', how='outer')\n",
    "\n",
    "# Average across models (outer merge may leave NaNs; mean skips them)\n",
    "importance_cols = [col for col in all_importance.columns if 'importance' in col]\n",
    "all_importance['avg_importance'] = all_importance[importance_cols].mean(axis=1)\n",
    "all_importance = all_importance.sort_values('avg_importance', ascending=False)\n",
    "\n",
    "# Show the 50 most important features\n",
    "print(\"\\nTop 50 重要特征:\")\n",
    "print(all_importance[['feature', 'avg_importance']].head(50).to_string(index=False))\n",
    "\n",
    "# Persist the full table for later inspection\n",
    "all_importance.to_csv('./model/feature_importance_summary.csv', index=False)\n",
    "print(\"\\n完整特征重要性已保存: ./model/feature_importance_summary.csv\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cfe90dec",
   "metadata": {},
   "source": [
    "## 完整训练报告"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e1897e3d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the report body ONCE, then both print it and write it to disk.\n",
    "# Fix: previously the identical ~50 lines of content were duplicated in\n",
    "# print(...) and f.write(...) form, so edits had to be made twice.\n",
    "report_lines = []\n",
    "add = report_lines.append\n",
    "\n",
    "# 1. Data statistics\n",
    "add(\"1. 数据统计\")\n",
    "add(\"-\" * 100)\n",
    "add(f\"训练样本数: {len(X_train):,}\")\n",
    "add(f\"测试样本数: {len(X_test):,}\")\n",
    "add(f\"特征总数: {len(features):,}\")\n",
    "add(f\"目标类别数: {len(np.unique(y_train))}\")\n",
    "add(\"\")\n",
    "add(\"目标变量分布:\")\n",
    "for flag, count in y_train.value_counts().sort_index().items():\n",
    "    add(f\"  类别 {flag}: {count:6,} ({count/len(y_train)*100:5.2f}%)\")\n",
    "\n",
    "# 2. Individual model performance\n",
    "add(\"\")\n",
    "add(\"-\" * 100)\n",
    "add(\"2. 单模型表现\")\n",
    "add(\"-\" * 100)\n",
    "add(model_summary.to_string(index=False))\n",
    "\n",
    "# 3. Blending strategy comparison\n",
    "add(\"\")\n",
    "add(\"-\" * 100)\n",
    "add(\"3. 融合策略对比\")\n",
    "add(\"-\" * 100)\n",
    "add(ensemble_comparison.to_string(index=False))\n",
    "\n",
    "# 4. Best blend weights (present only if optimize_weights() ran)\n",
    "add(\"\")\n",
    "add(\"-\" * 100)\n",
    "add(\"4. 最优融合权重\")\n",
    "add(\"-\" * 100)\n",
    "if ensemble.best_weights:\n",
    "    for model_name, weight in ensemble.best_weights.items():\n",
    "        add(f\"{model_name.upper():8s}: {weight:.6f}\")\n",
    "\n",
    "# 5. Final result\n",
    "add(\"\")\n",
    "add(\"-\" * 100)\n",
    "add(\"5. 最终结果\")\n",
    "add(\"-\" * 100)\n",
    "add(f\"最优融合策略: {ensemble_comparison.iloc[0]['融合策略']}\")\n",
    "add(f\"OOF Macro-F1: {ensemble_comparison.iloc[0]['OOF Macro-F1']:.6f}\")\n",
    "add(f\"提交文件: {output_filename}\")\n",
    "\n",
    "# 6. Test-set prediction distribution\n",
    "add(\"\")\n",
    "add(\"-\" * 100)\n",
    "add(\"6. 测试集预测分布\")\n",
    "add(\"-\" * 100)\n",
    "for flag, count in submission['FLAG'].value_counts().sort_index().items():\n",
    "    add(f\"  类别 {flag}: {count:6,} ({count/len(submission)*100:5.2f}%)\")\n",
    "\n",
    "report_body = \"\\n\".join(report_lines)\n",
    "\n",
    "# Console version\n",
    "print(\"\\n\" + \"=\"*100)\n",
    "print(\"完整训练报告\")\n",
    "print(\"=\"*100)\n",
    "print()\n",
    "print(report_body)\n",
    "print(\"\\n\" + \"=\"*100)\n",
    "print(\"训练完成! 所有结果已保存到 ./model 目录\")\n",
    "print(\"=\"*100)\n",
    "\n",
    "# File version: same body, different title plus a completion timestamp\n",
    "report_path = './model/training_report.txt'\n",
    "with open(report_path, 'w', encoding='utf-8') as f:\n",
    "    f.write(\"=\"*100 + \"\\n\")\n",
    "    f.write(\"信用卡潜力客户产品匹配推荐 - 模型训练报告\\n\")\n",
    "    f.write(\"=\"*100 + \"\\n\\n\")\n",
    "    f.write(report_body + \"\\n\")\n",
    "    f.write(\"\\n\" + \"=\"*100 + \"\\n\")\n",
    "    f.write(\"训练完成时间: \" + pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S') + \"\\n\")\n",
    "    f.write(\"=\"*100 + \"\\n\")\n",
    "\n",
    "print(f\"\\n训练报告已保存: {report_path}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "starcup",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
