{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "09f7126f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Silence library warnings (pandas / sklearn / lightgbm deprecation noise)\n",
    "# so the notebook output stays readable; remove while debugging.\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "c6ab2b72",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import gc\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import re\n",
    "import time\n",
    "from scipy import stats\n",
    "import matplotlib.pyplot as plt\n",
    "import category_encoders as ce\n",
    "import networkx as nx\n",
    "import pickle\n",
    "import lightgbm as lgb\n",
    "import catboost as cat\n",
    "import xgboost as xgb\n",
    "import seaborn as sns\n",
    "from datetime import timedelta\n",
    "from gensim.models import Word2Vec\n",
    "from io import StringIO\n",
    "from tqdm import tqdm\n",
    "from lightgbm import LGBMClassifier\n",
    "from lightgbm import log_evaluation, early_stopping\n",
    "from sklearn.metrics import roc_curve\n",
    "from scipy.stats import chi2_contingency, pearsonr\n",
    "from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n",
    "from sklearn.feature_extraction import FeatureHasher\n",
    "from sklearn.model_selection import StratifiedKFold, KFold, train_test_split, GridSearchCV\n",
    "from category_encoders import TargetEncoder\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from autogluon.tabular import TabularDataset, TabularPredictor, FeatureMetadata\n",
    "from autogluon.features.generators import AsTypeFeatureGenerator, BulkFeatureGenerator, DropUniqueFeatureGenerator, FillNaFeatureGenerator, PipelineFeatureGenerator\n",
    "from autogluon.features.generators import CategoryFeatureGenerator, IdentityFeatureGenerator, AutoMLPipelineFeatureGenerator\n",
    "from autogluon.common.features.types import R_INT, R_FLOAT\n",
    "from autogluon.core.metrics import make_scorer"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a08d6044",
   "metadata": {},
   "source": [
    "## 数据导入"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "df055add",
   "metadata": {},
   "source": [
    "## 通用导入函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "74bcbf7b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data_from_directory(directory):\n",
    "    \"\"\"\n",
    "    Load every CSV file in a directory as a separate global DataFrame.\n",
    "\n",
    "    Each file ``NAME.csv`` is parsed with pandas and bound to a notebook\n",
    "    global named ``NAME_data``.\n",
    "\n",
    "    Parameters:\n",
    "    - directory: path of the folder to scan\n",
    "\n",
    "    Returns:\n",
    "    - list with the names of the DataFrame variables that were created\n",
    "    \"\"\"\n",
    "    loaded_names = []\n",
    "    for entry in os.listdir(directory):\n",
    "        if not entry.endswith(\".csv\"):\n",
    "            continue  # skip anything that is not a CSV file\n",
    "        stem = os.path.splitext(entry)[0]\n",
    "        var_name = stem + '_data'  # variable name derived from the file name\n",
    "        # Bind the parsed frame directly into the notebook's global namespace.\n",
    "        globals()[var_name] = pd.read_csv(os.path.join(directory, entry))\n",
    "        loaded_names.append(var_name)\n",
    "        print(f\"数据集 {var_name} 已加载为 DataFrame\")\n",
    "\n",
    "    return loaded_names"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "993c86ce",
   "metadata": {},
   "source": [
    "## 训练集导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "722d1e40",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>OPR_DATE</th>\n",
       "      <th>OPR_TIME</th>\n",
       "      <th>CUST_NO</th>\n",
       "      <th>CUR_PAGE</th>\n",
       "      <th>LAST_PAGE</th>\n",
       "      <th>EVENT_NAME</th>\n",
       "      <th>MOD_NAME</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>20131001</td>\n",
       "      <td>07:26:19</td>\n",
       "      <td>ef9574fea62d42b9f36f53efb83931d2</td>\n",
       "      <td>ef4acd257f70d8237384bd10bd0eb0c4</td>\n",
       "      <td>5fa51e78f9086c800a2d927bba932f46</td>\n",
       "      <td>c3644c825659c088f8eab4b799e2c66a</td>\n",
       "      <td>e43a50ca221f89b61bc645cc05f982f3</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>20131003</td>\n",
       "      <td>07:43:12</td>\n",
       "      <td>d0c86160a7afeecc4041f9b18414f10f</td>\n",
       "      <td>a6e6d7234e0b7b3848eeea5d04611e62</td>\n",
       "      <td>03a9f80fc730d0ae0a3823f38b0d9ace</td>\n",
       "      <td>aacaedb874a9fb96bf0cda140dfc6f94</td>\n",
       "      <td>66e1fa9949d9931c69fe4f64a9b0f221</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>20131001</td>\n",
       "      <td>10:22:52</td>\n",
       "      <td>fbff3a8ebfaf8707f93bf52a200eb166</td>\n",
       "      <td>e2708b3ecf129eab747f746ed7ed9189</td>\n",
       "      <td>c4f15649fbbb62655e0281c6bed07350</td>\n",
       "      <td>c3644c825659c088f8eab4b799e2c66a</td>\n",
       "      <td>89f39ae744fe9f589ad73088dbba9f7d</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>20131007</td>\n",
       "      <td>00:12:46</td>\n",
       "      <td>c235e7536c96f5f2a79721e106370c56</td>\n",
       "      <td>845f9c031c8d9498f0de25ead7621cfa</td>\n",
       "      <td>ee7bd5c4578baeed03b896e9db9cf41b</td>\n",
       "      <td>aacaedb874a9fb96bf0cda140dfc6f94</td>\n",
       "      <td>66e1fa9949d9931c69fe4f64a9b0f221</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>20131002</td>\n",
       "      <td>11:00:57</td>\n",
       "      <td>a137205b6076ed04030d1dc39326e42e</td>\n",
       "      <td>c7cb0d23dcee31f682a1110f10159e31</td>\n",
       "      <td>845f9c031c8d9498f0de25ead7621cfa</td>\n",
       "      <td>c3644c825659c088f8eab4b799e2c66a</td>\n",
       "      <td>d02481baf0889f9012d0ccea950282d3</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   OPR_DATE  OPR_TIME                           CUST_NO  \\\n",
       "0  20131001  07:26:19  ef9574fea62d42b9f36f53efb83931d2   \n",
       "1  20131003  07:43:12  d0c86160a7afeecc4041f9b18414f10f   \n",
       "2  20131001  10:22:52  fbff3a8ebfaf8707f93bf52a200eb166   \n",
       "3  20131007  00:12:46  c235e7536c96f5f2a79721e106370c56   \n",
       "4  20131002  11:00:57  a137205b6076ed04030d1dc39326e42e   \n",
       "\n",
       "                           CUR_PAGE                         LAST_PAGE  \\\n",
       "0  ef4acd257f70d8237384bd10bd0eb0c4  5fa51e78f9086c800a2d927bba932f46   \n",
       "1  a6e6d7234e0b7b3848eeea5d04611e62  03a9f80fc730d0ae0a3823f38b0d9ace   \n",
       "2  e2708b3ecf129eab747f746ed7ed9189  c4f15649fbbb62655e0281c6bed07350   \n",
       "3  845f9c031c8d9498f0de25ead7621cfa  ee7bd5c4578baeed03b896e9db9cf41b   \n",
       "4  c7cb0d23dcee31f682a1110f10159e31  845f9c031c8d9498f0de25ead7621cfa   \n",
       "\n",
       "                         EVENT_NAME                          MOD_NAME  \n",
       "0  c3644c825659c088f8eab4b799e2c66a  e43a50ca221f89b61bc645cc05f982f3  \n",
       "1  aacaedb874a9fb96bf0cda140dfc6f94  66e1fa9949d9931c69fe4f64a9b0f221  \n",
       "2  c3644c825659c088f8eab4b799e2c66a  89f39ae744fe9f589ad73088dbba9f7d  \n",
       "3  aacaedb874a9fb96bf0cda140dfc6f94  66e1fa9949d9931c69fe4f64a9b0f221  \n",
       "4  c3644c825659c088f8eab4b799e2c66a  d02481baf0889f9012d0ccea950282d3  "
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the mobile-banking page-view detail table for the training window\n",
    "# (path is relative to the project root).\n",
    "train_mb_pageview_dtl_data = pd.read_csv('./data/Train/TRAIN_MB_PAGEVIEW_DTL.csv')\n",
    "train_mb_pageview_dtl_data.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "82e70968",
   "metadata": {},
   "source": [
    "## 测试集导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c3ef0f15",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集 A_ASSET_data 已加载为 DataFrame\n",
      "数据集 A_CCD_TR_DTL_data 已加载为 DataFrame\n",
      "数据集 A_MB_CUST_INFO_data 已加载为 DataFrame\n",
      "数据集 A_MB_PAGEVIEW_DTL_data 已加载为 DataFrame\n",
      "数据集 A_MB_TRNFLW_DTL_data 已加载为 DataFrame\n",
      "数据集 A_PROD_HOLD_data 已加载为 DataFrame\n",
      "数据集 A_TEST_NATURE_data 已加载为 DataFrame\n",
      "数据集 A_TR_APS_DTL_data 已加载为 DataFrame\n"
     ]
    }
   ],
   "source": [
    "#A_tr_aps_dtl_data = pd.read_csv('./data/A/A_TR_APS_DTL.csv')\n",
    "#A_tr_aps_dtl_data.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b6a9129f",
   "metadata": {},
   "source": [
    "# 特征工程"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8ca6b3c5",
   "metadata": {},
   "source": [
    "## 数据探查与预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "4ffdcde8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "掌银页面访问明细表基本信息:\n",
      "训练集数据形状: (18987427, 7)\n",
      "\n",
      "数据类型:\n",
      "OPR_DATE      datetime64[ns]\n",
      "OPR_TIME              object\n",
      "CUST_NO               object\n",
      "CUR_PAGE              object\n",
      "LAST_PAGE             object\n",
      "EVENT_NAME            object\n",
      "MOD_NAME              object\n",
      "dtype: object\n",
      "\n",
      "数据前5行:\n",
      "    OPR_DATE  OPR_TIME                           CUST_NO  \\\n",
      "0 2013-10-01  07:26:19  ef9574fea62d42b9f36f53efb83931d2   \n",
      "1 2013-10-03  07:43:12  d0c86160a7afeecc4041f9b18414f10f   \n",
      "2 2013-10-01  10:22:52  fbff3a8ebfaf8707f93bf52a200eb166   \n",
      "3 2013-10-07  00:12:46  c235e7536c96f5f2a79721e106370c56   \n",
      "4 2013-10-02  11:00:57  a137205b6076ed04030d1dc39326e42e   \n",
      "\n",
      "                           CUR_PAGE                         LAST_PAGE  \\\n",
      "0  ef4acd257f70d8237384bd10bd0eb0c4  5fa51e78f9086c800a2d927bba932f46   \n",
      "1  a6e6d7234e0b7b3848eeea5d04611e62  03a9f80fc730d0ae0a3823f38b0d9ace   \n",
      "2  e2708b3ecf129eab747f746ed7ed9189  c4f15649fbbb62655e0281c6bed07350   \n",
      "3  845f9c031c8d9498f0de25ead7621cfa  ee7bd5c4578baeed03b896e9db9cf41b   \n",
      "4  c7cb0d23dcee31f682a1110f10159e31  845f9c031c8d9498f0de25ead7621cfa   \n",
      "\n",
      "                         EVENT_NAME                          MOD_NAME  \n",
      "0  c3644c825659c088f8eab4b799e2c66a  e43a50ca221f89b61bc645cc05f982f3  \n",
      "1  aacaedb874a9fb96bf0cda140dfc6f94  66e1fa9949d9931c69fe4f64a9b0f221  \n",
      "2  c3644c825659c088f8eab4b799e2c66a  89f39ae744fe9f589ad73088dbba9f7d  \n",
      "3  aacaedb874a9fb96bf0cda140dfc6f94  66e1fa9949d9931c69fe4f64a9b0f221  \n",
      "4  c3644c825659c088f8eab4b799e2c66a  d02481baf0889f9012d0ccea950282d3  \n",
      "\n",
      "缺失值统计:\n",
      "OPR_DATE      0\n",
      "OPR_TIME      0\n",
      "CUST_NO       0\n",
      "CUR_PAGE      0\n",
      "LAST_PAGE     0\n",
      "EVENT_NAME    0\n",
      "MOD_NAME      0\n",
      "dtype: int64\n",
      "\n",
      "数值型字段统计:\n",
      "                            OPR_DATE\n",
      "count                       18987427\n",
      "mean   2013-11-24 05:45:29.650038784\n",
      "min              2013-10-01 00:00:00\n",
      "25%              2013-11-01 00:00:00\n",
      "50%              2013-11-29 00:00:00\n",
      "75%              2013-12-21 00:00:00\n",
      "max              2013-12-30 00:00:00\n",
      "\n",
      "唯一客户数: 59479\n"
     ]
    }
   ],
   "source": [
    "# Structural profile of the page-view table: shape, dtypes, sample rows,\n",
    "# missing values and customer cardinality.\n",
    "print(\"掌银页面访问明细表基本信息:\")\n",
    "print(f\"训练集数据形状: {train_mb_pageview_dtl_data.shape}\")\n",
    "print(f\"\\n数据类型:\\n{train_mb_pageview_dtl_data.dtypes}\")\n",
    "print(f\"\\n数据前5行:\\n{train_mb_pageview_dtl_data.head()}\")\n",
    "print(f\"\\n缺失值统计:\\n{train_mb_pageview_dtl_data.isnull().sum()}\")\n",
    "print(f\"\\n数值型字段统计:\\n{train_mb_pageview_dtl_data.describe()}\")\n",
    "print(f\"\\n唯一客户数: {train_mb_pageview_dtl_data['CUST_NO'].nunique()}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "98f45e8a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "分析各字段唯一值数量:\n",
      "OPR_DATE: 91 个唯一值\n",
      "OPR_TIME: 86400 个唯一值\n",
      "CUST_NO: 59479 个唯一值\n",
      "CUR_PAGE: 7582 个唯一值\n",
      "LAST_PAGE: 7819 个唯一值\n",
      "EVENT_NAME: 3 个唯一值\n",
      "MOD_NAME: 375 个唯一值\n",
      "\n",
      "当前页面top20:\n",
      "CUR_PAGE\n",
      "845f9c031c8d9498f0de25ead7621cfa    3458592\n",
      "d8188552f0bb760340fcde6e06961988    1053016\n",
      "ee7bd5c4578baeed03b896e9db9cf41b     871987\n",
      "c7c1d6e50c483b92843b88f38b46aec5     835605\n",
      "953d3f55a65e6ce18cd62a1dc07cb65f     802618\n",
      "a6e6d7234e0b7b3848eeea5d04611e62     788527\n",
      "c7cb0d23dcee31f682a1110f10159e31     611213\n",
      "66e1fa9949d9931c69fe4f64a9b0f221     566152\n",
      "167db1655df3856c1060316fd4fe55b6     485996\n",
      "32dcf982042fc7be55e0ddf957367886     375804\n",
      "bdb4a189c233886c23b0fb280a03e629     364175\n",
      "579d5ae9f3d97049874eab36e2a468a6     304443\n",
      "701cb907f30a69ab2bf788d085c7e2fc     266310\n",
      "8f3e8b87166e5df87de7dbbd97185eef     265763\n",
      "c08390a109ec4c8e9038bdb476805196     261577\n",
      "f8c3374c4d43294e79abf69da57704ad     201611\n",
      "190fc339ea130ea350750693ef8f7d51     177472\n",
      "d737c53c87b7aef9c299771aec9102ab     161020\n",
      "0608bdb97bd33e87a6b383bf8c42c6b7     137976\n",
      "dea4fd0d0ccbfbb3270a33d28bbfbe7d     136987\n",
      "Name: count, dtype: int64\n",
      "\n",
      "上一个页面top20:\n",
      "LAST_PAGE\n",
      "845f9c031c8d9498f0de25ead7621cfa    2742603\n",
      "66e1fa9949d9931c69fe4f64a9b0f221    2223241\n",
      "d8188552f0bb760340fcde6e06961988    1035802\n",
      "ee7bd5c4578baeed03b896e9db9cf41b     834438\n",
      "c7c1d6e50c483b92843b88f38b46aec5     730650\n",
      "953d3f55a65e6ce18cd62a1dc07cb65f     614648\n",
      "a6e6d7234e0b7b3848eeea5d04611e62     599112\n",
      "c7cb0d23dcee31f682a1110f10159e31     585668\n",
      "167db1655df3856c1060316fd4fe55b6     453428\n",
      "32dcf982042fc7be55e0ddf957367886     361210\n",
      "bdb4a189c233886c23b0fb280a03e629     356602\n",
      "8f3e8b87166e5df87de7dbbd97185eef     260434\n",
      "c08390a109ec4c8e9038bdb476805196     248461\n",
      "579d5ae9f3d97049874eab36e2a468a6     246181\n",
      "701cb907f30a69ab2bf788d085c7e2fc     244508\n",
      "d737c53c87b7aef9c299771aec9102ab     209068\n",
      "f8c3374c4d43294e79abf69da57704ad     159766\n",
      "dea4fd0d0ccbfbb3270a33d28bbfbe7d     135486\n",
      "0608bdb97bd33e87a6b383bf8c42c6b7     132962\n",
      "190fc339ea130ea350750693ef8f7d51     130278\n",
      "Name: count, dtype: int64\n",
      "\n",
      "事件名称分布:\n",
      "EVENT_NAME\n",
      "c3644c825659c088f8eab4b799e2c66a    9926288\n",
      "aacaedb874a9fb96bf0cda140dfc6f94    8913881\n",
      "7e08a1b966e06e8ac67f68e4b77ac007     147258\n",
      "Name: count, dtype: int64\n",
      "\n",
      "模块名称top20:\n",
      "MOD_NAME\n",
      "66e1fa9949d9931c69fe4f64a9b0f221    8914344\n",
      "e94fd0f3c55b2f8f59ed88dce80b8af9    1909934\n",
      "d02481baf0889f9012d0ccea950282d3    1566279\n",
      "f034c0bba53507a00e511a15a9ebe384    1388079\n",
      "d477e1c3cb589eee34751b69fc787f89     942853\n",
      "76a879fb3d51de916109927b2da493ae     455338\n",
      "e43a50ca221f89b61bc645cc05f982f3     355924\n",
      "f92300182946b41258a18e292ed3fa9e     274262\n",
      "6c369452371cb4683108315d85288188     265870\n",
      "973a6be45c28c5e9ada0e39b9185b353     181491\n",
      "b13104f310c62df773055d842771fa98     175125\n",
      "ea6b80246cf052fbf5c4b5749f5f3a6c     137906\n",
      "91418cb75348e96ad5a5abc3e875d107     135847\n",
      "fd5724b0394e682971d3ba95b2f52e15     131318\n",
      "d4274512e32179cd1518ab63d864a7d7     125322\n",
      "98436471432fb09d02e7256cb381c877     109539\n",
      "b8edc1ee1f207441355bfe24272138b2     109515\n",
      "3e1997cc31afdd2a06773a845eca6824     107178\n",
      "a190a4295a223f5538676ccdc28063e7      98260\n",
      "c10151331a26fad6677d9ab5725f3546      78086\n",
      "Name: count, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "# Cardinality and frequency profile of each categorical field; the top-N\n",
    "# lists inform which pages / modules deserve dedicated count features.\n",
    "print(\"分析各字段唯一值数量:\")\n",
    "for col in train_mb_pageview_dtl_data.columns:\n",
    "    print(f\"{col}: {train_mb_pageview_dtl_data[col].nunique()} 个唯一值\")\n",
    "\n",
    "print(\"\\n当前页面top20:\")\n",
    "print(train_mb_pageview_dtl_data['CUR_PAGE'].value_counts().head(20))\n",
    "\n",
    "print(\"\\n上一个页面top20:\")\n",
    "print(train_mb_pageview_dtl_data['LAST_PAGE'].value_counts().head(20))\n",
    "\n",
    "print(\"\\n事件名称分布:\")\n",
    "print(train_mb_pageview_dtl_data['EVENT_NAME'].value_counts())\n",
    "\n",
    "print(\"\\n模块名称top20:\")\n",
    "print(train_mb_pageview_dtl_data['MOD_NAME'].value_counts().head(20))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "id": "d61ca393",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据预处理完成!\n",
      "处理后数据形状: (18987427, 12)\n",
      "新增字段: OPR_DATETIME, OPR_HOUR, OPR_WEEKDAY, OPR_DAY, OPR_MONTH\n",
      "\n",
      "缺失值统计:\n",
      "OPR_DATE               0\n",
      "OPR_TIME        18987427\n",
      "CUST_NO                0\n",
      "CUR_PAGE               0\n",
      "LAST_PAGE              0\n",
      "EVENT_NAME             0\n",
      "MOD_NAME               0\n",
      "OPR_DATETIME    18987427\n",
      "OPR_HOUR        18987427\n",
      "OPR_WEEKDAY            0\n",
      "OPR_DAY                0\n",
      "OPR_MONTH              0\n",
      "dtype: int64\n"
     ]
    }
   ],
   "source": [
    "def preprocess_mb_pageview_data(df):\n",
    "    \"\"\"\n",
    "    Preprocess the mobile-banking page-view detail table.\n",
    "\n",
    "    Parses OPR_DATE / OPR_TIME into typed values and derives the calendar\n",
    "    columns OPR_DATETIME, OPR_HOUR, OPR_WEEKDAY, OPR_DAY and OPR_MONTH;\n",
    "    missing categorical values are replaced with 'unknown'.\n",
    "\n",
    "    Parameters:\n",
    "    - df: raw data frame\n",
    "\n",
    "    Returns:\n",
    "    - processed copy of the frame (the input is not modified)\n",
    "    \"\"\"\n",
    "    from datetime import time as time_of_day  # stdlib; needed for idempotent re-runs\n",
    "\n",
    "    df = df.copy()\n",
    "\n",
    "    if 'OPR_DATE' in df.columns and 'OPR_TIME' in df.columns:\n",
    "        # Convert OPR_DATE only when it is not already datetime (idempotent).\n",
    "        if not pd.api.types.is_datetime64_any_dtype(df['OPR_DATE']):\n",
    "            df['OPR_DATE'] = df['OPR_DATE'].astype('Int64').astype(str).str.replace('<NA>', 'NaN')\n",
    "            df['OPR_DATE'] = pd.to_datetime(df['OPR_DATE'], format='%Y%m%d', errors='coerce')\n",
    "\n",
    "        def parse_time(time_str):\n",
    "            \"\"\"Parse 'HH:MM:SS' strings or HHMMSS integers into datetime.time.\"\"\"\n",
    "            if pd.isna(time_str) or time_str == 'NaN':\n",
    "                return pd.NaT\n",
    "            # FIX: pass already-parsed values through unchanged. Previously a\n",
    "            # re-run turned every datetime.time value into NaT (it is not a\n",
    "            # str, and str(value) does not match the '%H%M%S' fallback), which\n",
    "            # is why the recorded output shows OPR_TIME 100% missing.\n",
    "            if isinstance(time_str, time_of_day):\n",
    "                return time_str\n",
    "            # 'HH:MM:SS' string format\n",
    "            if isinstance(time_str, str) and ':' in time_str:\n",
    "                try:\n",
    "                    return pd.to_datetime(time_str, format='%H:%M:%S').time()\n",
    "                except (ValueError, TypeError):  # narrowed from a bare except\n",
    "                    return pd.NaT\n",
    "            # integer-like HHMMSS format, zero-padded to six digits\n",
    "            time_str = str(time_str).zfill(6)\n",
    "            try:\n",
    "                return pd.to_datetime(time_str, format='%H%M%S').time()\n",
    "            except (ValueError, TypeError):\n",
    "                return pd.NaT\n",
    "\n",
    "        df['OPR_TIME'] = df['OPR_TIME'].apply(parse_time)\n",
    "\n",
    "        df['OPR_DATETIME'] = pd.to_datetime(\n",
    "            df['OPR_DATE'].astype(str) + ' ' + df['OPR_TIME'].astype(str),\n",
    "            errors='coerce'\n",
    "        )\n",
    "\n",
    "        df['OPR_HOUR'] = df['OPR_TIME'].apply(lambda x: x.hour if pd.notna(x) else np.nan)\n",
    "        df['OPR_WEEKDAY'] = df['OPR_DATE'].dt.dayofweek\n",
    "        df['OPR_DAY'] = df['OPR_DATE'].dt.day\n",
    "        df['OPR_MONTH'] = df['OPR_DATE'].dt.month\n",
    "\n",
    "    # Missing categorical values become an explicit 'unknown' level.\n",
    "    for col in ['CUR_PAGE', 'LAST_PAGE', 'EVENT_NAME', 'MOD_NAME']:\n",
    "        if col in df.columns:\n",
    "            df[col] = df[col].fillna('unknown')\n",
    "\n",
    "    if 'CUST_NO' in df.columns:\n",
    "        df['CUST_NO'] = df['CUST_NO'].astype(str)\n",
    "\n",
    "    return df\n",
    "\n",
    "Train_mb_pageview_dtl = preprocess_mb_pageview_data(train_mb_pageview_dtl_data)\n",
    "print(\"数据预处理完成!\")\n",
    "print(f\"处理后数据形状: {Train_mb_pageview_dtl.shape}\")\n",
    "print(f\"新增字段: OPR_DATETIME, OPR_HOUR, OPR_WEEKDAY, OPR_DAY, OPR_MONTH\")\n",
    "print(f\"\\n缺失值统计:\\n{Train_mb_pageview_dtl.isnull().sum()}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c7931ba5",
   "metadata": {},
   "source": [
    "## 特征工程函数定义"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ce0ba2c8",
   "metadata": {},
   "source": [
    "### 1. 基础统计特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "a4080f7c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_basic_statistics_features(df):\n",
    "    \"\"\"\n",
    "    Build per-customer volume and diversity statistics.\n",
    "\n",
    "    Parameters:\n",
    "    - df: preprocessed page-view frame\n",
    "\n",
    "    Returns:\n",
    "    - one row per CUST_NO with count / nunique / diversity features\n",
    "    \"\"\"\n",
    "    grouped = df.groupby('CUST_NO')\n",
    "\n",
    "    # Assemble all per-group aggregates in one frame (index = CUST_NO).\n",
    "    stats = pd.DataFrame({\n",
    "        'mb_pageview_total_count': grouped.size(),\n",
    "        'mb_pageview_cur_page_nunique': grouped['CUR_PAGE'].nunique(),\n",
    "        'mb_pageview_last_page_nunique': grouped['LAST_PAGE'].nunique(),\n",
    "        'mb_pageview_event_nunique': grouped['EVENT_NAME'].nunique(),\n",
    "        'mb_pageview_mod_nunique': grouped['MOD_NAME'].nunique(),\n",
    "        'mb_pageview_active_days': grouped['OPR_DATE'].nunique(),\n",
    "    }).reset_index()\n",
    "\n",
    "    # Keep the row order of first appearance in df, as before.\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    features = features.merge(stats, on='CUST_NO', how='left')\n",
    "\n",
    "    features['mb_pageview_avg_daily_count'] = features['mb_pageview_total_count'] / features['mb_pageview_active_days']\n",
    "    features['mb_pageview_page_diversity'] = features['mb_pageview_cur_page_nunique'] / (features['mb_pageview_total_count'] + 1)\n",
    "\n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1fc9c1b3",
   "metadata": {},
   "source": [
    "### 2. 时间行为特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "28f9d7b0",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_time_based_features(df):\n",
    "    \"\"\"\n",
    "    Generate time-of-day and calendar behaviour features per customer.\n",
    "\n",
    "    Parameters:\n",
    "    - df: preprocessed frame (needs OPR_HOUR, OPR_WEEKDAY, OPR_DATETIME);\n",
    "      the input frame is left unmodified\n",
    "\n",
    "    Returns:\n",
    "    - one row per CUST_NO with hour / weekday / visit-interval statistics\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame()\n",
    "    features['CUST_NO'] = df['CUST_NO'].unique()\n",
    "\n",
    "    df_with_time = df[df['OPR_HOUR'].notna()].copy()\n",
    "\n",
    "    if len(df_with_time) > 0:\n",
    "        # Day-part indicators, computed on the local copy only.\n",
    "        df_with_time['is_morning'] = (df_with_time['OPR_HOUR'] >= 6) & (df_with_time['OPR_HOUR'] < 12)\n",
    "        df_with_time['is_afternoon'] = (df_with_time['OPR_HOUR'] >= 12) & (df_with_time['OPR_HOUR'] < 18)\n",
    "        df_with_time['is_evening'] = (df_with_time['OPR_HOUR'] >= 18) & (df_with_time['OPR_HOUR'] < 24)\n",
    "        df_with_time['is_night'] = (df_with_time['OPR_HOUR'] >= 0) & (df_with_time['OPR_HOUR'] < 6)\n",
    "        \n",
    "        time_agg = df_with_time.groupby('CUST_NO').agg({\n",
    "            'OPR_HOUR': ['mean', 'std', 'min', 'max'],\n",
    "            'is_morning': 'sum',\n",
    "            'is_afternoon': 'sum',\n",
    "            'is_evening': 'sum',\n",
    "            'is_night': 'sum'\n",
    "        })\n",
    "        \n",
    "        time_agg.columns = [\n",
    "            'mb_pageview_hour_mean', 'mb_pageview_hour_std', 'mb_pageview_hour_min', 'mb_pageview_hour_max',\n",
    "            'mb_pageview_morning_count', 'mb_pageview_afternoon_count', \n",
    "            'mb_pageview_evening_count', 'mb_pageview_night_count'\n",
    "        ]\n",
    "        \n",
    "        time_agg = time_agg.reset_index()\n",
    "        features = features.merge(time_agg, on='CUST_NO', how='left')\n",
    "    \n",
    "    weekday_agg = df.groupby('CUST_NO')['OPR_WEEKDAY'].agg(['mean', 'std']).reset_index()\n",
    "    weekday_agg.columns = ['CUST_NO', 'mb_pageview_weekday_mean', 'mb_pageview_weekday_std']\n",
    "    features = features.merge(weekday_agg, on='CUST_NO', how='left')\n",
    "    \n",
    "    # FIX: the original assigned df['is_weekend'] on the caller's frame,\n",
    "    # silently mutating the input as a hidden side effect; compute on a\n",
    "    # temporary Series grouped by the external CUST_NO key instead.\n",
    "    is_weekend = df['OPR_WEEKDAY'].isin([5, 6])\n",
    "    weekend_agg = is_weekend.groupby(df['CUST_NO']).sum().reset_index(name='mb_pageview_weekend_count')\n",
    "    features = features.merge(weekend_agg, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Seconds between consecutive page views of the same customer.\n",
    "    df_sorted = df.sort_values(['CUST_NO', 'OPR_DATETIME']).copy()\n",
    "    df_sorted['time_diff'] = df_sorted.groupby('CUST_NO')['OPR_DATETIME'].diff().dt.total_seconds()\n",
    "    \n",
    "    time_diff_agg = df_sorted.groupby('CUST_NO')['time_diff'].agg(['mean', 'std', 'min', 'max']).reset_index()\n",
    "    time_diff_agg.columns = ['CUST_NO', 'mb_pageview_time_diff_mean', 'mb_pageview_time_diff_std',\n",
    "                              'mb_pageview_time_diff_min', 'mb_pageview_time_diff_max']\n",
    "    features = features.merge(time_diff_agg, on='CUST_NO', how='left')\n",
    "    \n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "aae70f69",
   "metadata": {},
   "source": [
    "### 3. 页面转换特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "2b7099eb",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_page_transition_features(df):\n",
    "    \"\"\"\n",
    "    Build features describing LAST_PAGE -> CUR_PAGE transitions.\n",
    "\n",
    "    Parameters:\n",
    "    - df: preprocessed page-view frame\n",
    "\n",
    "    Returns:\n",
    "    - one row per CUST_NO with transition counts, diversity and the share\n",
    "      of self-loops (CUR_PAGE equal to LAST_PAGE)\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "\n",
    "    transitions = df.copy()\n",
    "    # Encode each ordered page pair as a single categorical key.\n",
    "    transitions['page_transition'] = (\n",
    "        transitions['LAST_PAGE'].astype(str) + '_to_' + transitions['CUR_PAGE'].astype(str)\n",
    "    )\n",
    "\n",
    "    transition_stats = transitions.groupby('CUST_NO')['page_transition'].agg(['nunique', 'count']).reset_index()\n",
    "    transition_stats.columns = ['CUST_NO', 'mb_pageview_transition_nunique', 'mb_pageview_transition_count']\n",
    "    features = features.merge(transition_stats, on='CUST_NO', how='left')\n",
    "\n",
    "    features['mb_pageview_transition_diversity'] = (\n",
    "        features['mb_pageview_transition_nunique'] / (features['mb_pageview_transition_count'] + 1)\n",
    "    )\n",
    "\n",
    "    # Self-loops: consecutive views of the same page.\n",
    "    self_loops = transitions.loc[transitions['CUR_PAGE'] == transitions['LAST_PAGE']]\n",
    "    loop_counts = self_loops.groupby('CUST_NO').size().reset_index(name='mb_pageview_self_loop_count')\n",
    "    features = features.merge(loop_counts, on='CUST_NO', how='left')\n",
    "    features['mb_pageview_self_loop_count'] = features['mb_pageview_self_loop_count'].fillna(0)\n",
    "\n",
    "    features['mb_pageview_self_loop_ratio'] = (\n",
    "        features['mb_pageview_self_loop_count'] / (features['mb_pageview_transition_count'] + 1)\n",
    "    )\n",
    "\n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5cf508ea",
   "metadata": {},
   "source": [
    "### 4. 事件特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "46ecc6cf",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_event_features(df):\n",
    "    \"\"\"\n",
    "    Generate event-related features per customer.\n",
    "\n",
    "    Parameters:\n",
    "    - df: preprocessed page-view frame\n",
    "\n",
    "    Returns:\n",
    "    - one row per CUST_NO with per-event counts and frequency ratios\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame()\n",
    "    features['CUST_NO'] = df['CUST_NO'].unique()\n",
    "    \n",
    "    # Per-event visit counts, one column per EVENT_NAME level.\n",
    "    event_pivot = df.groupby(['CUST_NO', 'EVENT_NAME']).size().unstack(fill_value=0)\n",
    "    event_pivot.columns = ['mb_pageview_event_' + str(col) + '_count' for col in event_pivot.columns]\n",
    "    event_pivot = event_pivot.reset_index()\n",
    "    features = features.merge(event_pivot, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Per-customer event frequency distribution as a dict per row.\n",
    "    event_ratio = df.groupby('CUST_NO')['EVENT_NAME'].apply(\n",
    "        lambda x: x.value_counts(normalize=True).to_dict()\n",
    "    ).reset_index()\n",
    "    \n",
    "    # FIX: the ratio columns used to be assigned positionally onto `features`,\n",
    "    # whose rows follow df['CUST_NO'].unique() (appearance) order, while\n",
    "    # `event_ratio` follows sorted groupby order -- ratios could land on the\n",
    "    # wrong customer. Build the columns keyed by CUST_NO and merge instead.\n",
    "    ratio_df = pd.DataFrame({'CUST_NO': event_ratio['CUST_NO']})\n",
    "    for event in df['EVENT_NAME'].unique():\n",
    "        if event != 'unknown':\n",
    "            ratio_df['mb_pageview_event_' + str(event) + '_ratio'] = event_ratio['EVENT_NAME'].apply(\n",
    "                lambda x: x.get(event, 0) if isinstance(x, dict) else 0\n",
    "            )\n",
    "    features = features.merge(ratio_df, on='CUST_NO', how='left')\n",
    "    \n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c1827e35",
   "metadata": {},
   "source": [
    "### 5. 模块特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "3fea3370",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_module_features(df):\n",
    "    \"\"\"\n",
    "    Generate module-usage features per customer.\n",
    "\n",
    "    Parameters:\n",
    "    - df: preprocessed page-view frame\n",
    "\n",
    "    Returns:\n",
    "    - one row per CUST_NO with per-module counts (top 30 modules only),\n",
    "      overall module counts and a diversity ratio\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame()\n",
    "    features['CUST_NO'] = df['CUST_NO'].unique()\n",
    "    \n",
    "    # Restrict the per-module count columns to the 30 most common modules.\n",
    "    top_modules = df['MOD_NAME'].value_counts().head(30).index.tolist()\n",
    "    df_top_modules = df[df['MOD_NAME'].isin(top_modules)].copy()\n",
    "    \n",
    "    module_pivot = df_top_modules.groupby(['CUST_NO', 'MOD_NAME']).size().unstack(fill_value=0)\n",
    "    module_pivot.columns = ['mb_pageview_mod_' + str(col) + '_count' for col in module_pivot.columns]\n",
    "    module_pivot = module_pivot.reset_index()\n",
    "    pivot_cols = [col for col in module_pivot.columns if col != 'CUST_NO']\n",
    "    features = features.merge(module_pivot, on='CUST_NO', how='left')\n",
    "    # FIX: customers who never visited a top-30 module got NaN counts after\n",
    "    # the left merge; fill with 0 to match the fill_value=0 convention above\n",
    "    # (and the fillna(0) used for the self-loop counts elsewhere).\n",
    "    features[pivot_cols] = features[pivot_cols].fillna(0)\n",
    "    \n",
    "    module_stats = df.groupby('CUST_NO')['MOD_NAME'].agg([\n",
    "        ('mb_pageview_mod_count', 'count'),\n",
    "        ('mb_pageview_mod_nunique', 'nunique')\n",
    "    ]).reset_index()\n",
    "    features = features.merge(module_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    features['mb_pageview_mod_diversity'] = (\n",
    "        features['mb_pageview_mod_nunique'] / (features['mb_pageview_mod_count'] + 1)\n",
    "    )\n",
    "    \n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c7abcddd",
   "metadata": {},
   "source": [
    "### 6. 时间窗口统计特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "68d94d29",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_time_window_aggregation_features(df):\n",
    "    \"\"\"\n",
    "    生成按时间窗口(天/周/月)聚合的统计特征\n",
    "    参考往年代码gen_mb_features_by_day\n",
    "    \n",
    "    参数:\n",
    "    - df: 预处理后的数据框\n",
    "    \n",
    "    返回:\n",
    "    - 特征数据框\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame()\n",
    "    features['CUST_NO'] = df['CUST_NO'].unique()\n",
    "    \n",
    "    max_date = df['OPR_DATE'].max()\n",
    "    df['days_to_now'] = (max_date - df['OPR_DATE']).dt.days\n",
    "    df['weeks_to_now'] = df['days_to_now'] // 7\n",
    "    df['months_to_now'] = df['days_to_now'] // 31\n",
    "    \n",
    "    df_by_day = df.groupby(['CUST_NO', 'days_to_now', 'weeks_to_now', 'months_to_now'])['CUR_PAGE'].agg(['nunique', 'count'])\n",
    "    df_by_day.columns = ['page_nunique', 'page_count']\n",
    "    df_by_day = df_by_day.reset_index()\n",
    "    \n",
    "    tmp_df_nunique = df_by_day.groupby(['CUST_NO']).agg({'page_nunique': 'max'}).reset_index()\n",
    "    tmp_df_nunique = tmp_df_nunique.merge(\n",
    "        df_by_day[['CUST_NO', 'days_to_now', 'page_nunique']], \n",
    "        on=['CUST_NO', 'page_nunique'], \n",
    "        how='inner'\n",
    "    )\n",
    "    tmp_df_nunique = tmp_df_nunique.groupby(['CUST_NO'])['days_to_now'].min().to_frame('mb_pageview_max_nunique_days_to_now').reset_index()\n",
    "    features = features.merge(tmp_df_nunique, on='CUST_NO', how='left')\n",
    "    \n",
    "    tmp_df_count = df_by_day.groupby(['CUST_NO']).agg({'page_count': 'max'}).reset_index()\n",
    "    tmp_df_count = tmp_df_count.merge(\n",
    "        df_by_day[['CUST_NO', 'days_to_now', 'page_count']], \n",
    "        on=['CUST_NO', 'page_count'], \n",
    "        how='inner'\n",
    "    )\n",
    "    tmp_df_count = tmp_df_count.groupby(['CUST_NO'])['days_to_now'].min().to_frame('mb_pageview_max_count_days_to_now').reset_index()\n",
    "    features = features.merge(tmp_df_count, on='CUST_NO', how='left')\n",
    "    \n",
    "    week_agg = df_by_day.groupby(['CUST_NO', 'weeks_to_now'])['page_count'].sum().reset_index()\n",
    "    week_stats = week_agg.groupby('CUST_NO')['page_count'].agg([\n",
    "        ('mb_pageview_week_count_mean', 'mean'),\n",
    "        ('mb_pageview_week_count_std', 'std'),\n",
    "        ('mb_pageview_week_count_max', 'max'),\n",
    "        ('mb_pageview_week_count_min', 'min')\n",
    "    ]).reset_index()\n",
    "    features = features.merge(week_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    month_agg = df_by_day.groupby(['CUST_NO', 'months_to_now'])['page_count'].sum().reset_index()\n",
    "    month_stats = month_agg.groupby('CUST_NO')['page_count'].agg([\n",
    "        ('mb_pageview_month_count_mean', 'mean'),\n",
    "        ('mb_pageview_month_count_std', 'std'),\n",
    "        ('mb_pageview_month_count_max', 'max'),\n",
    "        ('mb_pageview_month_count_min', 'min')\n",
    "    ]).reset_index()\n",
    "    features = features.merge(month_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1231790d",
   "metadata": {},
   "source": [
    "### 7. 近期行为特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "cc3da81e",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_recent_behavior_features(df):\n",
    "    \"\"\"\n",
    "    生成近期行为特征(最近7天、14天、30天、60天、90天)\n",
    "    \n",
    "    参数:\n",
    "    - df: 预处理后的数据框\n",
    "    \n",
    "    返回:\n",
    "    - 特征数据框\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame()\n",
    "    features['CUST_NO'] = df['CUST_NO'].unique()\n",
    "    \n",
    "    max_date = df['OPR_DATE'].max()\n",
    "    \n",
    "    for days in [7, 14, 30, 60, 90]:\n",
    "        cutoff_date = max_date - pd.Timedelta(days=days)\n",
    "        df_recent = df[df['OPR_DATE'] >= cutoff_date].copy()\n",
    "        \n",
    "        recent_agg = df_recent.groupby('CUST_NO').agg({\n",
    "            'CUR_PAGE': ['count', 'nunique'],\n",
    "            'MOD_NAME': 'nunique',\n",
    "            'EVENT_NAME': 'nunique',\n",
    "            'OPR_DATE': 'nunique'\n",
    "        })\n",
    "        \n",
    "        recent_agg.columns = [\n",
    "            f'mb_pageview_last{days}d_count',\n",
    "            f'mb_pageview_last{days}d_page_nunique',\n",
    "            f'mb_pageview_last{days}d_mod_nunique',\n",
    "            f'mb_pageview_last{days}d_event_nunique',\n",
    "            f'mb_pageview_last{days}d_active_days'\n",
    "        ]\n",
    "        \n",
    "        recent_agg = recent_agg.reset_index()\n",
    "        features = features.merge(recent_agg, on='CUST_NO', how='left')\n",
    "        \n",
    "        features[f'mb_pageview_last{days}d_avg_daily'] = (\n",
    "            features[f'mb_pageview_last{days}d_count'] / (features[f'mb_pageview_last{days}d_active_days'] + 1)\n",
    "        )\n",
    "    \n",
    "    for i in range(len([7, 14, 30, 60, 90]) - 1):\n",
    "        curr_days = [7, 14, 30, 60, 90][i]\n",
    "        next_days = [7, 14, 30, 60, 90][i + 1]\n",
    "        features[f'mb_pageview_growth_{curr_days}to{next_days}d'] = (\n",
    "            (features[f'mb_pageview_last{curr_days}d_count'] + 1) / \n",
    "            (features[f'mb_pageview_last{next_days}d_count'] + 1)\n",
    "        )\n",
    "    \n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c909ae83",
   "metadata": {},
   "source": [
    "### 8. 会话特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "160497f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_session_features(df):\n",
    "    \"\"\"\n",
    "    生成会话相关特征(基于时间间隔识别会话)\n",
    "    \n",
    "    参数:\n",
    "    - df: 预处理后的数据框\n",
    "    \n",
    "    返回:\n",
    "    - 特征数据框\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame()\n",
    "    features['CUST_NO'] = df['CUST_NO'].unique()\n",
    "    \n",
    "    df_sorted = df.sort_values(['CUST_NO', 'OPR_DATETIME']).copy()\n",
    "    df_sorted['time_diff'] = df_sorted.groupby('CUST_NO')['OPR_DATETIME'].diff().dt.total_seconds()\n",
    "    \n",
    "    df_sorted['new_session'] = (df_sorted['time_diff'] > 1800) | (df_sorted['time_diff'].isna())\n",
    "    df_sorted['session_id'] = df_sorted.groupby('CUST_NO')['new_session'].cumsum()\n",
    "    \n",
    "    session_stats = df_sorted.groupby(['CUST_NO', 'session_id']).size().reset_index(name='session_length')\n",
    "    \n",
    "    session_agg = session_stats.groupby('CUST_NO')['session_length'].agg([\n",
    "        ('mb_pageview_session_count', 'count'),\n",
    "        ('mb_pageview_session_avg_length', 'mean'),\n",
    "        ('mb_pageview_session_max_length', 'max'),\n",
    "        ('mb_pageview_session_min_length', 'min'),\n",
    "        ('mb_pageview_session_std_length', 'std')\n",
    "    ]).reset_index()\n",
    "    \n",
    "    features = features.merge(session_agg, on='CUST_NO', how='left')\n",
    "    \n",
    "    features['mb_pageview_avg_pages_per_session'] = (\n",
    "        features['mb_pageview_session_avg_length']\n",
    "    )\n",
    "    \n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d8347c91",
   "metadata": {},
   "source": [
    "### 9. 交叉特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "6f018ffa",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_page_module_cross_features(df):\n",
    "    \"\"\"\n",
    "    生成页面和模块的交叉特征\n",
    "    \n",
    "    参数:\n",
    "    - df: 预处理后的数据框\n",
    "    \n",
    "    返回:\n",
    "    - 特征数据框\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame()\n",
    "    features['CUST_NO'] = df['CUST_NO'].unique()\n",
    "    \n",
    "    df['page_mod_combo'] = df['CUR_PAGE'].astype(str) + '_' + df['MOD_NAME'].astype(str)\n",
    "    \n",
    "    combo_agg = df.groupby('CUST_NO')['page_mod_combo'].agg([\n",
    "        ('mb_pageview_page_mod_combo_nunique', 'nunique'),\n",
    "        ('mb_pageview_page_mod_combo_count', 'count')\n",
    "    ]).reset_index()\n",
    "    \n",
    "    features = features.merge(combo_agg, on='CUST_NO', how='left')\n",
    "    \n",
    "    features['mb_pageview_page_mod_combo_diversity'] = (\n",
    "        features['mb_pageview_page_mod_combo_nunique'] / (features['mb_pageview_page_mod_combo_count'] + 1)\n",
    "    )\n",
    "    \n",
    "    df['event_mod_combo'] = df['EVENT_NAME'].astype(str) + '_' + df['MOD_NAME'].astype(str)\n",
    "    \n",
    "    event_combo_agg = df.groupby('CUST_NO')['event_mod_combo'].nunique().reset_index(\n",
    "        name='mb_pageview_event_mod_combo_nunique'\n",
    "    )\n",
    "    \n",
    "    features = features.merge(event_combo_agg, on='CUST_NO', how='left')\n",
    "    \n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bbf5b988",
   "metadata": {},
   "source": [
    "### 10. 热门页面/事件/模块统计特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "4d0bec82",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_top_module_page_transition_features(df):\n",
    "    \"\"\"\n",
    "    生成Top30模块下的页面唯一数统计特征\n",
    "    参考往年代码gen_mb_join_op_unique_features_by_days\n",
    "    \n",
    "    参数:\n",
    "    - df: 预处理后的数据框\n",
    "    \n",
    "    返回:\n",
    "    - 特征数据框\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame()\n",
    "    features['CUST_NO'] = df['CUST_NO'].unique()\n",
    "    \n",
    "    max_date = df['OPR_DATE'].max()\n",
    "    df['days_to_now'] = (max_date - df['OPR_DATE']).dt.days\n",
    "    df['weeks_to_now'] = df['days_to_now'] // 7\n",
    "    df['months_to_now'] = df['days_to_now'] // 31\n",
    "    \n",
    "    top_30_modules = df['MOD_NAME'].value_counts().head(30).index.tolist()\n",
    "    \n",
    "    df_top = df[df['MOD_NAME'].isin(top_30_modules)].copy()\n",
    "    \n",
    "    df_top['page_transition'] = df_top['LAST_PAGE'].astype(str) + '_' + df_top['CUR_PAGE'].astype(str)\n",
    "    \n",
    "    df_by_day = df_top.groupby(['CUST_NO', 'days_to_now', 'weeks_to_now', 'months_to_now', 'MOD_NAME'])['page_transition'].agg(['nunique', 'count'])\n",
    "    df_by_day = df_by_day.reset_index()\n",
    "    \n",
    "    tmp_df_nunique = df_by_day.groupby(['CUST_NO', 'MOD_NAME']).agg({'nunique': 'max'}).reset_index()\n",
    "    tmp_df_nunique = tmp_df_nunique.merge(\n",
    "        df_by_day[['CUST_NO', 'MOD_NAME', 'days_to_now', 'nunique']], \n",
    "        on=['CUST_NO', 'MOD_NAME', 'nunique'], \n",
    "        how='inner'\n",
    "    )\n",
    "    tmp_df_nunique = tmp_df_nunique.groupby(['CUST_NO', 'MOD_NAME'])['days_to_now'].min().to_frame('max_nunique_days_to_now').reset_index()\n",
    "    \n",
    "    tmp_df_nunique = pd.pivot(data=tmp_df_nunique, index='CUST_NO', columns='MOD_NAME', values='max_nunique_days_to_now')\n",
    "    tmp_df_nunique.columns = ['mb_pageview_mod_' + str(col) + '_max_nunique_days' for col in tmp_df_nunique.columns]\n",
    "    tmp_df_nunique = tmp_df_nunique.reset_index()\n",
    "    features = features.merge(tmp_df_nunique, on='CUST_NO', how='left')\n",
    "    \n",
    "    tmp_df_count = df_by_day.groupby(['CUST_NO', 'MOD_NAME']).agg({'count': 'max'}).reset_index()\n",
    "    tmp_df_count = tmp_df_count.merge(\n",
    "        df_by_day[['CUST_NO', 'MOD_NAME', 'days_to_now', 'count']], \n",
    "        on=['CUST_NO', 'MOD_NAME', 'count'], \n",
    "        how='inner'\n",
    "    )\n",
    "    tmp_df_count = tmp_df_count.groupby(['CUST_NO', 'MOD_NAME'])['days_to_now'].min().to_frame('max_count_days_to_now').reset_index()\n",
    "    \n",
    "    tmp_df_count = pd.pivot(data=tmp_df_count, index='CUST_NO', columns='MOD_NAME', values='max_count_days_to_now')\n",
    "    tmp_df_count.columns = ['mb_pageview_mod_' + str(col) + '_max_count_days' for col in tmp_df_count.columns]\n",
    "    tmp_df_count = tmp_df_count.reset_index()\n",
    "    features = features.merge(tmp_df_count, on='CUST_NO', how='left')\n",
    "    \n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3b3f2a42",
   "metadata": {},
   "source": [
    "### 11. 页面模块序列Word2Vec嵌入特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "1bb77a75",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_w2v_embedding_features(df, emb_size=16):\n",
    "    \"\"\"\n",
    "    生成页面/模块序列的Word2Vec嵌入特征\n",
    "    参考往年代码,为页面转换序列生成向量表示\n",
    "    \n",
    "    参数:\n",
    "    - df: 预处理后的数据框\n",
    "    - emb_size: 嵌入维度\n",
    "    \n",
    "    返回:\n",
    "    - 特征数据框\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame()\n",
    "    features['CUST_NO'] = df['CUST_NO'].unique()\n",
    "    \n",
    "    df_sorted = df.sort_values(['CUST_NO', 'OPR_DATETIME']).copy()\n",
    "    \n",
    "    df_sorted['page_transition'] = (\n",
    "        df_sorted['LAST_PAGE'].astype(str) + '_to_' + df_sorted['CUR_PAGE'].astype(str)\n",
    "    )\n",
    "    \n",
    "    page_sequences = df_sorted.groupby('CUST_NO')['page_transition'].apply(list).reset_index()\n",
    "    page_sequences.columns = ['CUST_NO', 'page_sequence']\n",
    "    \n",
    "    sentences = page_sequences['page_sequence'].values.tolist()\n",
    "    \n",
    "    try:\n",
    "        from gensim.models import Word2Vec\n",
    "        \n",
    "        model = Word2Vec(\n",
    "            sentences, \n",
    "            vector_size=emb_size, \n",
    "            window=5, \n",
    "            min_count=2, \n",
    "            sg=1, \n",
    "            seed=42, \n",
    "            epochs=10, \n",
    "            workers=1\n",
    "        )\n",
    "        \n",
    "        emb_matrix = []\n",
    "        for seq in sentences:\n",
    "            vec = []\n",
    "            for w in seq:\n",
    "                if w in model.wv:\n",
    "                    vec.append(model.wv[w])\n",
    "            if len(vec) > 0:\n",
    "                emb_matrix.append(np.mean(vec, axis=0))\n",
    "            else:\n",
    "                emb_matrix.append([0] * emb_size)\n",
    "        \n",
    "        emb_matrix = np.array(emb_matrix)\n",
    "        \n",
    "        for i in range(emb_size):\n",
    "            page_sequences[f'mb_pageview_page_w2v_{i}'] = emb_matrix[:, i]\n",
    "        \n",
    "        page_sequences = page_sequences.drop('page_sequence', axis=1)\n",
    "        features = features.merge(page_sequences, on='CUST_NO', how='left')\n",
    "        \n",
    "    except ImportError:\n",
    "        print(\"警告: gensim未安装,跳过Word2Vec特征生成\")\n",
    "        for i in range(emb_size):\n",
    "            features[f'mb_pageview_page_w2v_{i}'] = 0\n",
    "    \n",
    "    df_sorted['module_sequence'] = df_sorted['MOD_NAME'].astype(str)\n",
    "    module_sequences = df_sorted.groupby('CUST_NO')['module_sequence'].apply(list).reset_index()\n",
    "    module_sequences.columns = ['CUST_NO', 'module_sequence']\n",
    "    \n",
    "    sentences_mod = module_sequences['module_sequence'].values.tolist()\n",
    "    \n",
    "    try:\n",
    "        model_mod = Word2Vec(\n",
    "            sentences_mod, \n",
    "            vector_size=emb_size, \n",
    "            window=5, \n",
    "            min_count=2, \n",
    "            sg=1, \n",
    "            seed=42, \n",
    "            epochs=10, \n",
    "            workers=1\n",
    "        )\n",
    "        \n",
    "        emb_matrix_mod = []\n",
    "        for seq in sentences_mod:\n",
    "            vec = []\n",
    "            for w in seq:\n",
    "                if w in model_mod.wv:\n",
    "                    vec.append(model_mod.wv[w])\n",
    "            if len(vec) > 0:\n",
    "                emb_matrix_mod.append(np.mean(vec, axis=0))\n",
    "            else:\n",
    "                emb_matrix_mod.append([0] * emb_size)\n",
    "        \n",
    "        emb_matrix_mod = np.array(emb_matrix_mod)\n",
    "        \n",
    "        for i in range(emb_size):\n",
    "            module_sequences[f'mb_pageview_module_w2v_{i}'] = emb_matrix_mod[:, i]\n",
    "        \n",
    "        module_sequences = module_sequences.drop('module_sequence', axis=1)\n",
    "        features = features.merge(module_sequences, on='CUST_NO', how='left')\n",
    "        \n",
    "    except ImportError:\n",
    "        for i in range(emb_size):\n",
    "            features[f'mb_pageview_module_w2v_{i}'] = 0\n",
    "    \n",
    "    return features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "33fb7742",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_all_mb_pageview_features(df):\n",
    "    \"\"\"\n",
    "    Run the full mobile-banking pageview feature pipeline: preprocess the raw\n",
    "    log, then build every feature family and left-join each onto the base\n",
    "    frame by CUST_NO.\n",
    "\n",
    "    Parameters:\n",
    "    - df: raw pageview dataframe\n",
    "\n",
    "    Returns:\n",
    "    - one row per customer containing every generated feature\n",
    "    \"\"\"\n",
    "    print(\"开始数据预处理...\")\n",
    "    df_processed = preprocess_mb_pageview_data(df)\n",
    "\n",
    "    print(\"1/11 生成基础统计特征...\")\n",
    "    features = generate_basic_statistics_features(df_processed)\n",
    "\n",
    "    # Remaining feature families, each merged onto the base frame by CUST_NO.\n",
    "    pipeline = [\n",
    "        (\"2/11 生成时间相关特征...\", generate_time_based_features),\n",
    "        (\"3/11 生成页面转换特征...\", generate_page_transition_features),\n",
    "        (\"4/11 生成事件特征...\", generate_event_features),\n",
    "        (\"5/11 生成模块特征...\", generate_module_features),\n",
    "        (\"6/11 生成时间窗口聚合特征...\", generate_time_window_aggregation_features),\n",
    "        (\"7/11 生成近期行为特征...\", generate_recent_behavior_features),\n",
    "        (\"8/11 生成会话特征...\", generate_session_features),\n",
    "        (\"9/11 生成交叉特征...\", generate_page_module_cross_features),\n",
    "        (\"10/11 生成Top30模块页面转换特征...\", generate_top_module_page_transition_features),\n",
    "        (\"11/11 生成Word2Vec嵌入特征...\", lambda d: generate_w2v_embedding_features(d, emb_size=16)),\n",
    "    ]\n",
    "    for message, build in pipeline:\n",
    "        print(message)\n",
    "        features = features.merge(build(df_processed), on='CUST_NO', how='left')\n",
    "\n",
    "    print(f\"\\n特征生成完成! 总特征数: {features.shape[1] - 1}\")\n",
    "    print(f\"客户数: {features.shape[0]}\")\n",
    "\n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "45f7e7af",
   "metadata": {},
   "source": [
    "## 生成训练集特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "f2b60b31",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始数据预处理...\n"
     ]
    },
    {
     "ename": "TypeError",
     "evalue": "datetime64[ns] cannot be converted to IntegerDtype",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[39], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m Train_mb_pageview_features \u001b[38;5;241m=\u001b[39m \u001b[43mgenerate_all_mb_pageview_features\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_mb_pageview_dtl_data\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m      3\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m特征预览:\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m      4\u001b[0m \u001b[38;5;28mprint\u001b[39m(Train_mb_pageview_features\u001b[38;5;241m.\u001b[39mhead())\n",
      "Cell \u001b[1;32mIn[34], line 12\u001b[0m, in \u001b[0;36mgenerate_all_mb_pageview_features\u001b[1;34m(df)\u001b[0m\n\u001b[0;32m      2\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m      3\u001b[0m \u001b[38;5;124;03m整合所有掌银页面访问特征\u001b[39;00m\n\u001b[0;32m      4\u001b[0m \u001b[38;5;124;03m\u001b[39;00m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m      9\u001b[0m \u001b[38;5;124;03m- 完整特征数据框\u001b[39;00m\n\u001b[0;32m     10\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m     11\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m开始数据预处理...\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m---> 12\u001b[0m df_processed \u001b[38;5;241m=\u001b[39m \u001b[43mpreprocess_mb_pageview_data\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdf\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     14\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m1/11 生成基础统计特征...\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m     15\u001b[0m features \u001b[38;5;241m=\u001b[39m generate_basic_statistics_features(df_processed)\n",
      "Cell \u001b[1;32mIn[38], line 14\u001b[0m, in \u001b[0;36mpreprocess_mb_pageview_data\u001b[1;34m(df)\u001b[0m\n\u001b[0;32m     11\u001b[0m df \u001b[38;5;241m=\u001b[39m df\u001b[38;5;241m.\u001b[39mcopy()\n\u001b[0;32m     13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mOPR_DATE\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;129;01min\u001b[39;00m df\u001b[38;5;241m.\u001b[39mcolumns \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mOPR_TIME\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;129;01min\u001b[39;00m df\u001b[38;5;241m.\u001b[39mcolumns:\n\u001b[1;32m---> 14\u001b[0m     df[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mOPR_DATE\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[43mdf\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mOPR_DATE\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mastype\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mInt64\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mastype(\u001b[38;5;28mstr\u001b[39m)\u001b[38;5;241m.\u001b[39mstr\u001b[38;5;241m.\u001b[39mreplace(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m<NA>\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mNaN\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m     15\u001b[0m     df[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mOPR_TIME\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;241m=\u001b[39m 
df[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mOPR_TIME\u001b[39m\u001b[38;5;124m'\u001b[39m]\u001b[38;5;241m.\u001b[39mastype(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mInt64\u001b[39m\u001b[38;5;124m'\u001b[39m)\u001b[38;5;241m.\u001b[39mastype(\u001b[38;5;28mstr\u001b[39m)\u001b[38;5;241m.\u001b[39mstr\u001b[38;5;241m.\u001b[39mreplace(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m<NA>\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mNaN\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m     17\u001b[0m     df[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mOPR_DATE\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;241m=\u001b[39m pd\u001b[38;5;241m.\u001b[39mto_datetime(df[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mOPR_DATE\u001b[39m\u001b[38;5;124m'\u001b[39m], \u001b[38;5;28mformat\u001b[39m\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m%\u001b[39m\u001b[38;5;124mY\u001b[39m\u001b[38;5;124m%\u001b[39m\u001b[38;5;124mm\u001b[39m\u001b[38;5;132;01m%d\u001b[39;00m\u001b[38;5;124m'\u001b[39m, errors\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcoerce\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
      "File \u001b[1;32mg:\\Anaconda3\\envs\\starcup\\lib\\site-packages\\pandas\\core\\generic.py:6643\u001b[0m, in \u001b[0;36mNDFrame.astype\u001b[1;34m(self, dtype, copy, errors)\u001b[0m\n\u001b[0;32m   6637\u001b[0m     results \u001b[38;5;241m=\u001b[39m [\n\u001b[0;32m   6638\u001b[0m         ser\u001b[38;5;241m.\u001b[39mastype(dtype, copy\u001b[38;5;241m=\u001b[39mcopy, errors\u001b[38;5;241m=\u001b[39merrors) \u001b[38;5;28;01mfor\u001b[39;00m _, ser \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mitems()\n\u001b[0;32m   6639\u001b[0m     ]\n\u001b[0;32m   6641\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m   6642\u001b[0m     \u001b[38;5;66;03m# else, only a single dtype is given\u001b[39;00m\n\u001b[1;32m-> 6643\u001b[0m     new_data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_mgr\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mastype\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcopy\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43merrors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43merrors\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   6644\u001b[0m     res \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_constructor_from_mgr(new_data, axes\u001b[38;5;241m=\u001b[39mnew_data\u001b[38;5;241m.\u001b[39maxes)\n\u001b[0;32m   6645\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m res\u001b[38;5;241m.\u001b[39m__finalize__(\u001b[38;5;28mself\u001b[39m, method\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mastype\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
      "File \u001b[1;32mg:\\Anaconda3\\envs\\starcup\\lib\\site-packages\\pandas\\core\\internals\\managers.py:430\u001b[0m, in \u001b[0;36mBaseBlockManager.astype\u001b[1;34m(self, dtype, copy, errors)\u001b[0m\n\u001b[0;32m    427\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m using_copy_on_write():\n\u001b[0;32m    428\u001b[0m     copy \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[1;32m--> 430\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mapply\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    431\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mastype\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m    432\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    433\u001b[0m \u001b[43m    \u001b[49m\u001b[43mcopy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcopy\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    434\u001b[0m \u001b[43m    \u001b[49m\u001b[43merrors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43merrors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    435\u001b[0m \u001b[43m    \u001b[49m\u001b[43musing_cow\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43musing_copy_on_write\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    436\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32mg:\\Anaconda3\\envs\\starcup\\lib\\site-packages\\pandas\\core\\internals\\managers.py:363\u001b[0m, in \u001b[0;36mBaseBlockManager.apply\u001b[1;34m(self, f, align_keys, **kwargs)\u001b[0m\n\u001b[0;32m    361\u001b[0m         applied \u001b[38;5;241m=\u001b[39m b\u001b[38;5;241m.\u001b[39mapply(f, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m    362\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 363\u001b[0m         applied \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mgetattr\u001b[39m(b, f)(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m    364\u001b[0m     result_blocks \u001b[38;5;241m=\u001b[39m extend_blocks(applied, result_blocks)\n\u001b[0;32m    366\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mtype\u001b[39m(\u001b[38;5;28mself\u001b[39m)\u001b[38;5;241m.\u001b[39mfrom_blocks(result_blocks, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39maxes)\n",
      "File \u001b[1;32mg:\\Anaconda3\\envs\\starcup\\lib\\site-packages\\pandas\\core\\internals\\blocks.py:758\u001b[0m, in \u001b[0;36mBlock.astype\u001b[1;34m(self, dtype, copy, errors, using_cow, squeeze)\u001b[0m\n\u001b[0;32m    755\u001b[0m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCan not squeeze with more than one column.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m    756\u001b[0m     values \u001b[38;5;241m=\u001b[39m values[\u001b[38;5;241m0\u001b[39m, :]  \u001b[38;5;66;03m# type: ignore[call-overload]\u001b[39;00m\n\u001b[1;32m--> 758\u001b[0m new_values \u001b[38;5;241m=\u001b[39m \u001b[43mastype_array_safe\u001b[49m\u001b[43m(\u001b[49m\u001b[43mvalues\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcopy\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43merrors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43merrors\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    760\u001b[0m new_values \u001b[38;5;241m=\u001b[39m maybe_coerce_values(new_values)\n\u001b[0;32m    762\u001b[0m refs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
      "File \u001b[1;32mg:\\Anaconda3\\envs\\starcup\\lib\\site-packages\\pandas\\core\\dtypes\\astype.py:237\u001b[0m, in \u001b[0;36mastype_array_safe\u001b[1;34m(values, dtype, copy, errors)\u001b[0m\n\u001b[0;32m    234\u001b[0m     dtype \u001b[38;5;241m=\u001b[39m dtype\u001b[38;5;241m.\u001b[39mnumpy_dtype\n\u001b[0;32m    236\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 237\u001b[0m     new_values \u001b[38;5;241m=\u001b[39m \u001b[43mastype_array\u001b[49m\u001b[43m(\u001b[49m\u001b[43mvalues\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcopy\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    238\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mValueError\u001b[39;00m, \u001b[38;5;167;01mTypeError\u001b[39;00m):\n\u001b[0;32m    239\u001b[0m     \u001b[38;5;66;03m# e.g. _astype_nansafe can fail on object-dtype of strings\u001b[39;00m\n\u001b[0;32m    240\u001b[0m     \u001b[38;5;66;03m#  trying to convert to float\u001b[39;00m\n\u001b[0;32m    241\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m errors \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mignore\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n",
      "File \u001b[1;32mg:\\Anaconda3\\envs\\starcup\\lib\\site-packages\\pandas\\core\\dtypes\\astype.py:179\u001b[0m, in \u001b[0;36mastype_array\u001b[1;34m(values, dtype, copy)\u001b[0m\n\u001b[0;32m    175\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m values\n\u001b[0;32m    177\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(values, np\u001b[38;5;241m.\u001b[39mndarray):\n\u001b[0;32m    178\u001b[0m     \u001b[38;5;66;03m# i.e. ExtensionArray\u001b[39;00m\n\u001b[1;32m--> 179\u001b[0m     values \u001b[38;5;241m=\u001b[39m \u001b[43mvalues\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mastype\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcopy\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    181\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m    182\u001b[0m     values \u001b[38;5;241m=\u001b[39m _astype_nansafe(values, dtype, copy\u001b[38;5;241m=\u001b[39mcopy)\n",
      "File \u001b[1;32mg:\\Anaconda3\\envs\\starcup\\lib\\site-packages\\pandas\\core\\arrays\\datetimes.py:689\u001b[0m, in \u001b[0;36mDatetimeArray.astype\u001b[1;34m(self, dtype, copy)\u001b[0m\n\u001b[0;32m    686\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(dtype, ExtensionDtype):\n\u001b[0;32m    687\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(dtype, DatetimeTZDtype):\n\u001b[0;32m    688\u001b[0m         \u001b[38;5;66;03m# e.g. Sparse[datetime64[ns]]\u001b[39;00m\n\u001b[1;32m--> 689\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mastype\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcopy\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    690\u001b[0m     \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtz \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m    691\u001b[0m         \u001b[38;5;66;03m# pre-2.0 this did self.tz_localize(dtype.tz), which did not match\u001b[39;00m\n\u001b[0;32m    692\u001b[0m         \u001b[38;5;66;03m#  the Series behavior which did\u001b[39;00m\n\u001b[0;32m    693\u001b[0m         \u001b[38;5;66;03m#  values.tz_localize(\"UTC\").tz_convert(dtype.tz)\u001b[39;00m\n\u001b[0;32m    694\u001b[0m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\n\u001b[0;32m    695\u001b[0m             \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot use .astype to convert from timezone-naive dtype to \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    696\u001b[0m             \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtimezone-aware dtype. 
Use obj.tz_localize instead or \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    697\u001b[0m             \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mseries.dt.tz_localize instead\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    698\u001b[0m         )\n",
      "File \u001b[1;32mg:\\Anaconda3\\envs\\starcup\\lib\\site-packages\\pandas\\core\\arrays\\datetimelike.py:474\u001b[0m, in \u001b[0;36mDatetimeLikeArrayMixin.astype\u001b[1;34m(self, dtype, copy)\u001b[0m\n\u001b[0;32m    471\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_box_values(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39masi8\u001b[38;5;241m.\u001b[39mravel())\u001b[38;5;241m.\u001b[39mreshape(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mshape)\n\u001b[0;32m    473\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(dtype, ExtensionDtype):\n\u001b[1;32m--> 474\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mastype\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcopy\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    475\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m is_string_dtype(dtype):\n\u001b[0;32m    476\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_format_native_types()\n",
      "File \u001b[1;32mg:\\Anaconda3\\envs\\starcup\\lib\\site-packages\\pandas\\core\\arrays\\base.py:710\u001b[0m, in \u001b[0;36mExtensionArray.astype\u001b[1;34m(self, dtype, copy)\u001b[0m\n\u001b[0;32m    708\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(dtype, ExtensionDtype):\n\u001b[0;32m    709\u001b[0m     \u001b[38;5;28mcls\u001b[39m \u001b[38;5;241m=\u001b[39m dtype\u001b[38;5;241m.\u001b[39mconstruct_array_type()\n\u001b[1;32m--> 710\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_from_sequence\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcopy\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    712\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m lib\u001b[38;5;241m.\u001b[39mis_np_dtype(dtype, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mM\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m    713\u001b[0m     \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mpandas\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01marrays\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m DatetimeArray\n",
      "File \u001b[1;32mg:\\Anaconda3\\envs\\starcup\\lib\\site-packages\\pandas\\core\\arrays\\masked.py:152\u001b[0m, in \u001b[0;36mBaseMaskedArray._from_sequence\u001b[1;34m(cls, scalars, dtype, copy)\u001b[0m\n\u001b[0;32m    150\u001b[0m \u001b[38;5;129m@classmethod\u001b[39m\n\u001b[0;32m    151\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21m_from_sequence\u001b[39m(\u001b[38;5;28mcls\u001b[39m, scalars, \u001b[38;5;241m*\u001b[39m, dtype\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, copy: \u001b[38;5;28mbool\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Self:\n\u001b[1;32m--> 152\u001b[0m     values, mask \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_coerce_to_array\u001b[49m\u001b[43m(\u001b[49m\u001b[43mscalars\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcopy\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    153\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mcls\u001b[39m(values, mask)\n",
      "File \u001b[1;32mg:\\Anaconda3\\envs\\starcup\\lib\\site-packages\\pandas\\core\\arrays\\numeric.py:272\u001b[0m, in \u001b[0;36mNumericArray._coerce_to_array\u001b[1;34m(cls, value, dtype, copy)\u001b[0m\n\u001b[0;32m    270\u001b[0m dtype_cls \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_dtype_cls\n\u001b[0;32m    271\u001b[0m default_dtype \u001b[38;5;241m=\u001b[39m dtype_cls\u001b[38;5;241m.\u001b[39m_default_np_dtype\n\u001b[1;32m--> 272\u001b[0m values, mask, _, _ \u001b[38;5;241m=\u001b[39m \u001b[43m_coerce_to_data_and_mask\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    273\u001b[0m \u001b[43m    \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdtype_cls\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdefault_dtype\u001b[49m\n\u001b[0;32m    274\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    275\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m values, mask\n",
      "File \u001b[1;32mg:\\Anaconda3\\envs\\starcup\\lib\\site-packages\\pandas\\core\\arrays\\numeric.py:181\u001b[0m, in \u001b[0;36m_coerce_to_data_and_mask\u001b[1;34m(values, dtype, copy, dtype_cls, default_dtype)\u001b[0m\n\u001b[0;32m    179\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m values\u001b[38;5;241m.\u001b[39mdtype\u001b[38;5;241m.\u001b[39mkind \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124miuf\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[0;32m    180\u001b[0m     name \u001b[38;5;241m=\u001b[39m dtype_cls\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;241m.\u001b[39mstrip(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m--> 181\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mvalues\u001b[38;5;241m.\u001b[39mdtype\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m cannot be converted to \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m    183\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m values\u001b[38;5;241m.\u001b[39mndim \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[0;32m    184\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mvalues must be a 1D list-like\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
      "\u001b[1;31mTypeError\u001b[0m: datetime64[ns] cannot be converted to IntegerDtype"
     ]
    }
   ],
   "source": [
    "Train_mb_pageview_features = generate_all_mb_pageview_features(train_mb_pageview_dtl_data)\n",
    "\n",
    "print(\"\\n特征预览:\")\n",
    "print(Train_mb_pageview_features.head())\n",
    "print(f\"\\n特征维度: {Train_mb_pageview_features.shape}\")\n",
    "print(f\"\\n特征列表:\\n{Train_mb_pageview_features.columns.tolist()}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bbd57efb",
   "metadata": {},
   "source": [
    "## 生成测试集特征(预留代码)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "df1bd3f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# A_mb_pageview_dtl_data = pd.read_csv('./data/A/A_MB_PAGEVIEW_DTL.csv')\n",
    "# A_mb_pageview_features = generate_all_mb_pageview_features(A_mb_pageview_dtl_data)\n",
    "# print(\"\\n测试集特征预览:\")\n",
    "# print(A_mb_pageview_features.head())\n",
    "# print(f\"\\n测试集特征维度: {A_mb_pageview_features.shape}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "id": "0c1c9f86",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据类型检查:\n",
      "OPR_DATE    datetime64[ns]\n",
      "OPR_TIME            object\n",
      "dtype: object\n",
      "\n",
      "前5行数据:\n",
      "    OPR_DATE  OPR_TIME\n",
      "0 2013-10-01  07:26:19\n",
      "1 2013-10-03  07:43:12\n",
      "2 2013-10-01  10:22:52\n",
      "3 2013-10-07  00:12:46\n",
      "4 2013-10-02  11:00:57\n"
     ]
    }
   ],
   "source": [
    "print(\"数据类型检查:\")\n",
    "print(train_mb_pageview_dtl_data[['OPR_DATE', 'OPR_TIME']].dtypes)\n",
    "print(\"\\n前5行数据:\")\n",
    "print(train_mb_pageview_dtl_data[['OPR_DATE', 'OPR_TIME']].head())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "887c4acc",
   "metadata": {},
   "source": [
    "## 特征质量检查"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7e1dfca1",
   "metadata": {},
   "outputs": [],
   "source": [
    "def check_feature_quality(features, feature_name=\"特征\"):\n",
    "    \"\"\"\n",
    "    检查特征质量\n",
    "    \n",
    "    参数:\n",
    "    - features: 特征数据框\n",
    "    - feature_name: 特征集名称\n",
    "    \n",
    "    返回:\n",
    "    - 质量报告\n",
    "    \"\"\"\n",
    "    print(f\"\\n{'='*60}\")\n",
    "    print(f\"{feature_name}质量检查报告\")\n",
    "    print(f\"{'='*60}\")\n",
    "    \n",
    "    print(f\"\\n1. 基本信息:\")\n",
    "    print(f\"   特征数量: {features.shape[1] - 1} (不包括CUST_NO)\")\n",
    "    print(f\"   客户数量: {features.shape[0]}\")\n",
    "    \n",
    "    print(f\"\\n2. 缺失值分析:\")\n",
    "    missing_stats = pd.DataFrame({\n",
    "        '缺失数量': features.isnull().sum(),\n",
    "        '缺失比例': features.isnull().sum() / len(features) * 100\n",
    "    })\n",
    "    missing_stats = missing_stats[missing_stats['缺失数量'] > 0].sort_values('缺失比例', ascending=False)\n",
    "    \n",
    "    if len(missing_stats) > 0:\n",
    "        print(f\"   存在缺失值的特征数: {len(missing_stats)}\")\n",
    "        print(f\"\\n   缺失值Top10:\")\n",
    "        print(missing_stats.head(10))\n",
    "        \n",
    "        high_missing = missing_stats[missing_stats['缺失比例'] > 80]\n",
    "        if len(high_missing) > 0:\n",
    "            print(f\"\\n   警告: {len(high_missing)} 个特征缺失率超过80%\")\n",
    "            print(high_missing)\n",
    "    else:\n",
    "        print(\"   所有特征无缺失值\")\n",
    "    \n",
    "    print(f\"\\n3. 常数特征检查:\")\n",
    "    numeric_cols = features.select_dtypes(include=[np.number]).columns.tolist()\n",
    "    if 'CUST_NO' in numeric_cols:\n",
    "        numeric_cols.remove('CUST_NO')\n",
    "    \n",
    "    const_features = []\n",
    "    for col in numeric_cols:\n",
    "        if features[col].nunique() <= 1:\n",
    "            const_features.append(col)\n",
    "    \n",
    "    if len(const_features) > 0:\n",
    "        print(f\"   发现 {len(const_features)} 个常数特征:\")\n",
    "        for feat in const_features[:10]:\n",
    "            print(f\"   - {feat}\")\n",
    "    else:\n",
    "        print(\"   无常数特征\")\n",
    "    \n",
    "    print(f\"\\n4. 数据类型分布:\")\n",
    "    print(features.dtypes.value_counts())\n",
    "    \n",
    "    print(f\"\\n5. 数值特征统计:\")\n",
    "    numeric_features = features[numeric_cols]\n",
    "    print(f\"   最小值: {numeric_features.min().min()}\")\n",
    "    print(f\"   最大值: {numeric_features.max().max()}\")\n",
    "    print(f\"   均值范围: [{numeric_features.mean().min():.2f}, {numeric_features.mean().max():.2f}]\")\n",
    "    \n",
    "    print(f\"\\n6. 无穷值检查:\")\n",
    "    inf_count = np.isinf(numeric_features).sum().sum()\n",
    "    if inf_count > 0:\n",
    "        print(f\"   警告: 发现 {inf_count} 个无穷值\")\n",
    "        inf_cols = np.isinf(numeric_features).sum()\n",
    "        inf_cols = inf_cols[inf_cols > 0]\n",
    "        print(f\"   包含无穷值的特征:\\n{inf_cols}\")\n",
    "    else:\n",
    "        print(\"   无无穷值\")\n",
    "    \n",
    "    print(f\"\\n{'='*60}\\n\")\n",
    "    \n",
    "    return missing_stats\n",
    "\n",
    "missing_report = check_feature_quality(Train_mb_pageview_features, \"训练集掌银页面访问特征\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4175c3c9",
   "metadata": {},
   "source": [
    "## 保存特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "adf746ba",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the training-set features so downstream modeling notebooks can\n",
    "# load them without recomputing.\n",
    "feature_dir = './feature'\n",
    "if not os.path.exists(feature_dir):\n",
    "    os.makedirs(feature_dir)\n",
    "    print(f\"创建特征目录: {feature_dir}\")\n",
    "\n",
    "# Features are stored as a pickled DataFrame.\n",
    "train_feature_path = os.path.join(feature_dir, 'Train_mb_pageview_features.pkl')\n",
    "with open(train_feature_path, 'wb') as f:\n",
    "    pickle.dump(Train_mb_pageview_features, f)\n",
    "print(f\"训练集特征已保存至: {train_feature_path}\")\n",
    "\n",
    "# Reserved: save the A test-set features once they have been generated.\n",
    "# test_feature_path = os.path.join(feature_dir, 'A_mb_pageview_features.pkl')\n",
    "# with open(test_feature_path, 'wb') as f:\n",
    "#     pickle.dump(A_mb_pageview_features, f)\n",
    "# print(f\"测试集特征已保存至: {test_feature_path}\")\n",
    "\n",
    "print(\"\\n特征保存完成!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "32bd898d",
   "metadata": {},
   "source": [
    "## 特征汇总说明"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "34510d07",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"=\"*80)\n",
    "print(\"掌银页面访问明细表特征工程汇总\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "feature_groups = {\n",
    "    \"1. 基础统计特征\": [\n",
    "        \"总访问次数(total_count)\",\n",
    "        \"唯一当前页面数(cur_page_nunique)\",\n",
    "        \"唯一上一页面数(last_page_nunique)\",\n",
    "        \"唯一事件数(event_nunique)\",\n",
    "        \"唯一模块数(mod_nunique)\",\n",
    "        \"活跃天数(active_days)\",\n",
    "        \"平均每日访问次数(avg_daily_count)\",\n",
    "        \"页面多样性(page_diversity)\"\n",
    "    ],\n",
    "    \n",
    "    \"2. 时间行为特征\": [\n",
    "        \"访问小时统计(hour_mean/std/min/max)\",\n",
    "        \"各时段访问次数(morning/afternoon/evening/night_count)\",\n",
    "        \"工作日统计(weekday_mean/std)\",\n",
    "        \"周末访问次数(weekend_count)\",\n",
    "        \"访问时间间隔统计(time_diff_mean/std/min/max)\"\n",
    "    ],\n",
    "    \n",
    "    \"3. 页面转换特征\": [\n",
    "        \"页面转换类型数(transition_nunique)\",\n",
    "        \"页面转换次数(transition_count)\",\n",
    "        \"转换多样性(transition_diversity)\",\n",
    "        \"自循环次数(self_loop_count)\",\n",
    "        \"自循环占比(self_loop_ratio)\"\n",
    "    ],\n",
    "    \n",
    "    \"4. 事件特征\": [\n",
    "        \"各事件类型访问次数(event_XXX_count)\",\n",
    "        \"各事件类型访问占比(event_XXX_ratio)\"\n",
    "    ],\n",
    "    \n",
    "    \"5. 模块特征\": [\n",
    "        \"Top30模块访问次数(mod_XXX_count)\",\n",
    "        \"模块访问总次数(mod_count)\",\n",
    "        \"唯一模块数(mod_nunique)\",\n",
    "        \"模块多样性(mod_diversity)\"\n",
    "    ],\n",
    "    \n",
    "    \"6. 时间窗口聚合特征\": [\n",
    "        \"页面访问最大唯一数对应天数(max_nunique_days_to_now)\",\n",
    "        \"页面访问最大次数对应天数(max_count_days_to_now)\",\n",
    "        \"按周聚合统计(week_count_mean/std/max/min)\",\n",
    "        \"按月聚合统计(month_count_mean/std/max/min)\"\n",
    "    ],\n",
    "    \n",
    "    \"7. 近期行为特征(7/14/30/60/90天)\": [\n",
    "        \"近期访问次数(lastXXd_count)\",\n",
    "        \"近期唯一页面数(lastXXd_page_nunique)\",\n",
    "        \"近期唯一模块数(lastXXd_mod_nunique)\",\n",
    "        \"近期唯一事件数(lastXXd_event_nunique)\",\n",
    "        \"近期活跃天数(lastXXd_active_days)\",\n",
    "        \"近期平均每日访问(lastXXd_avg_daily)\",\n",
    "        \"访问增长率(growth_XXtoYYd)\"\n",
    "    ],\n",
    "    \n",
    "    \"8. 会话特征\": [\n",
    "        \"会话数量(session_count)\",\n",
    "        \"会话平均长度(session_avg_length)\",\n",
    "        \"会话最大/最小长度(session_max/min_length)\",\n",
    "        \"会话长度标准差(session_std_length)\",\n",
    "        \"平均每会话页面数(avg_pages_per_session)\"\n",
    "    ],\n",
    "    \n",
    "    \"9. 交叉特征\": [\n",
    "        \"页面-模块组合唯一数(page_mod_combo_nunique)\",\n",
    "        \"页面-模块组合次数(page_mod_combo_count)\",\n",
    "        \"页面-模块组合多样性(page_mod_combo_diversity)\",\n",
    "        \"事件-模块组合唯一数(event_mod_combo_nunique)\"\n",
    "    ],\n",
    "    \n",
    "    \"10. Top30模块页面转换特征\": [\n",
    "        \"各模块最大页面唯一数对应天数(mod_XXX_max_nunique_days)\",\n",
    "        \"各模块最大访问次数对应天数(mod_XXX_max_count_days)\",\n",
    "        \"反映用户在不同模块的活跃时间模式\"\n",
    "    ],\n",
    "    \n",
    "    \"11. Word2Vec嵌入特征\": [\n",
    "        \"页面转换序列嵌入(page_w2v_0~15, 16维)\",\n",
    "        \"模块序列嵌入(module_w2v_0~15, 16维)\",\n",
    "        \"捕捉页面/模块访问的序列模式和上下文信息\"\n",
    "    ]\n",
    "}\n",
    "\n",
    "print(\"\\n特征分类及说明:\\n\")\n",
    "for group_name, features in feature_groups.items():\n",
    "    print(f\"{group_name}:\")\n",
    "    for feat in features:\n",
    "        print(f\"  - {feat}\")\n",
    "    print()\n",
    "\n",
    "print(\"=\"*80)\n",
    "print(\"特征工程要点:\")\n",
    "print(\"=\"*80)\n",
    "print(\"1. 充分挖掘用户在掌银的浏览行为模式\")\n",
    "print(\"2. 数据已哈希脱敏,采用统计方法而非关键词筛选\")\n",
    "print(\"3. 分析用户活跃度和行为习惯(时间分布、频率等)\")\n",
    "print(\"4. 构建页面转换路径特征,了解用户浏览轨迹\")\n",
    "print(\"5. 时间窗口聚合特征捕捉用户行为的时间演化规律\")\n",
    "print(\"6. 近期行为特征捕捉用户最新的兴趣变化\")\n",
    "print(\"7. 会话特征反映用户单次使用深度\")\n",
    "print(\"8. 交叉特征捕捉不同维度组合的行为模式\")\n",
    "print(\"9. Top30模块特征识别用户在关键模块的活跃模式\")\n",
    "print(\"10. Word2Vec嵌入特征学习页面/模块序列的语义表示\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "print(\"\\n参考往年优秀方案,本方案特色:\")\n",
    "print(\"- 按天/周/月时间窗口聚合,发现行为周期性\")\n",
    "print(\"- 计算关键行为最大值对应时间,识别活跃高峰\")\n",
    "print(\"- 使用Word2Vec学习序列嵌入,捕捉隐藏模式\")\n",
    "print(\"- Top30模块细粒度分析,精准刻画重点模块偏好\")\n",
    "print(\"=\"*80)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "starcup",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
