{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "09f7126f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "c6ab2b72",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import gc\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import re\n",
    "import time\n",
    "from scipy import stats\n",
    "import matplotlib.pyplot as plt\n",
    "import category_encoders as ce\n",
    "import networkx as nx\n",
    "import pickle\n",
    "import lightgbm as lgb\n",
    "import catboost as cat\n",
    "import xgboost as xgb\n",
    "import seaborn as sns\n",
    "from datetime import timedelta\n",
    "from gensim.models import Word2Vec\n",
    "from io import StringIO\n",
    "from tqdm import tqdm\n",
    "from lightgbm import LGBMClassifier\n",
    "from lightgbm import log_evaluation, early_stopping\n",
    "from sklearn.metrics import roc_curve\n",
    "from scipy.stats import chi2_contingency, pearsonr\n",
    "from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n",
    "from sklearn.feature_extraction import FeatureHasher\n",
    "from sklearn.model_selection import StratifiedKFold, KFold, train_test_split, GridSearchCV\n",
    "from category_encoders import TargetEncoder\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from autogluon.tabular import TabularDataset, TabularPredictor, FeatureMetadata\n",
    "from autogluon.features.generators import AsTypeFeatureGenerator, BulkFeatureGenerator, DropUniqueFeatureGenerator, FillNaFeatureGenerator, PipelineFeatureGenerator\n",
    "from autogluon.features.generators import CategoryFeatureGenerator, IdentityFeatureGenerator, AutoMLPipelineFeatureGenerator\n",
    "from autogluon.common.features.types import R_INT, R_FLOAT\n",
    "from autogluon.core.metrics import make_scorer"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a08d6044",
   "metadata": {},
   "source": [
    "## 数据导入"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "df055add",
   "metadata": {},
   "source": [
    "## 通用导入函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "74bcbf7b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data_from_directory(directory):\n",
    "    \"\"\"\n",
    "    遍历目录加载所有CSV文件，将其作为独立的DataFrame变量\n",
    "\n",
    "    参数:\n",
    "    - directory: 输入的数据路径\n",
    "    \n",
    "    返回:\n",
    "    - 含有数据集名称的列表\n",
    "    \"\"\"\n",
    "    dataset_names = []\n",
    "    for filename in os.listdir(directory):\n",
    "        if filename.endswith(\".csv\"):\n",
    "            dataset_name = os.path.splitext(filename)[0] + '_data' # 获取文件名作为变量名\n",
    "            file_path = os.path.join(directory, filename)  # 完整的文件路径\n",
    "            globals()[dataset_name] = pd.read_csv(file_path)  # 将文件加载为DataFrame并赋值给全局变量\n",
    "            dataset_names.append(dataset_name)\n",
    "            print(f\"数据集 {dataset_name} 已加载为 DataFrame\")\n",
    "\n",
    "    return dataset_names"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "993c86ce",
   "metadata": {},
   "source": [
    "## 训练集导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "722d1e40",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集 TRAIN_ASSET_data 已加载为 DataFrame\n",
      "数据集 TRAIN_CCD_TR_DTL_data 已加载为 DataFrame\n",
      "数据集 TRAIN_MB_CUST_INFO_data 已加载为 DataFrame\n",
      "数据集 TRAIN_MB_PAGEVIEW_DTL_data 已加载为 DataFrame\n",
      "数据集 TRAIN_MB_TRNFLW_DTL_data 已加载为 DataFrame\n",
      "数据集 TRAIN_NATURE_data 已加载为 DataFrame\n",
      "数据集 TRAIN_PROD_HOLD_data 已加载为 DataFrame\n",
      "数据集 TRAIN_TR_APS_DTL_data 已加载为 DataFrame\n"
     ]
    }
   ],
   "source": [
    "train_load_dt = './data/Train'\n",
    "train_data_name = load_data_from_directory(train_load_dt)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "82e70968",
   "metadata": {},
   "source": [
    "## 测试集导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c3ef0f15",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集 A_ASSET_data 已加载为 DataFrame\n",
      "数据集 A_CCD_TR_DTL_data 已加载为 DataFrame\n",
      "数据集 A_MB_CUST_INFO_data 已加载为 DataFrame\n",
      "数据集 A_MB_PAGEVIEW_DTL_data 已加载为 DataFrame\n",
      "数据集 A_MB_TRNFLW_DTL_data 已加载为 DataFrame\n",
      "数据集 A_PROD_HOLD_data 已加载为 DataFrame\n",
      "数据集 A_TEST_NATURE_data 已加载为 DataFrame\n",
      "数据集 A_TR_APS_DTL_data 已加载为 DataFrame\n"
     ]
    }
   ],
   "source": [
    "A_load_dt = './data/A'\n",
    "A_data_name = load_data_from_directory(A_load_dt)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b6a9129f",
   "metadata": {},
   "source": [
    "# 特征工程"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3424a7f0",
   "metadata": {},
   "source": [
    "## 1. 数据预处理与时间特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "42f09f09",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "==================================================\n",
      "处理训练集数据\n",
      "==================================================\n",
      "处理后数据形状: (2000, 18)\n",
      "时间范围: 2025-04-21 00:00:00 ~ 2025-04-30 00:00:00\n",
      "月份分布:\\ndate_months_to_now\n",
      "0    2000\n",
      "Name: count, dtype: int64\n",
      "\\n==================================================\n",
      "处理测试集数据\n",
      "==================================================\n",
      "处理后数据形状: (1000, 18)\n",
      "时间范围: 2025-04-06 00:00:00 ~ 2025-04-11 00:00:00\n",
      "月份分布:\\ndate_months_to_now\n",
      "0    1000\n",
      "Name: count, dtype: int64\n",
      "\\n数据预处理完成!\n"
     ]
    }
   ],
   "source": [
    "def process_mb_pageview_time_features(df):\n",
    "    \"\"\"\n",
    "    处理掌银页面访问明细表的时间特征\n",
    "    包含:距今天数、月份、周数、周几、是否周末、是否月初月末等\n",
    "    \"\"\"\n",
    "    df = df.copy()\n",
    "    \n",
    "    # 转换日期格式\n",
    "    df[\"OPR_DATE\"] = pd.to_datetime(df[\"OPR_DATE\"], format=\"%Y%m%d\")\n",
    "    \n",
    "    # 计算距今天数\n",
    "    max_date = df[\"OPR_DATE\"].max()\n",
    "    df_days_to_now = (max_date - df[\"OPR_DATE\"]).dt.days\n",
    "    df[\"date_days_to_now\"] = df_days_to_now\n",
    "    df[\"date_weeks_to_now\"] = df_days_to_now // 7\n",
    "    df[\"date_months_to_now\"] = df_days_to_now // 31\n",
    "    \n",
    "    # 提取时间维度\n",
    "    df[\"opr_month\"] = df[\"OPR_DATE\"].dt.month\n",
    "    df[\"opr_day\"] = df[\"OPR_DATE\"].dt.day\n",
    "    df[\"opr_dayofweek\"] = df[\"OPR_DATE\"].dt.dayofweek\n",
    "    df[\"opr_is_weekend\"] = df[\"opr_dayofweek\"].isin([5, 6]).astype(int)\n",
    "    df[\"opr_is_month_start\"] = df[\"OPR_DATE\"].dt.is_month_start.astype(int)\n",
    "    df[\"opr_is_month_end\"] = df[\"OPR_DATE\"].dt.is_month_end.astype(int)\n",
    "    \n",
    "    # 提取时间特征\n",
    "    if \"OPR_TIME\" in df.columns:\n",
    "        df[\"opr_hour\"] = df[\"OPR_TIME\"].astype(str).str[:2].astype(int)\n",
    "        df[\"opr_time_period\"] = pd.cut(df[\"opr_hour\"], \n",
    "                                       bins=[0, 6, 12, 18, 24],\n",
    "                                       labels=['night', 'morning', 'afternoon', 'evening'],\n",
    "                                       include_lowest=True)\n",
    "    \n",
    "    print(f\"处理后数据形状: {df.shape}\")\n",
    "    print(f\"时间范围: {df['OPR_DATE'].min()} ~ {df['OPR_DATE'].max()}\")\n",
    "    print(f\"月份分布:\\\\n{df['date_months_to_now'].value_counts().sort_index()}\")\n",
    "    \n",
    "    return df\n",
    "\n",
    "# 处理训练集和测试集\n",
    "print(\"=\" * 50)\n",
    "print(\"处理训练集数据\")\n",
    "print(\"=\" * 50)\n",
    "TRAIN_MB_PAGEVIEW_DTL_data = process_mb_pageview_time_features(TRAIN_MB_PAGEVIEW_DTL_data)\n",
    "\n",
    "print(\"\\\\n\" + \"=\" * 50)\n",
    "print(\"处理测试集数据\")\n",
    "print(\"=\" * 50)\n",
    "A_MB_PAGEVIEW_DTL_data = process_mb_pageview_time_features(A_MB_PAGEVIEW_DTL_data)\n",
    "\n",
    "print(\"\\\\n数据预处理完成!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4b3e919b",
   "metadata": {},
   "source": [
    "## 2. RFM基础特征(Recency-Frequency-Monetary)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "5a341c30",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\\n生成训练集RFM特征...\n",
      "生成RFM-R特征: 最近一次活跃时间...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "RFM-R: 100%|██████████| 3/3 [00:00<00:00, 173.88it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成RFM-F特征: 访问频次统计...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "RFM-F: 100%|██████████| 6/6 [00:00<00:00, 247.46it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成RFM-M特征: 访问多样性...\n",
      "训练集RFM特征数量: 34\\n\n",
      "生成测试集RFM特征...\n",
      "生成RFM-R特征: 最近一次活跃时间...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "RFM-R: 100%|██████████| 3/3 [00:00<00:00, 205.56it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成RFM-F特征: 访问频次统计...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "RFM-F: 100%|██████████| 6/6 [00:00<00:00, 243.78it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成RFM-M特征: 访问多样性...\n",
      "测试集RFM特征数量: 34\n",
      "\\nRFM特征生成完成!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "def gen_mb_rfm_features(df):\n",
    "    \"\"\"\n",
    "    生成掌银页面访问的RFM特征\n",
    "    R (Recency): 最近访问时间\n",
    "    F (Frequency): 访问频次\n",
    "    M (Monetary): 访问页面多样性\n",
    "    \"\"\"\n",
    "    feature = df[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
    "    \n",
    "    # 按天聚合\n",
    "    df_by_day = df.groupby([\"CUST_NO\", \"date_days_to_now\", \"date_weeks_to_now\", \"date_months_to_now\"]).agg({\n",
    "        'CUR_PAGE': ['nunique', 'count'],\n",
    "        'LAST_PAGE': 'nunique',\n",
    "        'MOD_NAME': 'nunique',\n",
    "        'EVENT_NAME': 'nunique'\n",
    "    })\n",
    "    df_by_day.columns = ['cur_page_nunique', 'cur_page_count', 'last_page_nunique', 'mod_name_nunique', 'event_name_nunique']\n",
    "    df_by_day = df_by_day.reset_index()\n",
    "    \n",
    "    print(\"生成RFM-R特征: 最近一次活跃时间...\")\n",
    "    # RFM-R: 每月日点击笔数/日点击页面数最大的那天距今天数\n",
    "    for month in tqdm([0, 1, 2], desc='RFM-R'):\n",
    "        df_month = df_by_day[df_by_day[\"date_months_to_now\"] == month]\n",
    "        \n",
    "        # 日点击页面数最多的那天距今天数\n",
    "        tmp_nunique = df_month.groupby(['CUST_NO']).agg({\"cur_page_nunique\": \"max\"}).reset_index()\n",
    "        tmp_nunique = tmp_nunique.merge(\n",
    "            df_month[['CUST_NO', 'date_days_to_now', 'cur_page_nunique']], \n",
    "            on=[\"CUST_NO\", 'cur_page_nunique'], how=\"inner\"\n",
    "        )\n",
    "        tmp_nunique = tmp_nunique.groupby(['CUST_NO'])[\"date_days_to_now\"].min().to_frame(\n",
    "            f\"max_page_nunique_days_m{month}\"\n",
    "        ).reset_index()\n",
    "        feature = feature.merge(tmp_nunique, how=\"left\", on=\"CUST_NO\")\n",
    "        \n",
    "        # 日点击次数最多的那天距今天数\n",
    "        tmp_cnt = df_month.groupby(['CUST_NO']).agg({\"cur_page_count\": \"max\"}).reset_index()\n",
    "        tmp_cnt = tmp_cnt.merge(\n",
    "            df_month[['CUST_NO', 'date_days_to_now', 'cur_page_count']], \n",
    "            on=[\"CUST_NO\", 'cur_page_count'], how=\"inner\"\n",
    "        )\n",
    "        tmp_cnt = tmp_cnt.groupby(['CUST_NO'])[\"date_days_to_now\"].min().to_frame(\n",
    "            f\"max_page_count_days_m{month}\"\n",
    "        ).reset_index()\n",
    "        feature = feature.merge(tmp_cnt, how=\"left\", on=\"CUST_NO\")\n",
    "    \n",
    "    print(\"生成RFM-F特征: 访问频次统计...\")\n",
    "    # RFM-F: 整体访问频次统计\n",
    "    stats_list = ['mean', 'max', 'min', 'median', 'std', 'sum']\n",
    "    for stat in tqdm(stats_list, desc='RFM-F'):\n",
    "        # 每日点击页面数统计\n",
    "        tmp = df_by_day.groupby(['CUST_NO'])['cur_page_nunique'].agg(stat).to_frame(\n",
    "            f'daily_page_nunique_{stat}'\n",
    "        ).reset_index()\n",
    "        feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 每日点击次数统计\n",
    "        tmp = df_by_day.groupby(['CUST_NO'])['cur_page_count'].agg(stat).to_frame(\n",
    "            f'daily_page_count_{stat}'\n",
    "        ).reset_index()\n",
    "        feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 每日模块数统计\n",
    "        tmp = df_by_day.groupby(['CUST_NO'])['mod_name_nunique'].agg(stat).to_frame(\n",
    "            f'daily_mod_nunique_{stat}'\n",
    "        ).reset_index()\n",
    "        feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 每日事件数统计\n",
    "        tmp = df_by_day.groupby(['CUST_NO'])['event_name_nunique'].agg(stat).to_frame(\n",
    "            f'daily_event_nunique_{stat}'\n",
    "        ).reset_index()\n",
    "        feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(\"生成RFM-M特征: 访问多样性...\")\n",
    "    # RFM-M: 访问页面的多样性\n",
    "    tmp = df.groupby('CUST_NO').agg({\n",
    "        'CUR_PAGE': 'nunique',\n",
    "        'LAST_PAGE': 'nunique',\n",
    "        'MOD_NAME': 'nunique',\n",
    "        'EVENT_NAME': 'nunique'\n",
    "    })\n",
    "    tmp.columns = ['total_cur_page_nunique', 'total_last_page_nunique', 'total_mod_nunique', 'total_event_nunique']\n",
    "    tmp = tmp.reset_index()\n",
    "    feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    return feature\n",
    "\n",
    "# 生成训练集和测试集的RFM特征\n",
    "print(\"\\\\n生成训练集RFM特征...\")\n",
    "train_rfm_features = gen_mb_rfm_features(TRAIN_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"训练集RFM特征数量: {train_rfm_features.shape[1] - 1}\\\\n\")\n",
    "\n",
    "print(\"生成测试集RFM特征...\")\n",
    "test_rfm_features = gen_mb_rfm_features(A_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"测试集RFM特征数量: {test_rfm_features.shape[1] - 1}\")\n",
    "\n",
    "print(f\"\\\\nRFM特征生成完成!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "976dcebb",
   "metadata": {},
   "source": [
    "## 3. 页面路径跳转特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "29a6fe3b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成页面路径跳转特征...\n",
      "创建页面路径...\n",
      "使用训练集Top页面路径数: 50\n",
      "使用训练集Top模块路径数: 47\n",
      "\\n生成路径多样性特征...\n",
      "生成Top页面路径访问次数特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Top页面路径: 100%|██████████| 50/50 [00:00<00:00, 439.26it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成Top模块路径访问次数特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Top模块路径: 100%|██████████| 47/47 [00:00<00:00, 504.06it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "特征数量验证通过: 103 个特征\n",
      "\\n训练集路径特征数量: 103\n",
      "测试集路径特征数量: 103\n",
      "\\n页面路径特征生成完成!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "def gen_mb_path_features(df_train, df_test):\n",
    "    \"\"\"\n",
    "    生成页面和模块之间的跳转路径特征\n",
    "    确保训练集和测试集特征完全一致\n",
    "    \"\"\"\n",
    "    feature_train = df_train[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
    "    feature_test = df_test[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
    "    \n",
    "    # 建立页面到模块的映射字典(训练集+测试集)\n",
    "    df_all = pd.concat([df_train, df_test], axis=0)\n",
    "    page2mod_dict = dict(zip(df_all['CUR_PAGE'], df_all['MOD_NAME']))\n",
    "    \n",
    "    print(\"创建页面路径...\")\n",
    "    # 创建页面跳转路径: LAST_PAGE -> CUR_PAGE\n",
    "    df_train = df_train.copy()\n",
    "    df_test = df_test.copy()\n",
    "    df_train['page_path'] = df_train['LAST_PAGE'].astype(str) + '_' + df_train['CUR_PAGE'].astype(str)\n",
    "    df_train['last_mod'] = df_train['LAST_PAGE'].map(page2mod_dict)\n",
    "    df_train['mod_path'] = df_train['last_mod'].astype(str) + '_' + df_train['MOD_NAME'].astype(str)\n",
    "    \n",
    "    df_test['page_path'] = df_test['LAST_PAGE'].astype(str) + '_' + df_test['CUR_PAGE'].astype(str)\n",
    "    df_test['last_mod'] = df_test['LAST_PAGE'].map(page2mod_dict)\n",
    "    df_test['mod_path'] = df_test['last_mod'].astype(str) + '_' + df_test['MOD_NAME'].astype(str)\n",
    "    \n",
    "    # 使用训练集统计Top路径\n",
    "    top_page_paths = df_train['page_path'].value_counts().head(50).index.tolist()\n",
    "    top_mod_paths = df_train['mod_path'].value_counts().head(47).index.tolist()\n",
    "    \n",
    "    print(f\"使用训练集Top页面路径数: {len(top_page_paths)}\")\n",
    "    print(f\"使用训练集Top模块路径数: {len(top_mod_paths)}\")\n",
    "    \n",
    "    print(\"\\\\n生成路径多样性特征...\")\n",
    "    # 训练集路径多样性特征\n",
    "    tmp = df_train.groupby('CUST_NO')['page_path'].agg([\n",
    "        ('unique_page_paths', 'nunique'),\n",
    "        ('total_page_visits', 'count')\n",
    "    ]).reset_index()\n",
    "    tmp['page_path_diversity'] = tmp['unique_page_paths'] / (tmp['total_page_visits'] + 1)\n",
    "    feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    tmp = df_train.groupby('CUST_NO')['mod_path'].agg([\n",
    "        ('unique_mod_paths', 'nunique'),\n",
    "        ('total_mod_visits', 'count')\n",
    "    ]).reset_index()\n",
    "    tmp['mod_path_diversity'] = tmp['unique_mod_paths'] / (tmp['total_mod_visits'] + 1)\n",
    "    feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 测试集路径多样性特征\n",
    "    tmp = df_test.groupby('CUST_NO')['page_path'].agg([\n",
    "        ('unique_page_paths', 'nunique'),\n",
    "        ('total_page_visits', 'count')\n",
    "    ]).reset_index()\n",
    "    tmp['page_path_diversity'] = tmp['unique_page_paths'] / (tmp['total_page_visits'] + 1)\n",
    "    feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    tmp = df_test.groupby('CUST_NO')['mod_path'].agg([\n",
    "        ('unique_mod_paths', 'nunique'),\n",
    "        ('total_mod_visits', 'count')\n",
    "    ]).reset_index()\n",
    "    tmp['mod_path_diversity'] = tmp['unique_mod_paths'] / (tmp['total_mod_visits'] + 1)\n",
    "    feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(\"生成Top页面路径访问次数特征...\")\n",
    "    # 关键修复: 基于训练集统计Top路径,确保训练集和测试集使用相同的路径列表\n",
    "    for path in tqdm(top_page_paths, desc='Top页面路径'):\n",
    "        # 训练集\n",
    "        tmp = df_train[df_train['page_path'] == path].groupby('CUST_NO').size().to_frame(\n",
    "            f'page_path_{path}_cnt'\n",
    "        ).reset_index()\n",
    "        feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 测试集 - 使用相同的路径\n",
    "        tmp = df_test[df_test['page_path'] == path].groupby('CUST_NO').size().to_frame(\n",
    "            f'page_path_{path}_cnt'\n",
    "        ).reset_index()\n",
    "        feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(\"生成Top模块路径访问次数特征...\")\n",
    "    # 关键修复: 基于训练集统计Top路径,确保训练集和测试集使用相同的路径列表\n",
    "    for path in tqdm(top_mod_paths, desc='Top模块路径'):\n",
    "        # 训练集\n",
    "        tmp = df_train[df_train['mod_path'] == path].groupby('CUST_NO').size().to_frame(\n",
    "            f'mod_path_{path}_cnt'\n",
    "        ).reset_index()\n",
    "        feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 测试集 - 使用相同的路径\n",
    "        tmp = df_test[df_test['mod_path'] == path].groupby('CUST_NO').size().to_frame(\n",
    "            f'mod_path_{path}_cnt'\n",
    "        ).reset_index()\n",
    "        feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 验证特征数量一致\n",
    "    assert feature_train.shape[1] == feature_test.shape[1], f\"特征数量不一致! 训练集:{feature_train.shape[1]}, 测试集:{feature_test.shape[1]}\"\n",
    "    print(f\"特征数量验证通过: {feature_train.shape[1] - 1} 个特征\")\n",
    "    \n",
    "    return feature_train, feature_test\n",
    "\n",
    "# 生成训练集和测试集的路径特征\n",
    "print(\"生成页面路径跳转特征...\")\n",
    "train_path_features, test_path_features = gen_mb_path_features(TRAIN_MB_PAGEVIEW_DTL_data, A_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"\\\\n训练集路径特征数量: {train_path_features.shape[1] - 1}\")\n",
    "print(f\"测试集路径特征数量: {test_path_features.shape[1] - 1}\")\n",
    "print(\"\\\\n页面路径特征生成完成!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "602ff57c",
   "metadata": {},
   "source": [
    "## 4. 页面/模块Top统计特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "19b85200",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\\n生成训练集和测试集Top页面/模块特征...\n",
      "基于训练集选择Top60页面和Top40模块\n",
      "Top页面数: 60, Top模块数: 25\n",
      "生成Top页面访问特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Top页面特征: 100%|██████████| 60/60 [00:00<00:00, 184.06it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成Top模块访问特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Top模块特征: 100%|██████████| 25/25 [00:00<00:00, 113.00it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "特征数量验证通过: 280 个特征\n",
      "训练集Top特征数量: 280\n",
      "测试集Top特征数量: 280\n",
      "\\nTop特征生成完成!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "def gen_mb_top_page_module_features(df_train, df_test):\n",
    "    \"\"\"\n",
    "    生成Top页面和Top模块的访问统计特征\n",
    "    关键修复: 基于训练集统计Top列表,确保训练集和测试集使用相同的Top列表\n",
    "    \"\"\"\n",
    "    feature_train = df_train[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
    "    feature_test = df_test[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
    "    \n",
    "    # 关键修复: 基于训练集统计Top页面和Top模块\n",
    "    top_pages = df_train['CUR_PAGE'].value_counts().head(60).index.tolist()\n",
    "    top_modules = df_train['MOD_NAME'].value_counts().head(40).index.tolist()\n",
    "    \n",
    "    print(f\"基于训练集选择Top60页面和Top40模块\")\n",
    "    print(f\"Top页面数: {len(top_pages)}, Top模块数: {len(top_modules)}\")\n",
    "    \n",
    "    print(\"生成Top页面访问特征...\")\n",
    "    # Top页面的访问统计\n",
    "    for page in tqdm(top_pages, desc='Top页面特征'):\n",
    "        # 训练集\n",
    "        df_page_train = df_train[df_train['CUR_PAGE'] == page]\n",
    "        tmp = df_page_train.groupby('CUST_NO').size().to_frame(f'page_{page}_cnt').reset_index()\n",
    "        feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        tmp = df_page_train.groupby('CUST_NO')['date_days_to_now'].nunique().to_frame(\n",
    "            f'page_{page}_visit_days'\n",
    "        ).reset_index()\n",
    "        feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        tmp = df_page_train.groupby('CUST_NO')['date_days_to_now'].min().to_frame(\n",
    "            f'page_{page}_last_visit'\n",
    "        ).reset_index()\n",
    "        feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 测试集 - 使用相同的页面\n",
    "        df_page_test = df_test[df_test['CUR_PAGE'] == page]\n",
    "        tmp = df_page_test.groupby('CUST_NO').size().to_frame(f'page_{page}_cnt').reset_index()\n",
    "        feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        tmp = df_page_test.groupby('CUST_NO')['date_days_to_now'].nunique().to_frame(\n",
    "            f'page_{page}_visit_days'\n",
    "        ).reset_index()\n",
    "        feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        tmp = df_page_test.groupby('CUST_NO')['date_days_to_now'].min().to_frame(\n",
    "            f'page_{page}_last_visit'\n",
    "        ).reset_index()\n",
    "        feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(\"生成Top模块访问特征...\")\n",
    "    # Top模块的访问统计\n",
    "    for module in tqdm(top_modules, desc='Top模块特征'):\n",
    "        # 训练集\n",
    "        df_module_train = df_train[df_train['MOD_NAME'] == module]\n",
    "        tmp = df_module_train.groupby('CUST_NO').size().to_frame(f'module_{module}_cnt').reset_index()\n",
    "        feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        tmp = df_module_train.groupby('CUST_NO')['CUR_PAGE'].nunique().to_frame(\n",
    "            f'module_{module}_unique_pages'\n",
    "        ).reset_index()\n",
    "        feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        tmp = df_module_train.groupby('CUST_NO')['date_days_to_now'].min().to_frame(\n",
    "            f'module_{module}_last_visit'\n",
    "        ).reset_index()\n",
    "        feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        tmp_total = df_train.groupby('CUST_NO').size().to_frame('total_cnt').reset_index()\n",
    "        tmp_module = df_module_train.groupby('CUST_NO').size().to_frame(f'module_{module}_cnt_ratio').reset_index()\n",
    "        tmp = tmp_total.merge(tmp_module, on='CUST_NO', how='left')\n",
    "        tmp[f'module_{module}_cnt_ratio'] = tmp[f'module_{module}_cnt_ratio'] / (tmp['total_cnt'] + 1)\n",
    "        feature_train = feature_train.merge(tmp[['CUST_NO', f'module_{module}_cnt_ratio']], on='CUST_NO', how='left')\n",
    "        \n",
    "        # 测试集 - 使用相同的模块\n",
    "        df_module_test = df_test[df_test['MOD_NAME'] == module]\n",
    "        tmp = df_module_test.groupby('CUST_NO').size().to_frame(f'module_{module}_cnt').reset_index()\n",
    "        feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        tmp = df_module_test.groupby('CUST_NO')['CUR_PAGE'].nunique().to_frame(\n",
    "            f'module_{module}_unique_pages'\n",
    "        ).reset_index()\n",
    "        feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        tmp = df_module_test.groupby('CUST_NO')['date_days_to_now'].min().to_frame(\n",
    "            f'module_{module}_last_visit'\n",
    "        ).reset_index()\n",
    "        feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        tmp_total = df_test.groupby('CUST_NO').size().to_frame('total_cnt').reset_index()\n",
    "        tmp_module = df_module_test.groupby('CUST_NO').size().to_frame(f'module_{module}_cnt_ratio').reset_index()\n",
    "        tmp = tmp_total.merge(tmp_module, on='CUST_NO', how='left')\n",
    "        tmp[f'module_{module}_cnt_ratio'] = tmp[f'module_{module}_cnt_ratio'] / (tmp['total_cnt'] + 1)\n",
    "        feature_test = feature_test.merge(tmp[['CUST_NO', f'module_{module}_cnt_ratio']], on='CUST_NO', how='left')\n",
    "    \n",
    "    # 验证特征数量一致\n",
    "    assert feature_train.shape[1] == feature_test.shape[1], f\"特征数量不一致! 训练集:{feature_train.shape[1]}, 测试集:{feature_test.shape[1]}\"\n",
    "    print(f\"特征数量验证通过: {feature_train.shape[1] - 1} 个特征\")\n",
    "    \n",
    "    return feature_train, feature_test\n",
    "\n",
    "# 生成训练集和测试集的Top特征\n",
    "print(\"\\\\n生成训练集和测试集Top页面/模块特征...\")\n",
    "train_top_features, test_top_features = gen_mb_top_page_module_features(TRAIN_MB_PAGEVIEW_DTL_data, A_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"训练集Top特征数量: {train_top_features.shape[1] - 1}\")\n",
    "print(f\"测试集Top特征数量: {test_top_features.shape[1] - 1}\")\n",
    "\n",
    "print(\"\\\\nTop特征生成完成!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "25a13ef8",
   "metadata": {},
   "source": [
    "## 5. 时间段行为特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "a4043162",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\\n生成训练集时间段特征...\n",
      "生成月度统计特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "月度特征: 100%|██████████| 3/3 [00:00<00:00, 157.21it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成工作日/周末特征...\n",
      "生成月初/月末特征...\n",
      "生成周几特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "周几特征: 100%|██████████| 7/7 [00:00<00:00, 655.83it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成时段特征...\n",
      "训练集时间段特征数量: 35\\n\n",
      "生成测试集时间段特征...\n",
      "生成月度统计特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "月度特征: 100%|██████████| 3/3 [00:00<00:00, 211.60it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成工作日/周末特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成月初/月末特征...\n",
      "生成周几特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "周几特征: 100%|██████████| 7/7 [00:00<00:00, 706.42it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成时段特征...\n",
      "测试集时间段特征数量: 35\n",
      "\\n时间段特征生成完成!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "def gen_mb_time_period_features(df):\n",
    "    \"\"\"\n",
    "    Aggregate visit behaviour over different time windows.\n",
    "    Covers: month, weekday/weekend, month start/end, day of week and\n",
    "    (when available) intraday periods. Returns one row per CUST_NO.\n",
    "    \"\"\"\n",
    "    # One row per customer; every stat below is left-joined onto this frame.\n",
    "    feature = df[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
    "    \n",
    "    print(\"生成月度统计特征...\")\n",
    "    # Per-month statistics (0 = most recent month, 2 = oldest)\n",
    "    for month in tqdm([0, 1, 2], desc='月度特征'):\n",
    "        df_month = df[df['date_months_to_now'] == month]\n",
    "        \n",
    "        # Visit count in this month\n",
    "        tmp = df_month.groupby('CUST_NO').size().to_frame(f'month_{month}_visit_cnt').reset_index()\n",
    "        feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Distinct pages visited in this month\n",
    "        tmp = df_month.groupby('CUST_NO')['CUR_PAGE'].nunique().to_frame(\n",
    "            f'month_{month}_page_nunique'\n",
    "        ).reset_index()\n",
    "        feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Distinct modules visited in this month\n",
    "        tmp = df_month.groupby('CUST_NO')['MOD_NAME'].nunique().to_frame(\n",
    "            f'month_{month}_module_nunique'\n",
    "        ).reset_index()\n",
    "        feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Active days in this month\n",
    "        tmp = df_month.groupby('CUST_NO')['date_days_to_now'].nunique().to_frame(\n",
    "            f'month_{month}_active_days'\n",
    "        ).reset_index()\n",
    "        feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Distinct events in this month\n",
    "        tmp = df_month.groupby('CUST_NO')['EVENT_NAME'].nunique().to_frame(\n",
    "            f'month_{month}_event_nunique'\n",
    "        ).reset_index()\n",
    "        feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(\"生成工作日/周末特征...\")\n",
    "    # Weekday vs weekend behaviour\n",
    "    df_weekday = df[df['opr_is_weekend'] == 0]\n",
    "    df_weekend = df[df['opr_is_weekend'] == 1]\n",
    "    \n",
    "    # Weekday stats\n",
    "    tmp = df_weekday.groupby('CUST_NO').agg({\n",
    "        'CUR_PAGE': ['count', 'nunique'],\n",
    "        'MOD_NAME': 'nunique'\n",
    "    })\n",
    "    tmp.columns = ['weekday_visit_cnt', 'weekday_page_nunique', 'weekday_module_nunique']\n",
    "    tmp = tmp.reset_index()\n",
    "    feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Weekend stats\n",
    "    tmp = df_weekend.groupby('CUST_NO').agg({\n",
    "        'CUR_PAGE': ['count', 'nunique'],\n",
    "        'MOD_NAME': 'nunique'\n",
    "    })\n",
    "    tmp.columns = ['weekend_visit_cnt', 'weekend_page_nunique', 'weekend_module_nunique']\n",
    "    tmp = tmp.reset_index()\n",
    "    feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Weekday/weekend ratio (+1 guards against division by zero)\n",
    "    feature['weekday_weekend_cnt_ratio'] = feature['weekday_visit_cnt'] / (feature['weekend_visit_cnt'] + 1)\n",
    "    \n",
    "    print(\"生成月初/月末特征...\")\n",
    "    # Month-start / month-end behaviour\n",
    "    df_month_start = df[df['opr_is_month_start'] == 1]\n",
    "    df_month_end = df[df['opr_is_month_end'] == 1]\n",
    "    \n",
    "    # Visits at the start of the month\n",
    "    tmp = df_month_start.groupby('CUST_NO').size().to_frame('month_start_visit_cnt').reset_index()\n",
    "    feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Visits at the end of the month\n",
    "    tmp = df_month_end.groupby('CUST_NO').size().to_frame('month_end_visit_cnt').reset_index()\n",
    "    feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(\"生成周几特征...\")\n",
    "    # Per day-of-week visit counts\n",
    "    for dayofweek in tqdm(range(7), desc='周几特征'):\n",
    "        df_day = df[df['opr_dayofweek'] == dayofweek]\n",
    "        tmp = df_day.groupby('CUST_NO').size().to_frame(f'dayofweek_{dayofweek}_cnt').reset_index()\n",
    "        feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Intraday-period features (only when an hour column is present)\n",
    "    if 'opr_hour' in df.columns:\n",
    "        print(\"生成时段特征...\")\n",
    "        time_periods = [\n",
    "            (0, 6, 'night'),\n",
    "            (6, 12, 'morning'),\n",
    "            (12, 18, 'afternoon'),\n",
    "            (18, 24, 'evening')\n",
    "        ]\n",
    "        for start_hour, end_hour, period_name in time_periods:\n",
    "            df_time = df[(df['opr_hour'] >= start_hour) & (df['opr_hour'] < end_hour)]\n",
    "            tmp = df_time.groupby('CUST_NO').size().to_frame(f'time_{period_name}_cnt').reset_index()\n",
    "            feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    return feature\n",
    "\n",
    "# Build the time-period features for the train and test sets.\n",
    "# (escape bug fixed: these prints previously emitted a literal backslash-n)\n",
    "print(\"\\n生成训练集时间段特征...\")\n",
    "train_time_features = gen_mb_time_period_features(TRAIN_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"训练集时间段特征数量: {train_time_features.shape[1] - 1}\\n\")\n",
    "\n",
    "print(\"生成测试集时间段特征...\")\n",
    "test_time_features = gen_mb_time_period_features(A_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"测试集时间段特征数量: {test_time_features.shape[1] - 1}\")\n",
    "\n",
    "print(\"\\n时间段特征生成完成!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "27525e66",
   "metadata": {},
   "source": [
    "## 6. 用户访问习惯特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "fa96c0b2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\\n生成训练集用户习惯特征...\n",
      "生成整体访问统计...\n",
      "生成访问频率特征...\n",
      "生成页面浏览深度特征...\n",
      "生成模块切换频率特征...\n",
      "生成访问平均间隔特征...\n",
      "训练集用户习惯特征数量: 30\\n\n",
      "生成测试集用户习惯特征...\n",
      "生成整体访问统计...\n",
      "生成访问频率特征...\n",
      "生成页面浏览深度特征...\n",
      "生成模块切换频率特征...\n",
      "生成访问平均间隔特征...\n",
      "测试集用户习惯特征数量: 30\n",
      "\\n用户习惯特征生成完成!\n"
     ]
    }
   ],
   "source": [
    "def gen_mb_user_habit_features(df):\n",
    "    \"\"\"\n",
    "    Mine per-customer visit habits and preferences.\n",
    "    Returns one row per CUST_NO with overall stats, visit frequency,\n",
    "    browsing depth, module/page switching and visit-interval features.\n",
    "    \"\"\"\n",
    "    feature = df[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
    "    \n",
    "    print(\"生成整体访问统计...\")\n",
    "    # Overall visit statistics\n",
    "    tmp = df.groupby('CUST_NO').agg({\n",
    "        'CUR_PAGE': ['count', 'nunique'],\n",
    "        'LAST_PAGE': 'nunique',\n",
    "        'MOD_NAME': 'nunique',\n",
    "        'EVENT_NAME': 'nunique',\n",
    "        'date_days_to_now': ['min', 'max', 'mean', 'std', 'nunique']\n",
    "    })\n",
    "    tmp.columns = [\n",
    "        'total_visit_cnt', 'total_cur_page_nunique', 'total_last_page_nunique',\n",
    "        'total_module_nunique', 'total_event_nunique',\n",
    "        'visit_days_min', 'visit_days_max', 'visit_days_mean', 'visit_days_std', 'visit_active_days'\n",
    "    ]\n",
    "    tmp = tmp.reset_index()\n",
    "    feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Derived features (+1 everywhere guards against division by zero)\n",
    "    feature['visit_days_span'] = feature['visit_days_max'] - feature['visit_days_min']\n",
    "    feature['visit_concentration'] = 1 / (feature['visit_days_std'] + 1)\n",
    "    feature['avg_pages_per_day'] = feature['total_cur_page_nunique'] / (feature['visit_days_span'] + 1)\n",
    "    feature['active_days_ratio'] = feature['visit_active_days'] / (feature['visit_days_span'] + 1)\n",
    "    \n",
    "    print(\"生成访问频率特征...\")\n",
    "    # Visits per customer per day\n",
    "    df_daily_visits = df.groupby(['CUST_NO', 'date_days_to_now']).size().reset_index(name='daily_visits')\n",
    "    \n",
    "    # skew/kurt need more than one observation; fall back to 0 otherwise\n",
    "    tmp = df_daily_visits.groupby('CUST_NO')['daily_visits'].agg([\n",
    "        'mean', 'max', 'min', 'std',\n",
    "        ('skew', lambda x: x.skew() if len(x) > 1 else 0),\n",
    "        ('kurt', lambda x: x.kurt() if len(x) > 1 else 0)\n",
    "    ])\n",
    "    tmp.columns = [f'daily_visits_{col}' for col in tmp.columns]\n",
    "    tmp = tmp.reset_index()\n",
    "    feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(\"生成页面浏览深度特征...\")\n",
    "    # Share of visits going to each customer's single favourite page\n",
    "    df_page_freq = df.groupby(['CUST_NO', 'CUR_PAGE']).size().reset_index(name='page_cnt')\n",
    "    df_top1_page = df_page_freq.sort_values(['CUST_NO', 'page_cnt'], ascending=[True, False])\n",
    "    df_top1_page = df_top1_page.groupby('CUST_NO').first().reset_index()\n",
    "    \n",
    "    tmp_total = df.groupby('CUST_NO').size().to_frame('total').reset_index()\n",
    "    df_top1_page = df_top1_page.merge(tmp_total, on='CUST_NO', how='left')\n",
    "    df_top1_page['top1_page_ratio'] = df_top1_page['page_cnt'] / df_top1_page['total']\n",
    "    feature = feature.merge(df_top1_page[['CUST_NO', 'top1_page_ratio']], on='CUST_NO', how='left')\n",
    "    \n",
    "    # Share of visits covered by each customer's top-3 pages\n",
    "    df_top3_page = df_page_freq.sort_values(['CUST_NO', 'page_cnt'], ascending=[True, False])\n",
    "    df_top3_page = df_top3_page.groupby('CUST_NO').head(3).groupby('CUST_NO')['page_cnt'].sum().to_frame('top3_page_cnt').reset_index()\n",
    "    df_top3_page = df_top3_page.merge(tmp_total, on='CUST_NO', how='left')\n",
    "    df_top3_page['top3_page_ratio'] = df_top3_page['top3_page_cnt'] / df_top3_page['total']\n",
    "    feature = feature.merge(df_top3_page[['CUST_NO', 'top3_page_ratio']], on='CUST_NO', how='left')\n",
    "    \n",
    "    print(\"生成模块切换频率特征...\")\n",
    "    # Module switches between consecutive records of one customer\n",
    "    df_sorted = df.sort_values(['CUST_NO', 'OPR_DATE', 'date_days_to_now']).reset_index(drop=True)\n",
    "    df_sorted['next_module'] = df_sorted.groupby('CUST_NO')['MOD_NAME'].shift(-1)\n",
    "    # The CUST_NO comparison masks out the boundary row between customers\n",
    "    df_sorted['module_switch'] = (\n",
    "        (df_sorted['MOD_NAME'] != df_sorted['next_module']) & \n",
    "        (df_sorted['CUST_NO'] == df_sorted.groupby('CUST_NO')['CUST_NO'].shift(-1))\n",
    "    ).astype(int)\n",
    "    \n",
    "    tmp = df_sorted.groupby('CUST_NO')['module_switch'].agg(['sum', 'mean'])\n",
    "    tmp.columns = ['module_switch_cnt', 'module_switch_ratio']\n",
    "    tmp = tmp.reset_index()\n",
    "    feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Page switches between consecutive records of one customer\n",
    "    df_sorted['next_page'] = df_sorted.groupby('CUST_NO')['CUR_PAGE'].shift(-1)\n",
    "    df_sorted['page_switch'] = (\n",
    "        (df_sorted['CUR_PAGE'] != df_sorted['next_page']) & \n",
    "        (df_sorted['CUST_NO'] == df_sorted.groupby('CUST_NO')['CUST_NO'].shift(-1))\n",
    "    ).astype(int)\n",
    "    \n",
    "    tmp = df_sorted.groupby('CUST_NO')['page_switch'].agg(['sum', 'mean'])\n",
    "    tmp.columns = ['page_switch_cnt', 'page_switch_ratio']\n",
    "    tmp = tmp.reset_index()\n",
    "    feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(\"生成访问平均间隔特征...\")\n",
    "    # Average gap (in days) between consecutive visits.\n",
    "    # Rows are sorted ascending by date_days_to_now, so the current row is\n",
    "    # always >= the previous one; subtracting current - previous keeps the\n",
    "    # interval non-negative (the old operand order made every interval <= 0).\n",
    "    df_sorted_by_date = df.sort_values(['CUST_NO', 'date_days_to_now']).reset_index(drop=True)\n",
    "    df_sorted_by_date['prev_visit_days'] = df_sorted_by_date.groupby('CUST_NO')['date_days_to_now'].shift(1)\n",
    "    df_sorted_by_date['visit_interval'] = df_sorted_by_date['date_days_to_now'] - df_sorted_by_date['prev_visit_days']\n",
    "    \n",
    "    tmp = df_sorted_by_date.groupby('CUST_NO')['visit_interval'].agg(['mean', 'max', 'min', 'std'])\n",
    "    tmp.columns = [f'visit_interval_{col}' for col in tmp.columns]\n",
    "    tmp = tmp.reset_index()\n",
    "    feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    return feature\n",
    "\n",
    "# Build the habit features for the train and test sets\n",
    "print(\"\\n生成训练集用户习惯特征...\")\n",
    "train_habit_features = gen_mb_user_habit_features(TRAIN_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"训练集用户习惯特征数量: {train_habit_features.shape[1] - 1}\\n\")\n",
    "\n",
    "print(\"生成测试集用户习惯特征...\")\n",
    "test_habit_features = gen_mb_user_habit_features(A_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"测试集用户习惯特征数量: {test_habit_features.shape[1] - 1}\")\n",
    "\n",
    "print(\"\\n用户习惯特征生成完成!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9e012da5",
   "metadata": {},
   "source": [
    "## 7. 趋势变化特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "c3ccbcd0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\\n生成训练集趋势变化特征...\n",
      "生成月度趋势特征...\n",
      "生成月度变化率特征...\n",
      "生成月度占比特征...\n",
      "生成趋势方向特征...\n",
      "生成周度趋势特征...\n",
      "训练集趋势特征数量: 36\\n\n",
      "生成测试集趋势变化特征...\n",
      "生成月度趋势特征...\n",
      "生成月度变化率特征...\n",
      "生成月度占比特征...\n",
      "生成趋势方向特征...\n",
      "生成周度趋势特征...\n",
      "测试集趋势特征数量: 36\n",
      "\\n趋势特征生成完成!\n"
     ]
    }
   ],
   "source": [
    "def gen_mb_trend_features(df):\n",
    "    \"\"\"\n",
    "    Capture trends in visit behaviour by comparing monthly\n",
    "    (and recent weekly) activity levels per customer.\n",
    "    \"\"\"\n",
    "    feature = df[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
    "    \n",
    "    print(\"生成月度趋势特征...\")\n",
    "    # Monthly activity (0 = most recent month, 2 = oldest)\n",
    "    month_stats = {}\n",
    "    for month in [0, 1, 2]:\n",
    "        df_month = df[df['date_months_to_now'] == month]\n",
    "        tmp = df_month.groupby('CUST_NO').agg({\n",
    "            'CUR_PAGE': ['count', 'nunique'],\n",
    "            'MOD_NAME': 'nunique',\n",
    "            'EVENT_NAME': 'nunique'\n",
    "        })\n",
    "        tmp.columns = [f'm{month}_visit_cnt', f'm{month}_page_nunique', \n",
    "                      f'm{month}_module_nunique', f'm{month}_event_nunique']\n",
    "        month_stats[month] = tmp.reset_index()\n",
    "    \n",
    "    # Merge the monthly stats onto the per-customer frame\n",
    "    for month in [0, 1, 2]:\n",
    "        feature = feature.merge(month_stats[month], on='CUST_NO', how='left')\n",
    "    \n",
    "    print(\"生成月度变化率特征...\")\n",
    "    # Month-over-month change rates (+1 guards against division by zero)\n",
    "    # Latest month (m0) vs the month before (m1)\n",
    "    feature['m0_m1_visit_change'] = (feature['m0_visit_cnt'] - feature['m1_visit_cnt']) / (feature['m1_visit_cnt'] + 1)\n",
    "    feature['m0_m1_page_change'] = (feature['m0_page_nunique'] - feature['m1_page_nunique']) / (feature['m1_page_nunique'] + 1)\n",
    "    feature['m0_m1_module_change'] = (feature['m0_module_nunique'] - feature['m1_module_nunique']) / (feature['m1_module_nunique'] + 1)\n",
    "    \n",
    "    # Middle month (m1) vs the oldest month (m2)\n",
    "    feature['m1_m2_visit_change'] = (feature['m1_visit_cnt'] - feature['m2_visit_cnt']) / (feature['m2_visit_cnt'] + 1)\n",
    "    feature['m1_m2_page_change'] = (feature['m1_page_nunique'] - feature['m2_page_nunique']) / (feature['m2_page_nunique'] + 1)\n",
    "    feature['m1_m2_module_change'] = (feature['m1_module_nunique'] - feature['m2_module_nunique']) / (feature['m2_module_nunique'] + 1)\n",
    "    \n",
    "    # Latest month (m0) vs the oldest month (m2)\n",
    "    feature['m0_m2_visit_change'] = (feature['m0_visit_cnt'] - feature['m2_visit_cnt']) / (feature['m2_visit_cnt'] + 1)\n",
    "    feature['m0_m2_page_change'] = (feature['m0_page_nunique'] - feature['m2_page_nunique']) / (feature['m2_page_nunique'] + 1)\n",
    "    \n",
    "    print(\"生成月度占比特征...\")\n",
    "    # Share of the 3-month total that falls in each month\n",
    "    feature['total_3months_visit'] = feature['m0_visit_cnt'].fillna(0) + feature['m1_visit_cnt'].fillna(0) + feature['m2_visit_cnt'].fillna(0)\n",
    "    feature['m0_visit_ratio'] = feature['m0_visit_cnt'] / (feature['total_3months_visit'] + 1)\n",
    "    feature['m1_visit_ratio'] = feature['m1_visit_cnt'] / (feature['total_3months_visit'] + 1)\n",
    "    feature['m2_visit_ratio'] = feature['m2_visit_cnt'] / (feature['total_3months_visit'] + 1)\n",
    "    \n",
    "    print(\"生成趋势方向特征...\")\n",
    "    # Trend direction: 1 = steadily rising, -1 = steadily falling, 0 = mixed\n",
    "    feature['trend_direction'] = 0\n",
    "    feature.loc[(feature['m0_visit_cnt'] > feature['m1_visit_cnt']) & \n",
    "                (feature['m1_visit_cnt'] > feature['m2_visit_cnt']), 'trend_direction'] = 1  # steadily rising\n",
    "    feature.loc[(feature['m0_visit_cnt'] < feature['m1_visit_cnt']) & \n",
    "                (feature['m1_visit_cnt'] < feature['m2_visit_cnt']), 'trend_direction'] = -1  # steadily falling\n",
    "    \n",
    "    # Visit stability: coefficient of variation over the three months\n",
    "    feature['visit_stability'] = feature[['m0_visit_cnt', 'm1_visit_cnt', 'm2_visit_cnt']].std(axis=1) / \\\n",
    "                                 (feature[['m0_visit_cnt', 'm1_visit_cnt', 'm2_visit_cnt']].mean(axis=1) + 1)\n",
    "    \n",
    "    print(\"生成周度趋势特征...\")\n",
    "    # Weekly trend over the 4 most recent weeks (0 = current week)\n",
    "    for week in range(4):\n",
    "        df_week = df[df['date_weeks_to_now'] == week]\n",
    "        tmp = df_week.groupby('CUST_NO').agg({\n",
    "            'CUR_PAGE': ['count', 'nunique']\n",
    "        })\n",
    "        tmp.columns = [f'w{week}_visit_cnt', f'w{week}_page_nunique']\n",
    "        tmp = tmp.reset_index()\n",
    "        feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Week-over-week change rates\n",
    "    feature['w0_w1_visit_change'] = (feature['w0_visit_cnt'] - feature['w1_visit_cnt']) / (feature['w1_visit_cnt'] + 1)\n",
    "    feature['w1_w2_visit_change'] = (feature['w1_visit_cnt'] - feature['w2_visit_cnt']) / (feature['w2_visit_cnt'] + 1)\n",
    "    \n",
    "    return feature\n",
    "\n",
    "# Build the trend features for the train and test sets\n",
    "print(\"\\n生成训练集趋势变化特征...\")\n",
    "train_trend_features = gen_mb_trend_features(TRAIN_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"训练集趋势特征数量: {train_trend_features.shape[1] - 1}\\n\")\n",
    "\n",
    "print(\"生成测试集趋势变化特征...\")\n",
    "test_trend_features = gen_mb_trend_features(A_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"测试集趋势特征数量: {test_trend_features.shape[1] - 1}\")\n",
    "\n",
    "print(\"\\n趋势特征生成完成!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a4d5c7b2",
   "metadata": {},
   "source": [
    "## 8. 事件交互特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "dc87c5c8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\\n生成训练集和测试集事件交互特征...\n",
      "生成事件统计特征...\n",
      "生成Top事件特征...\n",
      "使用训练集Top 2 事件: ['0b7ff5ad8f991ce1fc7fb84236a9eb82', '1150705395f974a6cc2e09e6a9b11bed']\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Top事件特征: 100%|██████████| 2/2 [00:00<00:00, 127.38it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成模块-事件组合特征...\n",
      "生成页面-事件组合特征...\n",
      "训练集事件特征数量: 13\n",
      "测试集事件特征数量: 13\n",
      "\\n事件交互特征生成完成!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "def gen_mb_event_features(df_train, df_test):\n",
    "    \"\"\"\n",
    "    Build event-interaction features.\n",
    "    The Top-event list is taken from the TRAIN set only, so train and\n",
    "    test end up with an identical feature space.\n",
    "    \n",
    "    Args:\n",
    "        df_train: train-set page-view records\n",
    "        df_test: test-set page-view records\n",
    "    \n",
    "    Returns:\n",
    "        feature_train: train-set features\n",
    "        feature_test: test-set features\n",
    "    \"\"\"\n",
    "    # One row per customer in each split\n",
    "    feature_train = df_train[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
    "    feature_test = df_test[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
    "    \n",
    "    print(\"生成事件统计特征...\")\n",
    "    # Overall event stats, train\n",
    "    tmp_train = df_train.groupby('CUST_NO')['EVENT_NAME'].agg([\n",
    "        ('event_cnt', 'count'),\n",
    "        ('event_nunique', 'nunique')\n",
    "    ]).reset_index()\n",
    "    feature_train = feature_train.merge(tmp_train, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Overall event stats, test\n",
    "    tmp_test = df_test.groupby('CUST_NO')['EVENT_NAME'].agg([\n",
    "        ('event_cnt', 'count'),\n",
    "        ('event_nunique', 'nunique')\n",
    "    ]).reset_index()\n",
    "    feature_test = feature_test.merge(tmp_test, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Event diversity (+1 guards against division by zero)\n",
    "    feature_train['event_diversity'] = feature_train['event_nunique'] / (feature_train['event_cnt'] + 1)\n",
    "    feature_test['event_diversity'] = feature_test['event_nunique'] / (feature_test['event_cnt'] + 1)\n",
    "    \n",
    "    print(\"生成Top事件特征...\")\n",
    "    # *** Key point: the train set's Top-20 events define the feature space ***\n",
    "    top_events = df_train['EVENT_NAME'].value_counts().head(20).index.tolist()\n",
    "    print(f\"使用训练集Top {len(top_events)} 事件: {top_events}\")\n",
    "    \n",
    "    # Per-customer totals are loop-invariant; compute them once, not per event\n",
    "    tmp_total_train = df_train.groupby('CUST_NO').size().to_frame('total').reset_index()\n",
    "    tmp_total_test = df_test.groupby('CUST_NO').size().to_frame('total').reset_index()\n",
    "    \n",
    "    for event in tqdm(top_events, desc='Top事件特征'):\n",
    "        # Train-set per-event count and ratio\n",
    "        df_event_train = df_train[df_train['EVENT_NAME'] == event]\n",
    "        tmp = df_event_train.groupby('CUST_NO').size().to_frame(f'event_{event}_cnt').reset_index()\n",
    "        feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        tmp_event = df_event_train.groupby('CUST_NO').size().to_frame(f'event_{event}_ratio').reset_index()\n",
    "        tmp = tmp_total_train.merge(tmp_event, on='CUST_NO', how='left')\n",
    "        tmp[f'event_{event}_ratio'] = tmp[f'event_{event}_ratio'] / (tmp['total'] + 1)\n",
    "        feature_train = feature_train.merge(tmp[['CUST_NO', f'event_{event}_ratio']], on='CUST_NO', how='left')\n",
    "        \n",
    "        # Test-set per-event count and ratio (same event list)\n",
    "        df_event_test = df_test[df_test['EVENT_NAME'] == event]\n",
    "        tmp = df_event_test.groupby('CUST_NO').size().to_frame(f'event_{event}_cnt').reset_index()\n",
    "        feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "        \n",
    "        tmp_event = df_event_test.groupby('CUST_NO').size().to_frame(f'event_{event}_ratio').reset_index()\n",
    "        tmp = tmp_total_test.merge(tmp_event, on='CUST_NO', how='left')\n",
    "        tmp[f'event_{event}_ratio'] = tmp[f'event_{event}_ratio'] / (tmp['total'] + 1)\n",
    "        feature_test = feature_test.merge(tmp[['CUST_NO', f'event_{event}_ratio']], on='CUST_NO', how='left')\n",
    "    \n",
    "    print(\"生成模块-事件组合特征...\")\n",
    "    # Module-event combinations, train\n",
    "    df_train_copy = df_train.copy()\n",
    "    df_train_copy['mod_event'] = df_train_copy['MOD_NAME'].astype(str) + '_' + df_train_copy['EVENT_NAME'].astype(str)\n",
    "    tmp = df_train_copy.groupby('CUST_NO')['mod_event'].agg([\n",
    "        ('mod_event_nunique', 'nunique'),\n",
    "        ('mod_event_cnt', 'count')\n",
    "    ]).reset_index()\n",
    "    feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "    feature_train['mod_event_diversity'] = feature_train['mod_event_nunique'] / (feature_train['mod_event_cnt'] + 1)\n",
    "    \n",
    "    # Module-event combinations, test\n",
    "    df_test_copy = df_test.copy()\n",
    "    df_test_copy['mod_event'] = df_test_copy['MOD_NAME'].astype(str) + '_' + df_test_copy['EVENT_NAME'].astype(str)\n",
    "    tmp = df_test_copy.groupby('CUST_NO')['mod_event'].agg([\n",
    "        ('mod_event_nunique', 'nunique'),\n",
    "        ('mod_event_cnt', 'count')\n",
    "    ]).reset_index()\n",
    "    feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "    feature_test['mod_event_diversity'] = feature_test['mod_event_nunique'] / (feature_test['mod_event_cnt'] + 1)\n",
    "    \n",
    "    print(\"生成页面-事件组合特征...\")\n",
    "    # Page-event combinations, train\n",
    "    df_train_copy['page_event'] = df_train_copy['CUR_PAGE'].astype(str) + '_' + df_train_copy['EVENT_NAME'].astype(str)\n",
    "    tmp = df_train_copy.groupby('CUST_NO')['page_event'].agg([\n",
    "        ('page_event_nunique', 'nunique'),\n",
    "        ('page_event_cnt', 'count')\n",
    "    ]).reset_index()\n",
    "    feature_train = feature_train.merge(tmp, on='CUST_NO', how='left')\n",
    "    feature_train['page_event_diversity'] = feature_train['page_event_nunique'] / (feature_train['page_event_cnt'] + 1)\n",
    "    \n",
    "    # Page-event combinations, test\n",
    "    df_test_copy['page_event'] = df_test_copy['CUR_PAGE'].astype(str) + '_' + df_test_copy['EVENT_NAME'].astype(str)\n",
    "    tmp = df_test_copy.groupby('CUST_NO')['page_event'].agg([\n",
    "        ('page_event_nunique', 'nunique'),\n",
    "        ('page_event_cnt', 'count')\n",
    "    ]).reset_index()\n",
    "    feature_test = feature_test.merge(tmp, on='CUST_NO', how='left')\n",
    "    feature_test['page_event_diversity'] = feature_test['page_event_nunique'] / (feature_test['page_event_cnt'] + 1)\n",
    "    \n",
    "    # Sanity check: both splits must expose the same feature space\n",
    "    assert feature_train.shape[1] == feature_test.shape[1], \\\n",
    "        f\"训练集特征数({feature_train.shape[1]})与测试集特征数({feature_test.shape[1]})不一致!\"\n",
    "    \n",
    "    return feature_train, feature_test\n",
    "\n",
    "# Build the event features for the train and test sets\n",
    "print(\"\\n生成训练集和测试集事件交互特征...\")\n",
    "train_event_features, test_event_features = gen_mb_event_features(TRAIN_MB_PAGEVIEW_DTL_data, A_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"训练集事件特征数量: {train_event_features.shape[1] - 1}\")\n",
    "print(f\"测试集事件特征数量: {test_event_features.shape[1] - 1}\")\n",
    "\n",
    "print(\"\\n事件交互特征生成完成!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "09b1e591",
   "metadata": {},
   "source": [
    "## 9. 会话深度特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "c70b3cc9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\\n生成训练集会话深度特征...\n",
      "生成会话统计特征...\n",
      "生成会话长度特征...\n",
      "生成会话集中度特征...\n",
      "训练集会话特征数量: 19\\n\n",
      "生成测试集会话深度特征...\n",
      "生成会话统计特征...\n",
      "生成会话长度特征...\n",
      "生成会话集中度特征...\n",
      "测试集会话特征数量: 19\n",
      "\\n会话特征生成完成!\n"
     ]
    }
   ],
   "source": [
    "def gen_mb_session_features(df):\n",
    "    \"\"\"\n",
    "    Build session-depth features, treating all records of one\n",
    "    customer on one day as a single session.\n",
    "    \"\"\"\n",
    "    feature = df[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
    "    \n",
    "    print(\"生成会话统计特征...\")\n",
    "    # Aggregate per (customer, day) session\n",
    "    df_session = df.groupby(['CUST_NO', 'date_days_to_now']).agg({\n",
    "        'CUR_PAGE': ['count', 'nunique'],\n",
    "        'MOD_NAME': 'nunique',\n",
    "        'EVENT_NAME': 'nunique'\n",
    "    })\n",
    "    df_session.columns = ['session_page_cnt', 'session_page_nunique', 'session_mod_nunique', 'session_event_nunique']\n",
    "    df_session = df_session.reset_index()\n",
    "    \n",
    "    # Per-customer statistics over their sessions\n",
    "    tmp = df_session.groupby('CUST_NO').agg({\n",
    "        'session_page_cnt': ['mean', 'max', 'min', 'std'],\n",
    "        'session_page_nunique': ['mean', 'max', 'min', 'std'],\n",
    "        'session_mod_nunique': ['mean', 'max', 'min'],\n",
    "        'session_event_nunique': ['mean', 'max', 'min']\n",
    "    })\n",
    "    tmp.columns = ['_'.join(col).strip() for col in tmp.columns.values]\n",
    "    tmp = tmp.reset_index()\n",
    "    feature = feature.merge(tmp, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(\"生成会话长度特征...\")\n",
    "    # Session length: page views per session\n",
    "    feature['avg_session_length'] = feature['session_page_cnt_mean']\n",
    "    feature['max_session_length'] = feature['session_page_cnt_max']\n",
    "    \n",
    "    # Session depth: distinct pages per session\n",
    "    feature['avg_session_depth'] = feature['session_page_nunique_mean']\n",
    "    feature['max_session_depth'] = feature['session_page_nunique_max']\n",
    "    \n",
    "    print(\"生成会话集中度特征...\")\n",
    "    # Session concentration: higher when session sizes vary little\n",
    "    feature['session_concentration'] = 1 / (feature['session_page_cnt_std'] + 1)\n",
    "    \n",
    "    return feature\n",
    "\n",
    "# Build the session features for the train and test sets\n",
    "print(\"\\n生成训练集会话深度特征...\")\n",
    "train_session_features = gen_mb_session_features(TRAIN_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"训练集会话特征数量: {train_session_features.shape[1] - 1}\\n\")\n",
    "\n",
    "print(\"生成测试集会话深度特征...\")\n",
    "test_session_features = gen_mb_session_features(A_MB_PAGEVIEW_DTL_data)\n",
    "print(f\"测试集会话特征数量: {test_session_features.shape[1] - 1}\")\n",
    "\n",
    "print(\"\\n会话特征生成完成!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7cac9efc",
   "metadata": {},
   "source": [
    "## 10. 特征合并与保存"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "ff9efe3b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "============================================================\n",
      "开始合并所有特征...\n",
      "============================================================\n",
      "\\n合并训练集特征...\n",
      "1. RFM特征: 34 个\n",
      "2. 路径特征: 103 个\n",
      "3. Top特征: 280 个\n",
      "4. 时间段特征: 35 个\n",
      "5. 用户习惯特征: 30 个\n",
      "6. 趋势特征: 36 个\n",
      "7. 事件特征: 13 个\n",
      "8. 会话特征: 19 个\n",
      "\\n训练集总特征数: 550\n",
      "训练集样本数: 56\n",
      "\\n============================================================\n",
      "合并测试集特征...\n",
      "============================================================\n",
      "1. RFM特征: 34 个\n",
      "2. 路径特征: 103 个\n",
      "3. Top特征: 280 个\n",
      "4. 时间段特征: 35 个\n",
      "5. 用户习惯特征: 30 个\n",
      "6. 趋势特征: 36 个\n",
      "7. 事件特征: 13 个\n",
      "8. 会话特征: 19 个\n",
      "\\n测试集总特征数: 550\n",
      "测试集样本数: 95\n",
      "\\n============================================================\n",
      "特征合并完成!\n",
      "============================================================\n"
     ]
    }
   ],
   "source": [
    "print(\"=\" * 60)\n",
    "print(\"开始合并所有特征...\")\n",
    "print(\"=\" * 60)\n",
    "\n",
    "def merge_feature_groups(feature_groups):\n",
    "    \"\"\"Left-join a list of (label, frame) feature groups on CUST_NO.\n",
    "\n",
    "    The first frame seeds the result; every group's feature count\n",
    "    (columns minus the CUST_NO key) is printed as it is processed.\n",
    "    \"\"\"\n",
    "    merged = None\n",
    "    for i, (label, feats) in enumerate(feature_groups, 1):\n",
    "        if merged is None:\n",
    "            merged = feats.copy()\n",
    "        else:\n",
    "            merged = merged.merge(feats, on='CUST_NO', how='left')\n",
    "        print(f\"{i}. {label}: {feats.shape[1] - 1} 个\")\n",
    "    return merged\n",
    "\n",
    "# Merge every train-set feature group\n",
    "print(\"\\n合并训练集特征...\")\n",
    "train_mb_features = merge_feature_groups([\n",
    "    ('RFM特征', train_rfm_features),\n",
    "    ('路径特征', train_path_features),\n",
    "    ('Top特征', train_top_features),\n",
    "    ('时间段特征', train_time_features),\n",
    "    ('用户习惯特征', train_habit_features),\n",
    "    ('趋势特征', train_trend_features),\n",
    "    ('事件特征', train_event_features),\n",
    "    ('会话特征', train_session_features),\n",
    "])\n",
    "\n",
    "print(f\"\\n训练集总特征数: {train_mb_features.shape[1] - 1}\")\n",
    "print(f\"训练集样本数: {train_mb_features.shape[0]}\")\n",
    "\n",
    "# Merge every test-set feature group\n",
    "print(\"\\n\" + \"=\" * 60)\n",
    "print(\"合并测试集特征...\")\n",
    "print(\"=\" * 60)\n",
    "\n",
    "test_mb_features = merge_feature_groups([\n",
    "    ('RFM特征', test_rfm_features),\n",
    "    ('路径特征', test_path_features),\n",
    "    ('Top特征', test_top_features),\n",
    "    ('时间段特征', test_time_features),\n",
    "    ('用户习惯特征', test_habit_features),\n",
    "    ('趋势特征', test_trend_features),\n",
    "    ('事件特征', test_event_features),\n",
    "    ('会话特征', test_session_features),\n",
    "])\n",
    "\n",
    "print(f\"\\n测试集总特征数: {test_mb_features.shape[1] - 1}\")\n",
    "print(f\"测试集样本数: {test_mb_features.shape[0]}\")\n",
    "\n",
    "print(\"\\n\" + \"=\" * 60)\n",
    "print(\"特征合并完成!\")\n",
    "print(\"=\" * 60)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9f1a0fa7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the merged train/test feature tables as pickles under ./feature,\n",
    "# then preview the training features and list the first 50 feature columns.\n",
    "\n",
    "# Ensure the feature output directory exists. exist_ok=True avoids the\n",
    "# check-then-create race of os.path.exists + os.makedirs.\n",
    "feature_dir = './feature'\n",
    "os.makedirs(feature_dir, exist_ok=True)\n",
    "\n",
    "# Save train-set features\n",
    "train_feature_path = os.path.join(feature_dir, 'train_mb_pageview_features.pkl')\n",
    "with open(train_feature_path, 'wb') as f:\n",
    "    pickle.dump(train_mb_features, f)\n",
    "print(f\"\\n训练集特征已保存至: {train_feature_path}\")\n",
    "\n",
    "# Save test-set features\n",
    "test_feature_path = os.path.join(feature_dir, 'test_mb_pageview_features.pkl')\n",
    "with open(test_feature_path, 'wb') as f:\n",
    "    pickle.dump(test_mb_features, f)\n",
    "print(f\"测试集特征已保存至: {test_feature_path}\")\n",
    "\n",
    "print(\"\\n\" + \"=\" * 60)\n",
    "print(\"所有特征保存完成!\")\n",
    "print(\"=\" * 60)\n",
    "\n",
    "# Preview a few rows of the saved training features\n",
    "print(\"\\n训练集特征预览:\")\n",
    "print(train_mb_features.head())\n",
    "\n",
    "print(\"\\n特征名称列表(前50个):\")\n",
    "feature_cols = [col for col in train_mb_features.columns if col != 'CUST_NO']\n",
    "for i, col in enumerate(feature_cols[:50], 1):\n",
    "    print(f\"{i}. {col}\")\n",
    "\n",
    "if len(feature_cols) > 50:\n",
    "    print(f\"\\n... 还有 {len(feature_cols) - 50} 个特征未显示\")\n",
    "\n",
    "# Plain string: no placeholders, so no f-prefix needed.\n",
    "print(\"\\n特征工程全部完成!\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "starcup",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
