{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "f8da670f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "b0312dc9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import gc\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import re\n",
    "import time\n",
    "from scipy import stats\n",
    "import matplotlib.pyplot as plt\n",
    "import category_encoders as ce\n",
    "import networkx as nx\n",
    "import pickle\n",
    "import lightgbm as lgb\n",
    "import catboost as cat\n",
    "import xgboost as xgb\n",
    "from datetime import timedelta\n",
    "from gensim.models import Word2Vec\n",
    "from io import StringIO\n",
    "from tqdm import tqdm\n",
    "from lightgbm import LGBMClassifier\n",
    "from lightgbm import log_evaluation, early_stopping\n",
    "from sklearn.metrics import roc_curve\n",
    "from scipy.stats import chi2_contingency, pearsonr\n",
    "from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n",
    "from sklearn.feature_extraction import FeatureHasher\n",
    "from sklearn.model_selection import StratifiedKFold, KFold, train_test_split, GridSearchCV\n",
    "from category_encoders import TargetEncoder\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from autogluon.tabular import TabularDataset, TabularPredictor, FeatureMetadata\n",
    "from autogluon.features.generators import AsTypeFeatureGenerator, BulkFeatureGenerator, DropUniqueFeatureGenerator, FillNaFeatureGenerator, PipelineFeatureGenerator\n",
    "from autogluon.features.generators import CategoryFeatureGenerator, IdentityFeatureGenerator, AutoMLPipelineFeatureGenerator\n",
    "from autogluon.common.features.types import R_INT, R_FLOAT\n",
    "from autogluon.core.metrics import make_scorer"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c8a56037",
   "metadata": {},
   "source": [
    "# 数据导入"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3e1c98dc",
   "metadata": {},
   "source": [
    "## 数据导入通用函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "869edf9b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data_from_directory(directory):\n",
    "    \"\"\"\n",
    "    遍历目录加载所有CSV文件，将其作为独立的DataFrame变量\n",
    "\n",
    "    参数:\n",
    "    - directory: 输入的数据路径\n",
    "    \n",
    "    返回:\n",
    "    - 含有数据集名称的列表\n",
    "    \"\"\"\n",
    "    dataset_names = []\n",
    "    for filename in os.listdir(directory):\n",
    "        if filename.endswith(\".csv\"):\n",
    "            dataset_name = os.path.splitext(filename)[0] + '_data' # 获取文件名作为变量名\n",
    "            file_path = os.path.join(directory, filename)  # 完整的文件路径\n",
    "            globals()[dataset_name] = pd.read_csv(file_path)  # 将文件加载为DataFrame并赋值给全局变量\n",
    "            dataset_names.append(dataset_name)\n",
    "            print(f\"数据集 {dataset_name} 已加载为 DataFrame\")\n",
    "\n",
    "    return dataset_names"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9ee0084d",
   "metadata": {},
   "source": [
    "## 导入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "a5bddf6a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集 AGET_PAY_data 已加载为 DataFrame\n",
      "数据集 ASSET_data 已加载为 DataFrame\n",
      "数据集 CCD_TR_DTL_data 已加载为 DataFrame\n",
      "数据集 MB_PAGEVIEW_DTL_data 已加载为 DataFrame\n",
      "数据集 MB_QRYTRNFLW_data 已加载为 DataFrame\n",
      "数据集 MB_TRNFLW_data 已加载为 DataFrame\n",
      "数据集 NATURE_data 已加载为 DataFrame\n",
      "数据集 PROD_HOLD_data 已加载为 DataFrame\n",
      "数据集 TARGET_data 已加载为 DataFrame\n",
      "数据集 TARGET_VALID_data 已加载为 DataFrame\n",
      "数据集 MB_PAGEVIEW_DTL_data 已加载为 DataFrame\n",
      "数据集 MB_QRYTRNFLW_data 已加载为 DataFrame\n",
      "数据集 MB_TRNFLW_data 已加载为 DataFrame\n",
      "数据集 NATURE_data 已加载为 DataFrame\n",
      "数据集 PROD_HOLD_data 已加载为 DataFrame\n",
      "数据集 TARGET_data 已加载为 DataFrame\n",
      "数据集 TARGET_VALID_data 已加载为 DataFrame\n",
      "数据集 TR_APS_DTL_data 已加载为 DataFrame\n",
      "数据集 TR_IBTF_data 已加载为 DataFrame\n",
      "数据集 TR_TPAY_data 已加载为 DataFrame\n",
      "数据集 TR_APS_DTL_data 已加载为 DataFrame\n",
      "数据集 TR_IBTF_data 已加载为 DataFrame\n",
      "数据集 TR_TPAY_data 已加载为 DataFrame\n"
     ]
    }
   ],
   "source": [
    "train_load_dt = '../DATA'\n",
    "train_data_name = load_data_from_directory(train_load_dt)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ed726c42",
   "metadata": {},
   "source": [
    "# 特征工程"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6ae4547b",
   "metadata": {},
   "source": [
    "## 活期交易表数据准备"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "19721649",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "原始数据形状: (345313, 6)\n",
      "\n",
      "数据字段:\n",
      "['APSDTRDAT', 'CUST_NO', 'APSDTRCOD', 'APSDTRAMT', 'APSDABS', 'APSDTRCHL']\n",
      "\n",
      "数据预览:\n",
      "   APSDTRDAT                           CUST_NO  \\\n",
      "0   20250402  3abac600050b2b3ad8876a1caf85beb9   \n",
      "1   20250402  3abac600050b2b3ad8876a1caf85beb9   \n",
      "2   20250402  3abac600050b2b3ad8876a1caf85beb9   \n",
      "3   20250402  3abac600050b2b3ad8876a1caf85beb9   \n",
      "4   20250402  3abac600050b2b3ad8876a1caf85beb9   \n",
      "\n",
      "                          APSDTRCOD  APSDTRAMT  \\\n",
      "0  566a1fdfd622806c20378b970c4cbff3    60000.0   \n",
      "1  566a1fdfd622806c20378b970c4cbff3     2000.0   \n",
      "2  566a1fdfd622806c20378b970c4cbff3     1000.0   \n",
      "3  566a1fdfd622806c20378b970c4cbff3     -100.0   \n",
      "4  566a1fdfd622806c20378b970c4cbff3     2000.0   \n",
      "\n",
      "                            APSDABS                         APSDTRCHL  \n",
      "0  acaf665ffd5ef2fe03b0daaa12d79aab  f1811258c561f96461a243415727b1f5  \n",
      "1  acaf665ffd5ef2fe03b0daaa12d79aab  f1811258c561f96461a243415727b1f5  \n",
      "2  acaf665ffd5ef2fe03b0daaa12d79aab  f1811258c561f96461a243415727b1f5  \n",
      "3  acaf665ffd5ef2fe03b0daaa12d79aab  f1811258c561f96461a243415727b1f5  \n",
      "4  acaf665ffd5ef2fe03b0daaa12d79aab  f1811258c561f96461a243415727b1f5  \n",
      "\n",
      "数据基本信息:\n",
      "<class 'pandas.core.frame.DataFrame'>\n",
      "RangeIndex: 345313 entries, 0 to 345312\n",
      "Data columns (total 6 columns):\n",
      " #   Column     Non-Null Count   Dtype  \n",
      "---  ------     --------------   -----  \n",
      " 0   APSDTRDAT  345313 non-null  int64  \n",
      " 1   CUST_NO    345313 non-null  object \n",
      " 2   APSDTRCOD  345313 non-null  object \n",
      " 3   APSDTRAMT  345313 non-null  float64\n",
      " 4   APSDABS    345313 non-null  object \n",
      " 5   APSDTRCHL  345313 non-null  object \n",
      "dtypes: float64(1), int64(1), object(4)\n",
      "memory usage: 15.8+ MB\n",
      "None\n"
     ]
    }
   ],
   "source": [
    "# 复制数据并进行基础处理\n",
    "tr_aps = TR_APS_DTL_data.copy()\n",
    "\n",
    "print(\"原始数据形状:\", tr_aps.shape)\n",
    "print(\"\\n数据字段:\")\n",
    "print(tr_aps.columns.tolist())\n",
    "print(\"\\n数据预览:\")\n",
    "print(tr_aps.head())\n",
    "print(\"\\n数据基本信息:\")\n",
    "print(tr_aps.info())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "5d746914",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "处理后数据形状: (345313, 15)\n",
      "日期范围: 2025-04-01 00:00:00 至 2025-06-30 00:00:00\n",
      "总天数: 90天\n",
      "客户数: 5,616\n"
     ]
    }
   ],
   "source": [
    "# 数据预处理\n",
    "# 1. 转换日期格式\n",
    "tr_aps['APSDTRDAT'] = pd.to_datetime(tr_aps['APSDTRDAT'].astype(str), format='%Y%m%d')\n",
    "\n",
    "# 2. 计算距今天数\n",
    "end_date = tr_aps['APSDTRDAT'].max()\n",
    "tr_aps['days_to_now'] = (end_date - tr_aps['APSDTRDAT']).dt.days\n",
    "\n",
    "# 3. 提取时间特征\n",
    "tr_aps['year'] = tr_aps['APSDTRDAT'].dt.year\n",
    "tr_aps['month'] = tr_aps['APSDTRDAT'].dt.month\n",
    "tr_aps['day'] = tr_aps['APSDTRDAT'].dt.day\n",
    "tr_aps['weekday'] = tr_aps['APSDTRDAT'].dt.weekday  # 0=周一, 6=周日\n",
    "tr_aps['is_weekend'] = tr_aps['weekday'].isin([5, 6]).astype(int)\n",
    "tr_aps['week_of_year'] = tr_aps['APSDTRDAT'].dt.isocalendar().week\n",
    "\n",
    "# 4. 交易金额绝对值\n",
    "tr_aps['APSDTRAMT_abs'] = tr_aps['APSDTRAMT'].abs()\n",
    "\n",
    "# 5. 交易方向标识\n",
    "tr_aps['is_income'] = (tr_aps['APSDTRAMT'] >= 0).astype(int)  # 1=流入, 0=流出\n",
    "\n",
    "# 6. 排序\n",
    "tr_aps = tr_aps.sort_values(['CUST_NO', 'APSDTRDAT']).reset_index(drop=True)\n",
    "\n",
    "print(f\"处理后数据形状: {tr_aps.shape}\")\n",
    "print(f\"日期范围: {tr_aps['APSDTRDAT'].min()} 至 {tr_aps['APSDTRDAT'].max()}\")\n",
    "print(f\"总天数: {(tr_aps['APSDTRDAT'].max() - tr_aps['APSDTRDAT'].min()).days}天\")\n",
    "print(f\"客户数: {tr_aps['CUST_NO'].nunique():,}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8549bce3",
   "metadata": {},
   "source": [
    "## 特征工程函数定义"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ef2c3dcf",
   "metadata": {},
   "source": [
    "### 1. 时间窗口统计特征函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "f74f33d6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "时间窗口统计特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def time_window_stats(df, group_col, agg_col, window_sizes, prefix, \n",
    "                      agg_funcs=['sum', 'mean', 'count', 'max', 'min', 'std', 'median']):\n",
    "    \"\"\"\n",
    "    时间窗口统计特征\n",
    "    \n",
    "    参数:\n",
    "    - df: 数据框\n",
    "    - group_col: 分组字段 (如'CUST_NO')\n",
    "    - agg_col: 聚合字段 (如'APSDTRAMT_abs')\n",
    "    - window_sizes: 时间窗口列表 (如[1, 3, 7, 15, 30, 60, 90])\n",
    "    - prefix: 特征前缀\n",
    "    - agg_funcs: 聚合函数列表\n",
    "    \n",
    "    返回:\n",
    "    - 特征数据框\n",
    "    \"\"\"\n",
    "    result_list = []\n",
    "    \n",
    "    for window in window_sizes:\n",
    "        # 筛选窗口内数据\n",
    "        sub_df = df[df['days_to_now'] < window].copy()\n",
    "        \n",
    "        if len(sub_df) == 0:\n",
    "            continue\n",
    "            \n",
    "        # 基础统计\n",
    "        agg_dict = {}\n",
    "        for func in agg_funcs:\n",
    "            if func == 'count':\n",
    "                agg_dict[f'{prefix}_{window}d_{func}'] = pd.NamedAgg(column=agg_col, aggfunc='count')\n",
    "            elif func == 'median':\n",
    "                agg_dict[f'{prefix}_{window}d_{func}'] = pd.NamedAgg(column=agg_col, aggfunc=lambda x: x.median())\n",
    "            else:\n",
    "                agg_dict[f'{prefix}_{window}d_{func}'] = pd.NamedAgg(column=agg_col, aggfunc=func)\n",
    "        \n",
    "        window_features = sub_df.groupby(group_col).agg(**agg_dict).reset_index()\n",
    "        result_list.append(window_features)\n",
    "    \n",
    "    # 合并所有窗口特征\n",
    "    if len(result_list) > 0:\n",
    "        result = result_list[0]\n",
    "        for i in range(1, len(result_list)):\n",
    "            result = result.merge(result_list[i], on=group_col, how='outer')\n",
    "        return result\n",
    "    else:\n",
    "        return pd.DataFrame({group_col: df[group_col].unique()})\n",
    "\n",
    "print(\"时间窗口统计特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "19a7e536",
   "metadata": {},
   "source": [
    "### 2. 交易行为特征函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "4eda1135",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "交易行为特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def transaction_behavior_features(df, windows=[7, 15, 30, 60, 90]):\n",
    "    \"\"\"\n",
    "    生成交易行为特征\n",
    "    包括:活跃度、频率、金额分布等\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    for window in windows:\n",
    "        sub_df = df[df['days_to_now'] < window].copy()\n",
    "        \n",
    "        # 交易活跃天数\n",
    "        active_days = sub_df.groupby('CUST_NO')['APSDTRDAT'].nunique().reset_index()\n",
    "        active_days.columns = ['CUST_NO', f'aps_active_days_{window}d']\n",
    "        features = features.merge(active_days, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 交易活跃度 = 交易天数 / 窗口天数\n",
    "        features[f'aps_activity_rate_{window}d'] = features[f'aps_active_days_{window}d'] / window\n",
    "        \n",
    "        # 日均交易笔数\n",
    "        txn_count = sub_df.groupby('CUST_NO').size().reset_index(name=f'aps_txn_count_{window}d')\n",
    "        features = features.merge(txn_count, on='CUST_NO', how='left')\n",
    "        features[f'aps_daily_txn_avg_{window}d'] = features[f'aps_txn_count_{window}d'] / window\n",
    "        \n",
    "        # 不同交易代码数量\n",
    "        cod_count = sub_df.groupby('CUST_NO')['APSDTRCOD'].nunique().reset_index()\n",
    "        cod_count.columns = ['CUST_NO', f'aps_unique_cod_{window}d']\n",
    "        features = features.merge(cod_count, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 不同交易渠道数量\n",
    "        chl_count = sub_df.groupby('CUST_NO')['APSDTRCHL'].nunique().reset_index()\n",
    "        chl_count.columns = ['CUST_NO', f'aps_unique_chl_{window}d']\n",
    "        features = features.merge(chl_count, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 交易金额变异系数 (CV = std / mean)\n",
    "        amt_stats = sub_df.groupby('CUST_NO')['APSDTRAMT_abs'].agg(['mean', 'std']).reset_index()\n",
    "        amt_stats[f'aps_amt_cv_{window}d'] = amt_stats['std'] / (amt_stats['mean'] + 1e-5)\n",
    "        features = features.merge(amt_stats[['CUST_NO', f'aps_amt_cv_{window}d']], on='CUST_NO', how='left')\n",
    "        \n",
    "        # 小额交易占比 (小于100)\n",
    "        small_txn = sub_df[sub_df['APSDTRAMT_abs'] < 100].groupby('CUST_NO').size().reset_index(name='small_count')\n",
    "        total_txn = sub_df.groupby('CUST_NO').size().reset_index(name='total_count')\n",
    "        txn_ratio = small_txn.merge(total_txn, on='CUST_NO', how='right')\n",
    "        txn_ratio[f'aps_small_txn_ratio_{window}d'] = txn_ratio['small_count'] / txn_ratio['total_count']\n",
    "        features = features.merge(txn_ratio[['CUST_NO', f'aps_small_txn_ratio_{window}d']], on='CUST_NO', how='left')\n",
    "        \n",
    "        # 大额交易占比 (大于10000)\n",
    "        large_txn = sub_df[sub_df['APSDTRAMT_abs'] > 10000].groupby('CUST_NO').size().reset_index(name='large_count')\n",
    "        txn_ratio = large_txn.merge(total_txn, on='CUST_NO', how='right')\n",
    "        txn_ratio[f'aps_large_txn_ratio_{window}d'] = txn_ratio['large_count'] / txn_ratio['total_count']\n",
    "        features = features.merge(txn_ratio[['CUST_NO', f'aps_large_txn_ratio_{window}d']], on='CUST_NO', how='left')\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"交易行为特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7523b2f8",
   "metadata": {},
   "source": [
    "### 3. 时序趋势特征函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "82858edb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "时序趋势特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def time_trend_features(df):\n",
    "    \"\"\"\n",
    "    生成时序趋势特征\n",
    "    包括:不同时间窗口对比、趋势变化等\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    # 近期vs远期对比 (7天 vs 30天, 30天 vs 60天, 60天 vs 90天)\n",
    "    comparisons = [(7, 30), (30, 60), (60, 90), (7, 60)]\n",
    "    \n",
    "    for short_w, long_w in comparisons:\n",
    "        # 交易金额对比\n",
    "        short_amt = df[df['days_to_now'] < short_w].groupby('CUST_NO')['APSDTRAMT_abs'].sum().reset_index()\n",
    "        short_amt.columns = ['CUST_NO', 'short_amt']\n",
    "        \n",
    "        long_amt = df[df['days_to_now'] < long_w].groupby('CUST_NO')['APSDTRAMT_abs'].sum().reset_index()\n",
    "        long_amt.columns = ['CUST_NO', 'long_amt']\n",
    "        \n",
    "        compare = features[['CUST_NO']].merge(short_amt, on='CUST_NO', how='left')\n",
    "        compare = compare.merge(long_amt, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 日均交易金额比率\n",
    "        compare[f'aps_amt_ratio_{short_w}d_{long_w}d'] = (compare['short_amt'] / short_w) / ((compare['long_amt'] / long_w) + 1e-5)\n",
    "        features = features.merge(compare[['CUST_NO', f'aps_amt_ratio_{short_w}d_{long_w}d']], on='CUST_NO', how='left')\n",
    "        \n",
    "        # 交易笔数对比\n",
    "        short_cnt = df[df['days_to_now'] < short_w].groupby('CUST_NO').size().reset_index(name='short_cnt')\n",
    "        long_cnt = df[df['days_to_now'] < long_w].groupby('CUST_NO').size().reset_index(name='long_cnt')\n",
    "        \n",
    "        compare = features[['CUST_NO']].merge(short_cnt, on='CUST_NO', how='left')\n",
    "        compare = compare.merge(long_cnt, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 日均交易笔数比率\n",
    "        compare[f'aps_cnt_ratio_{short_w}d_{long_w}d'] = (compare['short_cnt'] / short_w) / ((compare['long_cnt'] / long_w) + 1e-5)\n",
    "        features = features.merge(compare[['CUST_NO', f'aps_cnt_ratio_{short_w}d_{long_w}d']], on='CUST_NO', how='left')\n",
    "    \n",
    "    # 月度趋势特征\n",
    "    monthly_amt = df.groupby(['CUST_NO', 'month'])['APSDTRAMT_abs'].sum().reset_index()\n",
    "    \n",
    "    # 计算月度交易金额的标准差(波动性)\n",
    "    monthly_std = monthly_amt.groupby('CUST_NO')['APSDTRAMT_abs'].std().reset_index()\n",
    "    monthly_std.columns = ['CUST_NO', 'aps_monthly_amt_std']\n",
    "    features = features.merge(monthly_std, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 最近一个月交易金额\n",
    "    last_month = df[df['days_to_now'] < 30].groupby('CUST_NO')['APSDTRAMT_abs'].sum().reset_index()\n",
    "    last_month.columns = ['CUST_NO', 'aps_last_month_amt']\n",
    "    features = features.merge(last_month, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 次近一个月交易金额(30-60天)\n",
    "    second_month = df[(df['days_to_now'] >= 30) & (df['days_to_now'] < 60)].groupby('CUST_NO')['APSDTRAMT_abs'].sum().reset_index()\n",
    "    second_month.columns = ['CUST_NO', 'aps_second_month_amt']\n",
    "    features = features.merge(second_month, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 月环比变化\n",
    "    features['aps_month_mom_change'] = (features['aps_last_month_amt'] - features['aps_second_month_amt']) / (features['aps_second_month_amt'] + 1e-5)\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"时序趋势特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "aaa45c55",
   "metadata": {},
   "source": [
    "### 4. 周期性特征函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "548e010d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "周期性特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def periodicity_features(df):\n",
    "    \"\"\"\n",
    "    生成周期性特征\n",
    "    包括:工作日/周末、月初/月中/月末等\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    # 工作日 vs 周末交易特征\n",
    "    weekday_df = df[df['is_weekend'] == 0]\n",
    "    weekend_df = df[df['is_weekend'] == 1]\n",
    "    \n",
    "    # 工作日交易统计\n",
    "    weekday_stats = weekday_df.groupby('CUST_NO').agg({\n",
    "        'APSDTRAMT_abs': ['sum', 'mean', 'count'],\n",
    "        'APSDTRDAT': 'nunique'\n",
    "    }).reset_index()\n",
    "    weekday_stats.columns = ['CUST_NO', 'aps_weekday_amt_sum', 'aps_weekday_amt_mean', \n",
    "                              'aps_weekday_cnt', 'aps_weekday_active_days']\n",
    "    features = features.merge(weekday_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 周末交易统计\n",
    "    weekend_stats = weekend_df.groupby('CUST_NO').agg({\n",
    "        'APSDTRAMT_abs': ['sum', 'mean', 'count'],\n",
    "        'APSDTRDAT': 'nunique'\n",
    "    }).reset_index()\n",
    "    weekend_stats.columns = ['CUST_NO', 'aps_weekend_amt_sum', 'aps_weekend_amt_mean',\n",
    "                              'aps_weekend_cnt', 'aps_weekend_active_days']\n",
    "    features = features.merge(weekend_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 工作日/周末比率\n",
    "    features['aps_weekday_weekend_amt_ratio'] = features['aps_weekday_amt_sum'] / (features['aps_weekend_amt_sum'] + 1e-5)\n",
    "    features['aps_weekday_weekend_cnt_ratio'] = features['aps_weekday_cnt'] / (features['aps_weekend_cnt'] + 1e-5)\n",
    "    \n",
    "    # 月初(1-10日)、月中(11-20日)、月末(21-31日)交易特征\n",
    "    df_copy = df.copy()\n",
    "    df_copy['period'] = pd.cut(df_copy['day'], bins=[0, 10, 20, 31], labels=['early', 'mid', 'late'])\n",
    "    \n",
    "    for period in ['early', 'mid', 'late']:\n",
    "        period_df = df_copy[df_copy['period'] == period]\n",
    "        period_stats = period_df.groupby('CUST_NO').agg({\n",
    "            'APSDTRAMT_abs': ['sum', 'mean', 'count']\n",
    "        }).reset_index()\n",
    "        period_stats.columns = ['CUST_NO', f'aps_{period}_month_amt_sum', \n",
    "                                f'aps_{period}_month_amt_mean', f'aps_{period}_month_cnt']\n",
    "        features = features.merge(period_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 月末交易占比\n",
    "    features['aps_late_month_ratio'] = features['aps_late_month_cnt'] / (features['aps_early_month_cnt'] + features['aps_mid_month_cnt'] + features['aps_late_month_cnt'] + 1e-5)\n",
    "    \n",
    "    # 每周几交易统计\n",
    "    for day in range(7):\n",
    "        day_df = df[df['weekday'] == day]\n",
    "        day_cnt = day_df.groupby('CUST_NO').size().reset_index(name=f'aps_weekday_{day}_cnt')\n",
    "        features = features.merge(day_cnt, on='CUST_NO', how='left')\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"周期性特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9d53a4d4",
   "metadata": {},
   "source": [
    "### 5. 分类特征编码函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "38118d10",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "分类特征编码函数已定义\n"
     ]
    }
   ],
   "source": [
    "def categorical_encoding_features(df, windows=[30, 60, 90]):\n",
    "    \"\"\"\n",
    "    生成分类特征(交易代码、渠道、摘要)的编码特征\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    for window in windows:\n",
    "        sub_df = df[df['days_to_now'] < window].copy()\n",
    "        \n",
    "        # 交易代码频次编码\n",
    "        cod_freq = sub_df.groupby('APSDTRCOD').size().to_dict()\n",
    "        sub_df[f'cod_freq_{window}d'] = sub_df['APSDTRCOD'].map(cod_freq)\n",
    "        cod_freq_feat = sub_df.groupby('CUST_NO')[f'cod_freq_{window}d'].agg(['mean', 'max', 'min']).reset_index()\n",
    "        cod_freq_feat.columns = ['CUST_NO', f'aps_cod_freq_mean_{window}d', \n",
    "                                  f'aps_cod_freq_max_{window}d', f'aps_cod_freq_min_{window}d']\n",
    "        features = features.merge(cod_freq_feat, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 交易渠道频次编码\n",
    "        chl_freq = sub_df.groupby('APSDTRCHL').size().to_dict()\n",
    "        sub_df[f'chl_freq_{window}d'] = sub_df['APSDTRCHL'].map(chl_freq)\n",
    "        chl_freq_feat = sub_df.groupby('CUST_NO')[f'chl_freq_{window}d'].agg(['mean', 'max', 'min']).reset_index()\n",
    "        chl_freq_feat.columns = ['CUST_NO', f'aps_chl_freq_mean_{window}d',\n",
    "                                  f'aps_chl_freq_max_{window}d', f'aps_chl_freq_min_{window}d']\n",
    "        features = features.merge(chl_freq_feat, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Top交易代码占比\n",
    "        top_codes = sub_df['APSDTRCOD'].value_counts().head(10).index.tolist()\n",
    "        for i, code in enumerate(top_codes[:5], 1):\n",
    "            code_cnt = sub_df[sub_df['APSDTRCOD'] == code].groupby('CUST_NO').size().reset_index(name=f'aps_top{i}_cod_cnt_{window}d')\n",
    "            features = features.merge(code_cnt, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Top交易渠道占比\n",
    "        top_channels = sub_df['APSDTRCHL'].value_counts().head(10).index.tolist()\n",
    "        for i, chl in enumerate(top_channels[:5], 1):\n",
    "            chl_cnt = sub_df[sub_df['APSDTRCHL'] == chl].groupby('CUST_NO').size().reset_index(name=f'aps_top{i}_chl_cnt_{window}d')\n",
    "            features = features.merge(chl_cnt, on='CUST_NO', how='left')\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"分类特征编码函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2b65c5aa",
   "metadata": {},
   "source": [
    "### 6. 流入流出分离特征函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "22ad808d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "流入流出特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def inflow_outflow_features(df, windows=[7, 15, 30, 60, 90]):\n",
    "    \"\"\"\n",
    "    生成流入流出分离特征\n",
    "    参考往年优秀方案的核心特征\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    # 分离流入和流出数据\n",
    "    df_in = df[df['APSDTRAMT'] >= 0].copy()\n",
    "    df_out = df[df['APSDTRAMT'] < 0].copy()\n",
    "    df_out['APSDTRAMT_abs'] = df_out['APSDTRAMT'].abs()\n",
    "    \n",
    "    for window in windows:\n",
    "        # 流入特征\n",
    "        in_sub = df_in[df_in['days_to_now'] < window]\n",
    "        in_stats = in_sub.groupby('CUST_NO')['APSDTRAMT_abs'].agg([\n",
    "            'sum', 'mean', 'count', 'max', 'min', 'std', 'median'\n",
    "        ]).reset_index()\n",
    "        in_stats.columns = ['CUST_NO'] + [f'aps_in_{window}d_{x}' for x in ['sum', 'mean', 'count', 'max', 'min', 'std', 'median']]\n",
    "        features = features.merge(in_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 流出特征\n",
    "        out_sub = df_out[df_out['days_to_now'] < window]\n",
    "        out_stats = out_sub.groupby('CUST_NO')['APSDTRAMT_abs'].agg([\n",
    "            'sum', 'mean', 'count', 'max', 'min', 'std', 'median'\n",
    "        ]).reset_index()\n",
    "        out_stats.columns = ['CUST_NO'] + [f'aps_out_{window}d_{x}' for x in ['sum', 'mean', 'count', 'max', 'min', 'std', 'median']]\n",
    "        features = features.merge(out_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 流入流出比率特征\n",
    "        features[f'aps_in_out_amt_ratio_{window}d'] = features[f'aps_in_{window}d_sum'] / (features[f'aps_out_{window}d_sum'] + 1e-5)\n",
    "        features[f'aps_in_out_cnt_ratio_{window}d'] = features[f'aps_in_{window}d_count'] / (features[f'aps_out_{window}d_count'] + 1e-5)\n",
    "        \n",
    "        # 净流入(流入-流出)\n",
    "        features[f'aps_net_inflow_{window}d'] = features[f'aps_in_{window}d_sum'] - features[f'aps_out_{window}d_sum']\n",
    "        \n",
    "        # 流入活跃天数\n",
    "        in_active = in_sub.groupby('CUST_NO')['APSDTRDAT'].nunique().reset_index()\n",
    "        in_active.columns = ['CUST_NO', f'aps_in_active_days_{window}d']\n",
    "        features = features.merge(in_active, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 流出活跃天数\n",
    "        out_active = out_sub.groupby('CUST_NO')['APSDTRDAT'].nunique().reset_index()\n",
    "        out_active.columns = ['CUST_NO', f'aps_out_active_days_{window}d']\n",
    "        features = features.merge(out_active, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 流入流出频率差异\n",
    "        features[f'aps_in_out_freq_diff_{window}d'] = (features[f'aps_in_{window}d_count'] / (features[f'aps_in_active_days_{window}d'] + 1)) - \\\n",
    "                                                        (features[f'aps_out_{window}d_count'] / (features[f'aps_out_active_days_{window}d'] + 1))\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"流入流出特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2cb7c1f5",
   "metadata": {},
   "source": [
    "### 7. 交易稳定性与异常检测特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "17be7ce6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "稳定性与异常检测特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def stability_and_anomaly_features(df, windows=[30, 60, 90]):\n",
    "    \"\"\"\n",
    "    生成交易稳定性和异常检测特征\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    for window in windows:\n",
    "        sub_df = df[df['days_to_now'] < window].copy()\n",
    "        \n",
    "        # 交易间隔统计\n",
    "        sub_df = sub_df.sort_values(['CUST_NO', 'APSDTRDAT'])\n",
    "        sub_df['days_diff'] = sub_df.groupby('CUST_NO')['APSDTRDAT'].diff().dt.days\n",
    "        \n",
    "        interval_stats = sub_df.groupby('CUST_NO')['days_diff'].agg(['mean', 'std', 'max', 'min']).reset_index()\n",
    "        interval_stats.columns = ['CUST_NO', f'aps_interval_mean_{window}d', f'aps_interval_std_{window}d',\n",
    "                                   f'aps_interval_max_{window}d', f'aps_interval_min_{window}d']\n",
    "        features = features.merge(interval_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 交易规律性(间隔标准差/均值)\n",
    "        features[f'aps_regularity_{window}d'] = features[f'aps_interval_std_{window}d'] / (features[f'aps_interval_mean_{window}d'] + 1e-5)\n",
    "        \n",
    "        # 异常大额交易检测\n",
    "        amt_stats = sub_df.groupby('CUST_NO')['APSDTRAMT_abs'].agg(['mean', 'std']).reset_index()\n",
    "        amt_stats['threshold'] = amt_stats['mean'] + 3 * amt_stats['std']\n",
    "        \n",
    "        # 合并阈值\n",
    "        sub_df_with_threshold = sub_df.merge(amt_stats[['CUST_NO', 'threshold']], on='CUST_NO', how='left')\n",
    "        sub_df_with_threshold['is_outlier'] = (sub_df_with_threshold['APSDTRAMT_abs'] > sub_df_with_threshold['threshold']).astype(int)\n",
    "        \n",
    "        outlier_cnt = sub_df_with_threshold.groupby('CUST_NO')['is_outlier'].sum().reset_index()\n",
    "        outlier_cnt.columns = ['CUST_NO', f'aps_outlier_cnt_{window}d']\n",
    "        features = features.merge(outlier_cnt, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 异常交易占比\n",
    "        total_cnt = sub_df.groupby('CUST_NO').size().reset_index(name='total')\n",
    "        features = features.merge(total_cnt, on='CUST_NO', how='left')\n",
    "        features[f'aps_outlier_ratio_{window}d'] = features[f'aps_outlier_cnt_{window}d'] / (features['total'] + 1e-5)\n",
    "        features.drop('total', axis=1, inplace=True)\n",
    "        \n",
    "        # 连续交易天数最大值\n",
    "        sub_df_daily = sub_df.groupby(['CUST_NO', 'APSDTRDAT']).size().reset_index()\n",
    "        sub_df_daily = sub_df_daily.sort_values(['CUST_NO', 'APSDTRDAT'])\n",
    "        \n",
    "        def max_consecutive_days(group):\n",
    "            dates = pd.to_datetime(group['APSDTRDAT']).sort_values()\n",
    "            if len(dates) <= 1:\n",
    "                return 1\n",
    "            \n",
    "            max_consecutive = 1\n",
    "            current_consecutive = 1\n",
    "            \n",
    "            for i in range(1, len(dates)):\n",
    "                if (dates.iloc[i] - dates.iloc[i-1]).days == 1:\n",
    "                    current_consecutive += 1\n",
    "                    max_consecutive = max(max_consecutive, current_consecutive)\n",
    "                else:\n",
    "                    current_consecutive = 1\n",
    "            \n",
    "            return max_consecutive\n",
    "        \n",
    "        max_consec = sub_df_daily.groupby('CUST_NO').apply(max_consecutive_days).reset_index()\n",
    "        max_consec.columns = ['CUST_NO', f'aps_max_consecutive_days_{window}d']\n",
    "        features = features.merge(max_consec, on='CUST_NO', how='left')\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"稳定性与异常检测特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f85127a7",
   "metadata": {},
   "source": [
    "### 8. 首末次交易特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "d5f2a7c8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "首末次交易特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def first_last_transaction_features(df):\n",
    "    \"\"\"\n",
    "    生成首末次交易特征\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    df_sorted = df.sort_values(['CUST_NO', 'APSDTRDAT'])\n",
    "    \n",
    "    # 首次交易特征\n",
    "    first_txn = df_sorted.groupby('CUST_NO').first().reset_index()\n",
    "    features['aps_first_txn_amt'] = features['CUST_NO'].map(first_txn.set_index('CUST_NO')['APSDTRAMT_abs'])\n",
    "    features['aps_first_txn_days_ago'] = features['CUST_NO'].map(first_txn.set_index('CUST_NO')['days_to_now'])\n",
    "    features['aps_first_txn_is_income'] = features['CUST_NO'].map(first_txn.set_index('CUST_NO')['is_income'])\n",
    "    \n",
    "    # 末次交易特征\n",
    "    last_txn = df_sorted.groupby('CUST_NO').last().reset_index()\n",
    "    features['aps_last_txn_amt'] = features['CUST_NO'].map(last_txn.set_index('CUST_NO')['APSDTRAMT_abs'])\n",
    "    features['aps_last_txn_days_ago'] = features['CUST_NO'].map(last_txn.set_index('CUST_NO')['days_to_now'])\n",
    "    features['aps_last_txn_is_income'] = features['CUST_NO'].map(last_txn.set_index('CUST_NO')['is_income'])\n",
    "    \n",
    "    # 首末次交易金额比\n",
    "    features['aps_first_last_amt_ratio'] = features['aps_first_txn_amt'] / (features['aps_last_txn_amt'] + 1e-5)\n",
    "    \n",
    "    # 交易生命周期(首末次交易时间差)\n",
    "    features['aps_lifecycle_days'] = features['aps_first_txn_days_ago'] - features['aps_last_txn_days_ago']\n",
    "    \n",
    "    # 近7天是否有交易\n",
    "    features['aps_has_txn_last_7d'] = (features['aps_last_txn_days_ago'] < 7).astype(int)\n",
    "    \n",
    "    # 近30天是否有交易\n",
    "    features['aps_has_txn_last_30d'] = (features['aps_last_txn_days_ago'] < 30).astype(int)\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"首末次交易特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6472f298",
   "metadata": {},
   "source": [
    "## 特征生成执行"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9d3f5218",
   "metadata": {},
   "source": [
    "### 1. 生成全量交易特征(包含流入+流出)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "3be70940",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================================================================================\n",
      "开始生成活期交易特征\n",
      "================================================================================\n",
      "客户数: 5,616\n",
      "\n",
      "[1/8] 生成时间窗口统计特征...\n",
      "   当前特征数: 42\n",
      "\n",
      "[2/8] 生成交易行为特征...\n",
      "   当前特征数: 87\n",
      "\n",
      "[3/8] 生成时序趋势特征...\n",
      "   当前特征数: 99\n",
      "\n",
      "[4/8] 生成周期性特征...\n",
      "   当前特征数: 126\n",
      "\n",
      "[5/8] 生成分类特征编码...\n",
      "   当前特征数: 174\n",
      "\n",
      "[6/8] 生成流入流出分离特征...\n",
      "   当前特征数: 274\n",
      "\n",
      "[7/8] 生成稳定性与异常检测特征...\n",
      "   当前特征数: 298\n",
      "\n",
      "[8/8] 生成首末次交易特征...\n",
      "   当前特征数: 308\n",
      "\n",
      "================================================================================\n",
      "特征生成完成! 总特征数: 308\n",
      "================================================================================\n"
     ]
    }
   ],
   "source": [
    "print(\"=\"*80)\n",
    "print(\"开始生成活期交易特征\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "# Start from the distinct customer list; every feature family below is\n",
    "# left-merged onto it by CUST_NO.\n",
    "aps_features = pd.DataFrame({'CUST_NO': tr_aps['CUST_NO'].unique()})\n",
    "print(f\"客户数: {aps_features.shape[0]:,}\")\n",
    "\n",
    "# (progress message, feature-builder thunk) pairs, executed in order.\n",
    "time_windows = [1, 3, 7, 15, 30, 60, 90]\n",
    "feature_steps = [\n",
    "    (\"\\n[1/8] 生成时间窗口统计特征...\",\n",
    "     lambda: time_window_stats(tr_aps, 'CUST_NO', 'APSDTRAMT_abs',\n",
    "                               time_windows, 'aps_all',\n",
    "                               ['sum', 'mean', 'count', 'max', 'min', 'std'])),\n",
    "    (\"\\n[2/8] 生成交易行为特征...\",\n",
    "     lambda: transaction_behavior_features(tr_aps, windows=[7, 15, 30, 60, 90])),\n",
    "    (\"\\n[3/8] 生成时序趋势特征...\",\n",
    "     lambda: time_trend_features(tr_aps)),\n",
    "    (\"\\n[4/8] 生成周期性特征...\",\n",
    "     lambda: periodicity_features(tr_aps)),\n",
    "    (\"\\n[5/8] 生成分类特征编码...\",\n",
    "     lambda: categorical_encoding_features(tr_aps, windows=[30, 60, 90])),\n",
    "    (\"\\n[6/8] 生成流入流出分离特征...\",\n",
    "     lambda: inflow_outflow_features(tr_aps, windows=[7, 15, 30, 60, 90])),\n",
    "    (\"\\n[7/8] 生成稳定性与异常检测特征...\",\n",
    "     lambda: stability_and_anomaly_features(tr_aps, windows=[30, 60, 90])),\n",
    "    (\"\\n[8/8] 生成首末次交易特征...\",\n",
    "     lambda: first_last_transaction_features(tr_aps)),\n",
    "]\n",
    "\n",
    "# Run each builder and merge its output; the running feature count excludes\n",
    "# the CUST_NO key column.\n",
    "for message, build_features in feature_steps:\n",
    "    print(message)\n",
    "    aps_features = aps_features.merge(build_features(), on='CUST_NO', how='left')\n",
    "    print(f\"   当前特征数: {aps_features.shape[1] - 1}\")\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(f\"特征生成完成! 总特征数: {aps_features.shape[1] - 1}\")\n",
    "print(\"=\"*80)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "acc6a156",
   "metadata": {},
   "source": [
    "### 2. 数据质量检查"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "3085fe06",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "特征数据质量检查:\n",
      "--------------------------------------------------------------------------------\n",
      "\n",
      "缺失率 > 50% 的特征数: 77\n",
      "缺失率 > 80% 的特征数: 10\n",
      "\n",
      "缺失率较高的前10个特征:\n",
      "aps_top5_cod_cnt_60d       95.779915\n",
      "aps_top4_cod_cnt_90d       95.548433\n",
      "aps_large_txn_ratio_7d     90.509259\n",
      "aps_all_1d_std             86.609687\n",
      "aps_large_txn_ratio_15d    85.612536\n",
      "aps_top5_cod_cnt_30d       83.814103\n",
      "aps_in_7d_std              81.819801\n",
      "aps_top5_chl_cnt_30d       81.677350\n",
      "aps_large_txn_ratio_30d    80.324074\n",
      "aps_top4_chl_cnt_30d       80.288462\n",
      "dtype: float64\n",
      "\n",
      "填充缺失值...\n",
      "\n",
      "无无穷值特征\n",
      "\n",
      "最终特征数据形状: (5616, 309)\n",
      "客户数: 5,616\n",
      "特征数: 308\n",
      "\n",
      "特征预览:\n",
      "                            CUST_NO  aps_all_1d_sum  aps_all_1d_mean  \\\n",
      "0  0004b47a5be96a8379e58932489ca6da             0.0              0.0   \n",
      "1  00347b0bb271750962f327d2d5bbf737            28.8             28.8   \n",
      "2  003a4f36babe7d605de31f715cc9bb23             0.0              0.0   \n",
      "3  003a92321582961d0cf6f86f7f3464fc             0.0              0.0   \n",
      "4  00457ecc79efe086078ac66df918d059             0.0              0.0   \n",
      "\n",
      "   aps_all_1d_count  aps_all_1d_max  aps_all_1d_min  aps_all_1d_std  \\\n",
      "0               0.0             0.0             0.0             0.0   \n",
      "1               1.0            28.8            28.8             0.0   \n",
      "2               0.0             0.0             0.0             0.0   \n",
      "3               0.0             0.0             0.0             0.0   \n",
      "4               0.0             0.0             0.0             0.0   \n",
      "\n",
      "   aps_all_3d_sum  aps_all_3d_mean  aps_all_3d_count  ...  aps_first_txn_amt  \\\n",
      "0             0.0              0.0               0.0  ...                2.0   \n",
      "1            87.8             43.9               2.0  ...              148.0   \n",
      "2             0.0              0.0               0.0  ...             2000.0   \n",
      "3             0.0              0.0               0.0  ...                2.0   \n",
      "4             0.0              0.0               0.0  ...              196.7   \n",
      "\n",
      "   aps_first_txn_days_ago  aps_first_txn_is_income  aps_last_txn_amt  \\\n",
      "0                      84                        0              0.08   \n",
      "1                      90                        1             28.80   \n",
      "2                      89                        1            106.00   \n",
      "3                      82                        0              0.94   \n",
      "4                      85                        0             22.73   \n",
      "\n",
      "   aps_last_txn_days_ago  aps_last_txn_is_income  aps_first_last_amt_ratio  \\\n",
      "0                      9                       1                 24.996875   \n",
      "1                      0                       0                  5.138887   \n",
      "2                      4                       0                 18.867923   \n",
      "3                      9                       1                  2.127637   \n",
      "4                      9                       1                  8.653758   \n",
      "\n",
      "   aps_lifecycle_days  aps_has_txn_last_7d  aps_has_txn_last_30d  \n",
      "0                  75                    0                     1  \n",
      "1                  90                    1                     1  \n",
      "2                  85                    1                     1  \n",
      "3                  73                    0                     1  \n",
      "4                  76                    0                     1  \n",
      "\n",
      "[5 rows x 309 columns]\n"
     ]
    }
   ],
   "source": [
    "# Feature quality check: report missing rates, fill NaNs, scan for infinities.\n",
    "print(\"特征数据质量检查:\")\n",
    "print(\"-\" * 80)\n",
    "\n",
    "# Missing-value statistics (computed BEFORE any filling)\n",
    "missing_rate = (aps_features.isnull().sum() / len(aps_features) * 100).sort_values(ascending=False)\n",
    "print(f\"\\n缺失率 > 50% 的特征数: {(missing_rate > 50).sum()}\")\n",
    "print(f\"缺失率 > 80% 的特征数: {(missing_rate > 80).sum()}\")\n",
    "\n",
    "if (missing_rate > 50).sum() > 0:\n",
    "    print(\"\\n缺失率较高的前10个特征:\")\n",
    "    print(missing_rate.head(10))\n",
    "\n",
    "# Fill missing values; 0 is the natural neutral value here (no transactions\n",
    "# in the window for that customer).\n",
    "print(\"\\n填充缺失值...\")\n",
    "aps_features = aps_features.fillna(0)\n",
    "\n",
    "# Infinity check. Restrict to numeric columns: np.isinf raises TypeError on\n",
    "# object/string columns, and excluding only 'CUST_NO' by name would break if\n",
    "# any other non-numeric column ever appears.\n",
    "numeric_cols = aps_features.select_dtypes(include=[np.number]).columns\n",
    "inf_cols = [col for col in numeric_cols if np.isinf(aps_features[col]).any()]\n",
    "\n",
    "if len(inf_cols) > 0:\n",
    "    print(f\"\\n包含无穷值的特征数: {len(inf_cols)}\")\n",
    "    print(\"替换无穷值为0...\")\n",
    "    aps_features = aps_features.replace([np.inf, -np.inf], 0)\n",
    "else:\n",
    "    print(\"\\n无无穷值特征\")\n",
    "\n",
    "# Summary statistics (feature count excludes the CUST_NO key column)\n",
    "print(f\"\\n最终特征数据形状: {aps_features.shape}\")\n",
    "print(f\"客户数: {aps_features['CUST_NO'].nunique():,}\")\n",
    "print(f\"特征数: {aps_features.shape[1] - 1}\")\n",
    "\n",
    "# Preview the first rows\n",
    "print(\"\\n特征预览:\")\n",
    "print(aps_features.head())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "56ee6595",
   "metadata": {},
   "source": [
    "### 3. 保存特征文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cdc61c69",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 确保特征目录存在\n",
    "feature_dir = './feature'\n",
    "if not os.path.exists(feature_dir):\n",
    "    os.makedirs(feature_dir)\n",
    "    print(f\"创建特征目录: {feature_dir}\")\n",
    "\n",
    "# 保存为pickle格式\n",
    "output_file = os.path.join(feature_dir, 'TR_APS_DTL_features.pkl')\n",
    "with open(output_file, 'wb') as f:\n",
    "    pickle.dump(aps_features, f)\n",
    "\n",
    "print(f\"\\n特征文件已保存: {output_file}\")\n",
    "print(f\"文件大小: {os.path.getsize(output_file) / 1024 / 1024:.2f} MB\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6904731c",
   "metadata": {},
   "source": [
    "## 特征说明总结\n",
    "\n",
    "本notebook基于往年优秀方案的分析,对活期交易明细表进行了全面的特征工程处理,主要包括以下特征类别:\n",
    "\n",
    "### 特征体系架构\n",
    "\n",
    "#### 1. 时间窗口统计特征\n",
    "- 多时间窗口(1/3/7/15/30/60/90天)\n",
    "- 基础统计量: sum, mean, count, max, min, std\n",
    "- 覆盖全量交易、流入交易、流出交易\n",
    "\n",
    "#### 2. 交易行为特征\n",
    "- 交易活跃度指标\n",
    "- 交易频率指标\n",
    "- 交易多样性指标(交易代码、渠道数量)\n",
    "- 交易金额变异系数\n",
    "- 大小额交易占比\n",
    "\n",
    "#### 3. 时序趋势特征\n",
    "- 不同时间窗口对比(7vs30, 30vs60, 60vs90天)\n",
    "- 日均交易金额/笔数比率\n",
    "- 月度波动性\n",
    "- 月环比变化\n",
    "\n",
    "#### 4. 周期性特征\n",
    "- 工作日vs周末交易统计\n",
    "- 月初/月中/月末交易分布\n",
    "- 每周几交易统计\n",
    "\n",
    "#### 5. 分类特征编码\n",
    "- 交易代码/渠道频次编码\n",
    "- Top交易代码/渠道统计\n",
    "- 频率统计特征\n",
    "\n",
    "#### 6. 流入流出分离特征(核心特征)\n",
    "- 流入/流出独立统计\n",
    "- 流入流出比率\n",
    "- 净流入金额\n",
    "- 流入流出频率差异\n",
    "\n",
    "#### 7. 稳定性与异常检测特征\n",
    "- 交易间隔统计\n",
    "- 交易规律性指标\n",
    "- 异常大额交易检测\n",
    "- 连续交易天数\n",
    "\n",
    "#### 8. 首末次交易特征\n",
    "- 首次/末次交易金额\n",
    "- 首次/末次交易时间\n",
    "- 交易生命周期\n",
    "- 近期交易标识\n",
    "\n",
    "### 设计亮点\n",
    "\n",
    "1. **多时间窗口**: 覆盖短期(1-7天)、中期(15-30天)、长期(60-90天),捕捉不同时间尺度的交易特征\n",
    "2. **流入流出分离**: 参考往年top方案,对流入流出交易分别建模,是关键特征\n",
    "3. **趋势对比**: 通过不同窗口对比,捕捉交易趋势变化\n",
    "4. **异常检测**: 基于3σ原则检测异常交易,提升特征区分度\n",
    "5. **周期性挖掘**: 工作日/周末、月初/月末等周期性模式挖掘\n",
    "\n",
    "### 特征数量\n",
    "预计生成300+个特征,涵盖交易行为的各个维度。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "starcup",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
