{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "f8da670f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "b0312dc9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import gc\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import re\n",
    "import time\n",
    "from scipy import stats\n",
    "import matplotlib.pyplot as plt\n",
    "import category_encoders as ce\n",
    "import networkx as nx\n",
    "import pickle\n",
    "import lightgbm as lgb\n",
    "import catboost as cat\n",
    "import xgboost as xgb\n",
    "from datetime import timedelta\n",
    "from gensim.models import Word2Vec\n",
    "from io import StringIO\n",
    "from tqdm import tqdm\n",
    "from lightgbm import LGBMClassifier\n",
    "from lightgbm import log_evaluation, early_stopping\n",
    "from sklearn.metrics import roc_curve\n",
    "from scipy.stats import chi2_contingency, pearsonr\n",
    "from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n",
    "from sklearn.feature_extraction import FeatureHasher\n",
    "from sklearn.model_selection import StratifiedKFold, KFold, train_test_split, GridSearchCV\n",
    "from category_encoders import TargetEncoder\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from autogluon.tabular import TabularDataset, TabularPredictor, FeatureMetadata\n",
    "from autogluon.features.generators import AsTypeFeatureGenerator, BulkFeatureGenerator, DropUniqueFeatureGenerator, FillNaFeatureGenerator, PipelineFeatureGenerator\n",
    "from autogluon.features.generators import CategoryFeatureGenerator, IdentityFeatureGenerator, AutoMLPipelineFeatureGenerator\n",
    "from autogluon.common.features.types import R_INT, R_FLOAT\n",
    "from autogluon.core.metrics import make_scorer"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c8a56037",
   "metadata": {},
   "source": [
    "# 数据导入"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3e1c98dc",
   "metadata": {},
   "source": [
    "## 数据导入通用函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "869edf9b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data_from_directory(directory):\n",
    "    \"\"\"\n",
    "    遍历目录加载所有CSV文件，将其作为独立的DataFrame变量\n",
    "\n",
    "    参数:\n",
    "    - directory: 输入的数据路径\n",
    "    \n",
    "    返回:\n",
    "    - 含有数据集名称的列表\n",
    "    \"\"\"\n",
    "    dataset_names = []\n",
    "    for filename in os.listdir(directory):\n",
    "        if filename.endswith(\".csv\"):\n",
    "            dataset_name = os.path.splitext(filename)[0] + '_data' # 获取文件名作为变量名\n",
    "            file_path = os.path.join(directory, filename)  # 完整的文件路径\n",
    "            globals()[dataset_name] = pd.read_csv(file_path)  # 将文件加载为DataFrame并赋值给全局变量\n",
    "            dataset_names.append(dataset_name)\n",
    "            print(f\"数据集 {dataset_name} 已加载为 DataFrame\")\n",
    "\n",
    "    return dataset_names"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9ee0084d",
   "metadata": {},
   "source": [
    "## 导入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a5bddf6a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集 AGET_PAY_data 已加载为 DataFrame\n",
      "数据集 ASSET_data 已加载为 DataFrame\n",
      "数据集 CCD_TR_DTL_data 已加载为 DataFrame\n",
      "数据集 MB_PAGEVIEW_DTL_data 已加载为 DataFrame\n",
      "数据集 MB_QRYTRNFLW_data 已加载为 DataFrame\n",
      "数据集 MB_TRNFLW_data 已加载为 DataFrame\n",
      "数据集 NATURE_data 已加载为 DataFrame\n",
      "数据集 PROD_HOLD_data 已加载为 DataFrame\n",
      "数据集 TARGET_data 已加载为 DataFrame\n",
      "数据集 TARGET_VALID_data 已加载为 DataFrame\n",
      "数据集 TR_APS_DTL_data 已加载为 DataFrame\n",
      "数据集 TR_IBTF_data 已加载为 DataFrame\n",
      "数据集 TR_TPAY_data 已加载为 DataFrame\n"
     ]
    }
   ],
   "source": [
    "train_load_dt = '../DATA'\n",
    "train_data_name = load_data_from_directory(train_load_dt)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ed726c42",
   "metadata": {},
   "source": [
    "# 特征工程"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "97ac9c64",
   "metadata": {},
   "source": [
    "### 日期转换与距今天数计算"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "51754f4a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "交易日期范围: 2025-04-01 ~ 2025-06-30\n",
      "距今天数范围: 0 ~ 90\n",
      "月份分布: date_months_to_now\n",
      "0    121209\n",
      "1    114061\n",
      "2    110043\n",
      "Name: count, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "def get_aps_days_to_now(df):\n",
    "    \"\"\"\n",
    "    将交易日期转换为距今天数特征\n",
    "    \n",
    "    参数:\n",
    "    - df: 活期交易表数据\n",
    "    \n",
    "    返回:\n",
    "    - 添加了时间特征的数据框\n",
    "    \"\"\"\n",
    "    # 日期转换\n",
    "    df[\"date\"] = pd.to_datetime(df[\"APSDTRDAT\"], format=\"%Y%m%d\")\n",
    "    \n",
    "    # 计算距最大日期的天数\n",
    "    max_date = df[\"date\"].max()\n",
    "    df_days_to_now = (max_date - df[\"date\"]).dt.days\n",
    "    \n",
    "    # 添加时间维度特征\n",
    "    df[\"date_months_to_now\"] = df_days_to_now // 31  # 距今月数(0, 1, 2对应最近3个月)\n",
    "    df[\"date_weeks_to_now\"] = df_days_to_now // 7    # 距今周数\n",
    "    df[\"date_days_to_now\"] = df_days_to_now          # 距今天数\n",
    "    \n",
    "    print(f\"交易日期范围: {df['date'].min().date()} ~ {df['date'].max().date()}\")\n",
    "    print(f\"距今天数范围: {df['date_days_to_now'].min()} ~ {df['date_days_to_now'].max()}\")\n",
    "    print(f\"月份分布: {df['date_months_to_now'].value_counts().sort_index()}\")\n",
    "    \n",
    "    return df \n",
    "\n",
    "# 执行日期转换\n",
    "tr_aps_dtl = get_aps_days_to_now(tr_aps_dtl)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3f307773",
   "metadata": {},
   "source": [
    "### 活期交易通用特征工程函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "25bb143d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "通用聚合函数定义完成\n"
     ]
    }
   ],
   "source": [
    "# ==================== 通用聚合函数 ====================\n",
    "\n",
    "def get_dense_features(df, col, stat):\n",
    "    \"\"\"按客户ID聚合数值特征\"\"\"\n",
    "    if stat == \"kurt\":\n",
    "        f_stat = lambda x: x.kurt()\n",
    "    elif stat == \"quantile_1_4\":\n",
    "        f_stat = lambda x: x.quantile(0.25)\n",
    "    elif stat == \"quantile_1_2\":\n",
    "        f_stat = lambda x: x.quantile(0.5)\n",
    "    else:\n",
    "        f_stat = stat\n",
    "    \n",
    "    group_df = df.groupby(['CUST_NO'])[col].agg(f_stat).reset_index()\n",
    "    group_df.columns = ['CUST_NO', 'CUST_NO_'+'{}_'.format(col)+stat]\n",
    "    return group_df\n",
    "\n",
    "def get_all_dense_features(df_fea, df_to_groupby, stats):\n",
    "    \"\"\"批量生成数值型特征的统计量\"\"\"\n",
    "    dense_col = [col for col in df_to_groupby.columns if col != \"CUST_NO\"]\n",
    "    for col in tqdm(dense_col, desc=\"生成数值特征\"):\n",
    "        for stat in stats:\n",
    "            df_fea = df_fea.merge(get_dense_features(df_to_groupby, col, stat), on='CUST_NO', how='left')\n",
    "    return df_fea\n",
    "\n",
    "def get_id_category_features(df_fea, df_to_groupby, fea1, fea2, stat):\n",
    "    \"\"\"\n",
    "    按客户ID和类别特征聚合\n",
    "    fea1: 类别特征名(如交易代码、渠道)\n",
    "    fea2: 要聚合的数值特征名\n",
    "    stat: 统计函数\n",
    "    \"\"\"\n",
    "    tmp = df_to_groupby.groupby(['CUST_NO', fea1])[fea2].agg(\n",
    "        stat if stat != \"kurt\" else lambda x: x.kurt()\n",
    "    ).to_frame(\n",
    "        '_'.join(['CUST_NO', fea1, fea2, stat])\n",
    "    ).reset_index()\n",
    "    \n",
    "    # 透视表: 将类别特征展开为多列\n",
    "    df_tmp = pd.pivot(data=tmp, index='CUST_NO', columns=fea1, values='_'.join(['CUST_NO', fea1, fea2, stat]))\n",
    "    new_fea_cols = ['_'.join(['CUST_NO', fea1, fea2, stat, str(col)]) for col in df_tmp.columns]\n",
    "    df_tmp.columns = new_fea_cols\n",
    "    df_tmp.reset_index(inplace=True)\n",
    "        \n",
    "    if stat == 'count':\n",
    "        df_tmp = df_tmp.fillna(0)\n",
    "        \n",
    "    # 去掉全NaN列\n",
    "    valid_cols = []\n",
    "    for col in df_tmp.columns:\n",
    "        if not df_tmp[col].isna().all():\n",
    "            valid_cols.append(col)\n",
    "            \n",
    "    df_fea = df_fea.merge(df_tmp[valid_cols], on='CUST_NO', how='left')\n",
    "    return df_fea, new_fea_cols \n",
    "\n",
    "def get_all_id_category_features(df_fea, df_to_groupby, fea1, fea2, stats):\n",
    "    \"\"\"批量生成类别特征交叉统计\"\"\"\n",
    "    all_new_fea_cols = []\n",
    "    for stat in tqdm(stats, desc=f\"生成{fea1}分组特征\"):\n",
    "        df_fea, new_fea_cols = get_id_category_features(df_fea, df_to_groupby, fea1, fea2, stat)\n",
    "        all_new_fea_cols += new_fea_cols\n",
    "    return df_fea, all_new_fea_cols\n",
    "\n",
    "def get_division_features(df1, df2, col1, col2, eps=1e-6):\n",
    "    \"\"\"生成除法特征(比值特征)\"\"\"\n",
    "    tmp = pd.merge(df1, df2, how=\"left\", on=\"CUST_NO\")\n",
    "    new_feature_name = '_'.join([col1, \"div\", col2])\n",
    "    tmp[new_feature_name] = tmp[col1] / (tmp[col2] + eps)\n",
    "    feature_name = [\"CUST_NO\", new_feature_name]\n",
    "    return tmp[feature_name]\n",
    "\n",
    "print(\"通用聚合函数定义完成\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "41507815",
   "metadata": {},
   "source": [
    "### 时间特征函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "e53034ad",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "扩展特征函数定义完成\n"
     ]
    }
   ],
   "source": [
    "# ==================== 扩展特征函数 ====================\n",
    "\n",
    "def get_time_series_features(df, tr_feature):\n",
    "    \"\"\"\n",
    "    生成时间序列特征\n",
    "    \n",
    "    特征包括:\n",
    "    1. 连续活跃天数\n",
    "    2. 最大连续活跃天数\n",
    "    3. 活跃天数占比\n",
    "    4. 交易间隔统计(均值/标准差/最大/最小)\n",
    "    \"\"\"\n",
    "    print(\"生成时间序列特征...\")\n",
    "    tmp_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "    \n",
    "    # 按客户统计活跃天数\n",
    "    active_days = df.groupby('CUST_NO')['date_days_to_now'].nunique().reset_index()\n",
    "    active_days.columns = ['CUST_NO', 'active_days_count']\n",
    "    tmp_feature = tmp_feature.merge(active_days, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 活跃天数占比(活跃天数/总天数)\n",
    "    max_days = df['date_days_to_now'].max() - df['date_days_to_now'].min() + 1\n",
    "    tmp_feature['active_days_ratio'] = tmp_feature['active_days_count'] / max_days\n",
    "    \n",
    "    # 交易间隔统计\n",
    "    for stat in ['mean', 'std', 'max', 'min']:\n",
    "        interval_stat = df.groupby('CUST_NO')['date_days_to_now'].apply(\n",
    "            lambda x: x.sort_values().diff().dropna().agg(stat) if len(x) > 1 else 0\n",
    "        ).reset_index()\n",
    "        interval_stat.columns = ['CUST_NO', f'transaction_interval_{stat}']\n",
    "        tmp_feature = tmp_feature.merge(interval_stat, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(f\"时间序列特征: {len(tmp_feature.columns) - 1} 个\")\n",
    "    return tmp_feature\n",
    "\n",
    "def get_amount_distribution_features(df, tr_feature):\n",
    "    \"\"\"\n",
    "    生成金额分布特征\n",
    "    \n",
    "    特征包括:\n",
    "    1. 金额变异系数(CV = std/mean)\n",
    "    2. 金额集中度(top3笔金额占比)\n",
    "    3. 大额交易占比(>均值+2std的笔数占比)\n",
    "    4. 小额交易占比(<均值-2std的笔数占比)\n",
    "    5. 金额分位数(0.1, 0.25, 0.5, 0.75, 0.9)\n",
    "    \"\"\"\n",
    "    print(\"生成金额分布特征...\")\n",
    "    tmp_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "    \n",
    "    # 金额变异系数\n",
    "    cv_df = df.groupby('CUST_NO')['APSDTRAMT'].apply(\n",
    "        lambda x: x.std() / x.mean() if x.mean() != 0 else 0\n",
    "    ).reset_index()\n",
    "    cv_df.columns = ['CUST_NO', 'amount_cv']\n",
    "    tmp_feature = tmp_feature.merge(cv_df, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 金额集中度(top3占比)\n",
    "    def top3_ratio(x):\n",
    "        if len(x) == 0:\n",
    "            return 0\n",
    "        top3_sum = x.abs().nlargest(min(3, len(x))).sum()\n",
    "        total_sum = x.abs().sum()\n",
    "        return top3_sum / total_sum if total_sum > 0 else 0\n",
    "    \n",
    "    concentration = df.groupby('CUST_NO')['APSDTRAMT'].apply(top3_ratio).reset_index()\n",
    "    concentration.columns = ['CUST_NO', 'amount_top3_concentration']\n",
    "    tmp_feature = tmp_feature.merge(concentration, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 大额/小额交易占比\n",
    "    def outlier_ratio(x, threshold_type='high'):\n",
    "        if len(x) == 0:\n",
    "            return 0\n",
    "        mean_val = x.mean()\n",
    "        std_val = x.std()\n",
    "        if threshold_type == 'high':\n",
    "            threshold = mean_val + 2 * std_val\n",
    "            return (x > threshold).sum() / len(x)\n",
    "        else:\n",
    "            threshold = mean_val - 2 * std_val\n",
    "            return (x < threshold).sum() / len(x)\n",
    "    \n",
    "    high_ratio = df.groupby('CUST_NO')['APSDTRAMT'].apply(\n",
    "        lambda x: outlier_ratio(x, 'high')\n",
    "    ).reset_index()\n",
    "    high_ratio.columns = ['CUST_NO', 'large_transaction_ratio']\n",
    "    tmp_feature = tmp_feature.merge(high_ratio, on='CUST_NO', how='left')\n",
    "    \n",
    "    low_ratio = df.groupby('CUST_NO')['APSDTRAMT'].apply(\n",
    "        lambda x: outlier_ratio(x, 'low')\n",
    "    ).reset_index()\n",
    "    low_ratio.columns = ['CUST_NO', 'small_transaction_ratio']\n",
    "    tmp_feature = tmp_feature.merge(low_ratio, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 金额分位数\n",
    "    for q in [0.1, 0.25, 0.5, 0.75, 0.9]:\n",
    "        quantile_df = df.groupby('CUST_NO')['APSDTRAMT'].quantile(q).reset_index()\n",
    "        quantile_df.columns = ['CUST_NO', f'amount_quantile_{int(q*100)}']\n",
    "        tmp_feature = tmp_feature.merge(quantile_df, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(f\"金额分布特征: {len(tmp_feature.columns) - 1} 个\")\n",
    "    return tmp_feature\n",
    "\n",
    "def get_channel_diversity_features(df, tr_feature):\n",
    "    \"\"\"\n",
    "    生成渠道多样性特征\n",
    "    \n",
    "    特征包括:\n",
    "    1. 使用渠道种类数\n",
    "    2. 渠道Shannon熵(衡量渠道使用均匀度)\n",
    "    3. 主渠道占比(使用最多的渠道占比)\n",
    "    4. 渠道切换频率(相邻交易渠道不同的次数占比)\n",
    "    5. 跨月渠道稳定性(各月使用渠道的交集占比)\n",
    "    \"\"\"\n",
    "    print(\"生成渠道多样性特征...\")\n",
    "    tmp_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "    \n",
    "    # 使用渠道种类数\n",
    "    channel_count = df.groupby('CUST_NO')['APSDTRCHL'].nunique().reset_index()\n",
    "    channel_count.columns = ['CUST_NO', 'channel_diversity_count']\n",
    "    tmp_feature = tmp_feature.merge(channel_count, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 渠道Shannon熵\n",
    "    def shannon_entropy(x):\n",
    "        from scipy.stats import entropy\n",
    "        value_counts = x.value_counts()\n",
    "        probs = value_counts / value_counts.sum()\n",
    "        return entropy(probs, base=2)\n",
    "    \n",
    "    channel_entropy = df.groupby('CUST_NO')['APSDTRCHL'].apply(shannon_entropy).reset_index()\n",
    "    channel_entropy.columns = ['CUST_NO', 'channel_shannon_entropy']\n",
    "    tmp_feature = tmp_feature.merge(channel_entropy, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 主渠道占比\n",
    "    def main_channel_ratio(x):\n",
    "        if len(x) == 0:\n",
    "            return 0\n",
    "        return x.value_counts().iloc[0] / len(x)\n",
    "    \n",
    "    main_ratio = df.groupby('CUST_NO')['APSDTRCHL'].apply(main_channel_ratio).reset_index()\n",
    "    main_ratio.columns = ['CUST_NO', 'main_channel_ratio']\n",
    "    tmp_feature = tmp_feature.merge(main_ratio, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 渠道切换频率\n",
    "    def channel_switch_rate(group):\n",
    "        if len(group) <= 1:\n",
    "            return 0\n",
    "        channels = group.sort_values('date_days_to_now')['APSDTRCHL'].values\n",
    "        switches = (channels[:-1] != channels[1:]).sum()\n",
    "        return switches / (len(channels) - 1)\n",
    "    \n",
    "    switch_rate = df.groupby('CUST_NO').apply(channel_switch_rate).reset_index()\n",
    "    switch_rate.columns = ['CUST_NO', 'channel_switch_rate']\n",
    "    tmp_feature = tmp_feature.merge(switch_rate, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 跨月渠道稳定性\n",
    "    def cross_month_stability(group):\n",
    "        months = group['date_months_to_now'].unique()\n",
    "        if len(months) <= 1:\n",
    "            return 1.0\n",
    "        \n",
    "        month_channels = []\n",
    "        for month in months:\n",
    "            channels = set(group[group['date_months_to_now'] == month]['APSDTRCHL'].unique())\n",
    "            month_channels.append(channels)\n",
    "        \n",
    "        # 计算交集占并集的比例\n",
    "        intersection = set.intersection(*month_channels)\n",
    "        union = set.union(*month_channels)\n",
    "        return len(intersection) / len(union) if len(union) > 0 else 0\n",
    "    \n",
    "    stability = df.groupby('CUST_NO').apply(cross_month_stability).reset_index()\n",
    "    stability.columns = ['CUST_NO', 'channel_cross_month_stability']\n",
    "    tmp_feature = tmp_feature.merge(stability, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(f\"渠道多样性特征: {len(tmp_feature.columns) - 1} 个\")\n",
    "    return tmp_feature\n",
    "\n",
    "def get_transaction_code_features(df, tr_feature):\n",
    "    \"\"\"\n",
    "    生成交易代码特征\n",
    "    \n",
    "    特征包括:\n",
    "    1. 使用交易代码种类数\n",
    "    2. 交易代码Shannon熵\n",
    "    3. 主交易代码占比\n",
    "    4. 交易代码切换频率\n",
    "    \"\"\"\n",
    "    print(\"生成交易代码特征...\")\n",
    "    tmp_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "    \n",
    "    # 使用交易代码种类数\n",
    "    code_count = df.groupby('CUST_NO')['APSDTRCOD'].nunique().reset_index()\n",
    "    code_count.columns = ['CUST_NO', 'transaction_code_diversity_count']\n",
    "    tmp_feature = tmp_feature.merge(code_count, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 交易代码Shannon熵\n",
    "    def shannon_entropy(x):\n",
    "        from scipy.stats import entropy\n",
    "        value_counts = x.value_counts()\n",
    "        probs = value_counts / value_counts.sum()\n",
    "        return entropy(probs, base=2)\n",
    "    \n",
    "    code_entropy = df.groupby('CUST_NO')['APSDTRCOD'].apply(shannon_entropy).reset_index()\n",
    "    code_entropy.columns = ['CUST_NO', 'transaction_code_shannon_entropy']\n",
    "    tmp_feature = tmp_feature.merge(code_entropy, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 主交易代码占比\n",
    "    def main_code_ratio(x):\n",
    "        if len(x) == 0:\n",
    "            return 0\n",
    "        return x.value_counts().iloc[0] / len(x)\n",
    "    \n",
    "    main_ratio = df.groupby('CUST_NO')['APSDTRCOD'].apply(main_code_ratio).reset_index()\n",
    "    main_ratio.columns = ['CUST_NO', 'main_transaction_code_ratio']\n",
    "    tmp_feature = tmp_feature.merge(main_ratio, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(f\"交易代码特征: {len(tmp_feature.columns) - 1} 个\")\n",
    "    return tmp_feature\n",
    "\n",
    "def get_behavioral_pattern_features(df, tr_feature):\n",
    "    \"\"\"\n",
    "    生成行为模式特征\n",
    "    \n",
    "    特征包括:\n",
    "    1. 工作日vs周末交易占比\n",
    "    2. 上午/下午/晚上交易占比(基于日期序列推断)\n",
    "    3. 月初/月中/月末交易占比\n",
    "    4. 交易规律性(周期性检测)\n",
    "    5. 连续大额交易次数\n",
    "    \"\"\"\n",
    "    print(\"生成行为模式特征...\")\n",
    "    tmp_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "    \n",
    "    # 计算是否为工作日(假设周一至周五为工作日)\n",
    "    df_copy = df.copy()\n",
    "    df_copy['is_weekday'] = df_copy['date'].dt.dayofweek < 5\n",
    "    \n",
    "    weekday_ratio = df_copy.groupby('CUST_NO')['is_weekday'].mean().reset_index()\n",
    "    weekday_ratio.columns = ['CUST_NO', 'weekday_transaction_ratio']\n",
    "    tmp_feature = tmp_feature.merge(weekday_ratio, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 月初/月中/月末交易占比\n",
    "    df_copy['day_of_month'] = df_copy['date'].dt.day\n",
    "    df_copy['month_period'] = pd.cut(df_copy['day_of_month'], \n",
    "                                       bins=[0, 10, 20, 31], \n",
    "                                       labels=['early', 'mid', 'late'])\n",
    "    \n",
    "    for period in ['early', 'mid', 'late']:\n",
    "        period_ratio = df_copy.groupby('CUST_NO')['month_period'].apply(\n",
    "            lambda x: (x == period).sum() / len(x)\n",
    "        ).reset_index()\n",
    "        period_ratio.columns = ['CUST_NO', f'month_{period}_transaction_ratio']\n",
    "        tmp_feature = tmp_feature.merge(period_ratio, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 连续大额交易检测(金额>均值的连续次数)\n",
    "    def consecutive_large_transactions(group):\n",
    "        sorted_group = group.sort_values('date_days_to_now')\n",
    "        mean_amt = sorted_group['APSDTRAMT'].abs().mean()\n",
    "        is_large = (sorted_group['APSDTRAMT'].abs() > mean_amt).astype(int)\n",
    "        \n",
    "        max_consecutive = 0\n",
    "        current_consecutive = 0\n",
    "        for val in is_large:\n",
    "            if val == 1:\n",
    "                current_consecutive += 1\n",
    "                max_consecutive = max(max_consecutive, current_consecutive)\n",
    "            else:\n",
    "                current_consecutive = 0\n",
    "        \n",
    "        return max_consecutive\n",
    "    \n",
    "    consecutive = df_copy.groupby('CUST_NO').apply(consecutive_large_transactions).reset_index()\n",
    "    consecutive.columns = ['CUST_NO', 'max_consecutive_large_transactions']\n",
    "    tmp_feature = tmp_feature.merge(consecutive, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(f\"行为模式特征: {len(tmp_feature.columns) - 1} 个\")\n",
    "    return tmp_feature\n",
    "\n",
    "def get_advanced_quantile_features(df, tr_feature, quantiles=[0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95]):\n",
    "    \"\"\"\n",
    "    生成高级分位数特征\n",
    "    \n",
    "    特征包括:\n",
    "    1. 多个分位数位置(0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95)\n",
    "    2. 分位数跨度(0.75-0.25, 0.9-0.1等)\n",
    "    3. 相对分位数位置(用户均值/全局分位数)\n",
    "    \"\"\"\n",
    "    print(\"生成高级分位数特征...\")\n",
    "    tmp_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "    \n",
    "    # 用户平均金额\n",
    "    user_mean = df.groupby('CUST_NO')['APSDTRAMT'].mean().reset_index()\n",
    "    user_mean.columns = ['CUST_NO', 'user_mean_amount']\n",
    "    \n",
    "    # 全局分位数\n",
    "    global_quantiles = {}\n",
    "    for q in quantiles:\n",
    "        global_quantiles[q] = df['APSDTRAMT'].abs().quantile(q)\n",
    "    \n",
    "    # 相对分位数位置\n",
    "    for q in quantiles:\n",
    "        user_mean[f'relative_position_q{int(q*100)}'] = (\n",
    "            user_mean['user_mean_amount'].abs() > global_quantiles[q]\n",
    "        ).astype(float)\n",
    "    \n",
    "    tmp_feature = tmp_feature.merge(user_mean, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 分位数跨度\n",
    "    user_q25 = df.groupby('CUST_NO')['APSDTRAMT'].quantile(0.25).reset_index()\n",
    "    user_q75 = df.groupby('CUST_NO')['APSDTRAMT'].quantile(0.75).reset_index()\n",
    "    user_q25.columns = ['CUST_NO', 'q25']\n",
    "    user_q75.columns = ['CUST_NO', 'q75']\n",
    "    \n",
    "    iqr_df = user_q25.merge(user_q75, on='CUST_NO')\n",
    "    iqr_df['iqr_span'] = iqr_df['q75'] - iqr_df['q25']\n",
    "    tmp_feature = tmp_feature.merge(iqr_df[['CUST_NO', 'iqr_span']], on='CUST_NO', how='left')\n",
    "    \n",
    "    print(f\"高级分位数特征: {len(tmp_feature.columns) - 1} 个\")\n",
    "    return tmp_feature\n",
    "\n",
    "def get_stability_features(df, tr_feature):\n",
    "    \"\"\"\n",
    "    生成稳定性特征\n",
    "    \n",
    "    特征包括:\n",
    "    1. 月度交易金额稳定性(月度金额标准差/均值)\n",
    "    2. 月度交易笔数稳定性\n",
    "    3. 月度增长率(最近月/前月)\n",
    "    4. 趋势方向(线性回归斜率)\n",
    "    \"\"\"\n",
    "    print(\"生成稳定性特征...\")\n",
    "    tmp_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "    \n",
    "    # 月度交易金额统计\n",
    "    monthly_stats = df.groupby(['CUST_NO', 'date_months_to_now'])['APSDTRAMT'].agg(['sum', 'count']).reset_index()\n",
    "    \n",
    "    # 金额稳定性\n",
    "    amount_stability = monthly_stats.groupby('CUST_NO')['sum'].apply(\n",
    "        lambda x: x.std() / x.mean() if x.mean() != 0 else 0\n",
    "    ).reset_index()\n",
    "    amount_stability.columns = ['CUST_NO', 'monthly_amount_stability']\n",
    "    tmp_feature = tmp_feature.merge(amount_stability, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 笔数稳定性\n",
    "    count_stability = monthly_stats.groupby('CUST_NO')['count'].apply(\n",
    "        lambda x: x.std() / x.mean() if x.mean() != 0 else 0\n",
    "    ).reset_index()\n",
    "    count_stability.columns = ['CUST_NO', 'monthly_count_stability']\n",
    "    tmp_feature = tmp_feature.merge(count_stability, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 月度增长率(最近月/上月)\n",
    "    def growth_rate(group):\n",
    "        sorted_months = group.sort_values('date_months_to_now')\n",
    "        if len(sorted_months) < 2:\n",
    "            return 0\n",
    "        recent = sorted_months.iloc[0]['sum']  # month 0 (最近)\n",
    "        previous = sorted_months.iloc[1]['sum'] if len(sorted_months) > 1 else recent\n",
    "        return (recent - previous) / abs(previous) if previous != 0 else 0\n",
    "    \n",
    "    growth = monthly_stats.groupby('CUST_NO').apply(growth_rate).reset_index()\n",
    "    growth.columns = ['CUST_NO', 'monthly_growth_rate']\n",
    "    tmp_feature = tmp_feature.merge(growth, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 趋势方向(简单线性拟合斜率)\n",
    "    def trend_slope(group):\n",
    "        sorted_months = group.sort_values('date_months_to_now')\n",
    "        if len(sorted_months) < 2:\n",
    "            return 0\n",
    "        x = sorted_months['date_months_to_now'].values\n",
    "        y = sorted_months['sum'].values\n",
    "        # 简单线性回归斜率\n",
    "        if len(x) > 1 and np.std(x) > 0:\n",
    "            slope = np.polyfit(x, y, 1)[0]\n",
    "            return slope\n",
    "        return 0\n",
    "    \n",
    "    trend = monthly_stats.groupby('CUST_NO').apply(trend_slope).reset_index()\n",
    "    trend.columns = ['CUST_NO', 'amount_trend_slope']\n",
    "    tmp_feature = tmp_feature.merge(trend, on='CUST_NO', how='left')\n",
    "    \n",
    "    print(f\"稳定性特征: {len(tmp_feature.columns) - 1} 个\")\n",
    "    return tmp_feature\n",
    "\n",
    "print(\"扩展特征函数定义完成\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "e6ab256a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "RFM特征函数定义完成\n"
     ]
    }
   ],
   "source": [
    "# ==================== RFM特征函数 ====================\n",
    "\n",
    "def get_aps_recent_days_to_now(df_tr, month):\n",
    "    \"\"\"最近一笔交易距今天数(Recency特征)\"\"\"\n",
    "    tmp_df = df_tr.groupby(['CUST_NO'])[\"date_days_to_now\"].min().to_frame(\n",
    "        \"recent_days_to_now_{}\".format(str(month))\n",
    "    ).reset_index()\n",
    "    return tmp_df\n",
    "\n",
    "def get_aps_max_amt_days_to_now(df_tr, month):\n",
    "    \"\"\"最大一笔金额距今天数(剔除零值交易)\"\"\"\n",
    "    if df_tr[\"APSDTRAMT\"].max() > 0:\n",
    "        tmp_df = df_tr[df_tr[\"APSDTRAMT\"] > 0].groupby(['CUST_NO']).agg(\n",
    "            {\"APSDTRAMT\": \"max\"}\n",
    "        ).reset_index()\n",
    "    else:\n",
    "        tmp_df = df_tr[df_tr[\"APSDTRAMT\"] < 0].groupby(['CUST_NO']).agg(\n",
    "            {\"APSDTRAMT\": \"min\"}\n",
    "        ).reset_index()\n",
    "\n",
    "    tmp_df = tmp_df.merge(\n",
    "        df_tr[['CUST_NO', 'date_days_to_now', 'APSDTRAMT']], \n",
    "        on=[\"CUST_NO\", 'APSDTRAMT'], \n",
    "        how=\"inner\"\n",
    "    )\n",
    "    tmp_df_day = tmp_df.groupby(['CUST_NO'])[\"date_days_to_now\"].min().to_frame(\n",
    "        \"max_amt_days_to_now_{}\".format(str(month))\n",
    "    ).reset_index()\n",
    "    tmp_df_amt = tmp_df.groupby(['CUST_NO']).agg(\n",
    "        {\"APSDTRAMT\": \"max\"}\n",
    "    ).reset_index()\n",
    "    tmp_df_amt.columns = ['CUST_NO', 'max_absamt_{}'.format(str(month))]\n",
    "\n",
    "    return tmp_df_day, tmp_df_amt\n",
    "\n",
    "def gen_aps_day_features_by_month(df_tr, tr_feature, dual_dir=True, postfix=''):\n",
    "    \"\"\"\n",
    "    生成按月维度的RFM特征\n",
    "    \n",
    "    特征包括:\n",
    "    1. 最近交易距今天数\n",
    "    2. 最大金额交易距今天数\n",
    "    3. 最近交易距最大金额交易的天数差\n",
    "    4. 流入流出金额/天数轧差\n",
    "    \"\"\"\n",
    "    if dual_dir:\n",
    "        # ========== 流入交易特征 ==========\n",
    "        df_tr_in = df_tr[df_tr[\"APSDTRAMT\"] > 0]\n",
    "        tmp_tr_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "        \n",
    "        for month in tqdm([0, 1, 2], desc=\"生成流入RFM特征\"):\n",
    "            df_tr_month = df_tr_in[df_tr_in[\"date_months_to_now\"] == month]\n",
    "            df_max_amt_days_to_now, df_max_amt = get_aps_max_amt_days_to_now(df_tr_month, month)\n",
    "            df_recent_days_to_now = get_aps_recent_days_to_now(df_tr_month, month)\n",
    "            \n",
    "            tmp_tr_feature = tmp_tr_feature.merge(df_max_amt_days_to_now, how=\"left\", on=\"CUST_NO\")\n",
    "            tmp_tr_feature = tmp_tr_feature.merge(df_recent_days_to_now, how=\"left\", on=\"CUST_NO\")\n",
    "            tmp_tr_feature = tmp_tr_feature.merge(df_max_amt, how=\"left\", on=\"CUST_NO\")\n",
    "            tmp_tr_feature[\"maxamt_days_to_recent_{}\".format(str(month))] = \\\n",
    "                tmp_tr_feature[\"recent_days_to_now_{}\".format(str(month))] - \\\n",
    "                tmp_tr_feature[\"max_amt_days_to_now_{}\".format(str(month))]\n",
    "        \n",
    "        tmp_tr_feature.columns = [\"CUST_NO\"] + [\n",
    "            \"{}_{}_{}\".format(col, \"in\", postfix) for col in tmp_tr_feature.columns if col != \"CUST_NO\"\n",
    "        ]\n",
    "        tr_feature = tr_feature.merge(tmp_tr_feature, how=\"left\", on=\"CUST_NO\")\n",
    "        \n",
    "        # ========== 流出交易特征 ==========\n",
    "        df_tr_out = df_tr[df_tr[\"APSDTRAMT\"] < 0]\n",
    "        tmp_tr_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "        \n",
    "        for month in tqdm([0, 1, 2], desc=\"生成流出RFM特征\"):\n",
    "            df_tr_month = df_tr_out[df_tr_out[\"date_months_to_now\"] == month]\n",
    "            df_max_amt_days_to_now, df_max_amt = get_aps_max_amt_days_to_now(df_tr_month, month)\n",
    "            df_recent_days_to_now = get_aps_recent_days_to_now(df_tr_month, month)\n",
    "            \n",
    "            tmp_tr_feature = tmp_tr_feature.merge(df_max_amt_days_to_now, how=\"left\", on=\"CUST_NO\")\n",
    "            tmp_tr_feature = tmp_tr_feature.merge(df_recent_days_to_now, how=\"left\", on=\"CUST_NO\")\n",
    "            tmp_tr_feature = tmp_tr_feature.merge(df_max_amt, how=\"left\", on=\"CUST_NO\")\n",
    "            tmp_tr_feature[\"maxamt_days_to_recent_{}\".format(str(month))] = \\\n",
    "                tmp_tr_feature[\"recent_days_to_now_{}\".format(str(month))] - \\\n",
    "                tmp_tr_feature[\"max_amt_days_to_now_{}\".format(str(month))]\n",
    "        \n",
    "        tmp_tr_feature.columns = [\"CUST_NO\"] + [\n",
    "            \"{}_{}_{}\".format(col, \"out\", postfix) for col in tmp_tr_feature.columns if col != \"CUST_NO\"\n",
    "        ]\n",
    "        tr_feature = tr_feature.merge(tmp_tr_feature, how=\"left\", on=\"CUST_NO\")\n",
    "    \n",
    "        # ========== 流入流出轧差特征 ==========\n",
    "        for month in [0, 1, 2]:\n",
    "            # 金额轧差\n",
    "            tr_feature[f\"in_out_max_absamt_diff_{month}_{postfix}\"] = \\\n",
    "                tr_feature[f\"max_absamt_{month}_in_{postfix}\"].abs() - \\\n",
    "                tr_feature[f\"max_absamt_{month}_out_{postfix}\"].abs()\n",
    "            \n",
    "            # 天数轧差\n",
    "            tr_feature[f\"in_out_maxamt_days_diff_{month}_{postfix}\"] = \\\n",
    "                tr_feature[f\"max_amt_days_to_now_{month}_in_{postfix}\"] - \\\n",
    "                tr_feature[f\"max_amt_days_to_now_{month}_out_{postfix}\"]\n",
    "    else:\n",
    "        tmp_tr_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "        for month in tqdm([0, 1, 2], desc=\"生成整体RFM特征\"):\n",
    "            df_tr_month = df_tr[df_tr[\"date_months_to_now\"] == month]\n",
    "            df_max_amt_days_to_now, df_max_amt = get_aps_max_amt_days_to_now(df_tr_month, month)\n",
    "            df_recent_days_to_now = get_aps_recent_days_to_now(df_tr_month, month)\n",
    "            \n",
    "            tmp_tr_feature = tmp_tr_feature.merge(df_max_amt_days_to_now, how=\"left\", on=\"CUST_NO\")\n",
    "            tmp_tr_feature = tmp_tr_feature.merge(df_recent_days_to_now, how=\"left\", on=\"CUST_NO\")\n",
    "            tmp_tr_feature = tmp_tr_feature.merge(df_max_amt, how=\"left\", on=\"CUST_NO\")\n",
    "            tmp_tr_feature[\"maxamt_days_to_recent_{}_{}\".format(str(month), postfix)] = \\\n",
    "                df_recent_days_to_now[\"recent_days_to_now_{}\".format(str(month))] - \\\n",
    "                df_max_amt_days_to_now[\"max_amt_days_to_now_{}\".format(str(month))]\n",
    "        \n",
    "        tr_feature = tr_feature.merge(tmp_tr_feature, how=\"left\", on=\"CUST_NO\") \n",
    "        \n",
    "    return tr_feature\n",
    "\n",
    "print(\"RFM特征函数定义完成\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "d7d4719e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "核心特征生成函数定义完成\n"
     ]
    }
   ],
   "source": [
    "# ==================== Core feature-generation function ====================\n",
    "\n",
    "def gen_aps_features_by_day(df_tr, tr_feature, postfix):\n",
    "    \"\"\"\n",
    "    Generate the full feature set from the demand-deposit transaction detail table.\n",
    "    \n",
    "    Feature modules:\n",
    "    1. Preprocessing: aggregate by day / transaction code / channel\n",
    "    2. Sliding-window statistics: amount / count statistics by month\n",
    "    3. Quantile features: each customer's relative position in the population\n",
    "    4. Per-channel grouped statistics\n",
    "    5. Channel-preference features\n",
    "    6. Extended advanced features (time-series, distribution, stability, ...)\n",
    "    \n",
    "    Parameters\n",
    "    ----------\n",
    "    df_tr : pd.DataFrame\n",
    "        Transaction details with CUST_NO, APSDTRAMT, APSDTRCOD, APSDTRCHL and\n",
    "        the date_days/weeks/months_to_now bucketing columns.\n",
    "    tr_feature : pd.DataFrame\n",
    "        Seed frame containing at least a CUST_NO column; one row per customer.\n",
    "    postfix : str\n",
    "        Suffix appended to every generated column name (e.g. 'in' or 'out').\n",
    "    \n",
    "    Returns\n",
    "    -------\n",
    "    pd.DataFrame\n",
    "        tr_feature extended with all generated features, renamed '<col>_<postfix>'.\n",
    "    \"\"\"\n",
    "    print(f\"\\n{'='*80}\")\n",
    "    print(f\"开始生成活期交易特征 (postfix={postfix})\")\n",
    "    print(f\"{'='*80}\")\n",
    "    \n",
    "    # ========== 1. Preprocessing ==========\n",
    "    print(\"数据预处理: 按天/代码/渠道聚合...\")\n",
    "    \n",
    "    # 1.1 Sum amounts per day + transaction code + channel\n",
    "    df_tr_by_day_cod_amt = df_tr.groupby([\n",
    "        \"CUST_NO\", \"date_days_to_now\", \"date_weeks_to_now\", \n",
    "        \"date_months_to_now\", \"APSDTRCOD\", \"APSDTRCHL\"\n",
    "    ]).agg({\"APSDTRAMT\": \"sum\"}).reset_index()\n",
    "    # Fix: direct assignment instead of column-level fillna(..., inplace=True),\n",
    "    # which operates on a temporary and is deprecated chained assignment\n",
    "    # (a no-op under pandas Copy-on-Write).\n",
    "    df_tr_by_day_cod_amt[\"APSDTRAMT\"] = df_tr_by_day_cod_amt[\"APSDTRAMT\"].fillna(0)\n",
    "    \n",
    "    # Aggregate per day across all codes and channels\n",
    "    df_tr_by_day = df_tr_by_day_cod_amt.groupby([\n",
    "        \"CUST_NO\", \"date_days_to_now\", \"date_weeks_to_now\", \"date_months_to_now\"\n",
    "    ]).agg({\"APSDTRAMT\": \"sum\"}).reset_index()\n",
    "    \n",
    "    # 1.2 Per-day transaction-code counts (distinct codes + total records)\n",
    "    df_tr_by_day_cod_ns = df_tr.groupby([\n",
    "        \"CUST_NO\", \"date_days_to_now\", \"date_weeks_to_now\", \n",
    "        \"date_months_to_now\", \"APSDTRCOD\"\n",
    "    ])[\"APSDTRCOD\"].agg(['nunique', 'count'])\n",
    "    df_tr_by_day_cod_ns.columns = ['nunique', 'count']\n",
    "    df_tr_by_day_cod_ns = df_tr_by_day_cod_ns.reset_index()\n",
    "    df_tr_by_day_cod = df_tr_by_day_cod_ns.groupby([\n",
    "        \"CUST_NO\", \"date_days_to_now\", \"date_weeks_to_now\", \"date_months_to_now\"\n",
    "    ])[['nunique', 'count']].agg(\"sum\").reset_index()\n",
    "    \n",
    "    # 1.3 Per-day transaction-channel counts\n",
    "    df_tr_by_day_chl_ns = df_tr.groupby([\n",
    "        \"CUST_NO\", \"date_days_to_now\", \"date_weeks_to_now\", \n",
    "        \"date_months_to_now\", \"APSDTRCHL\"\n",
    "    ])[\"APSDTRCHL\"].agg(['nunique', 'count'])\n",
    "    df_tr_by_day_chl_ns.columns = ['nunique', 'count']\n",
    "    df_tr_by_day_chl_ns = df_tr_by_day_chl_ns.reset_index()\n",
    "    df_tr_by_day_chl = df_tr_by_day_chl_ns.groupby([\n",
    "        \"CUST_NO\", \"date_days_to_now\", \"date_weeks_to_now\", \"date_months_to_now\"\n",
    "    ])[['nunique', 'count']].agg(\"sum\").reset_index()\n",
    "    \n",
    "    print(f\"按天聚合完成: {df_tr_by_day.shape[0]} 条记录\")\n",
    "    \n",
    "    # ========== 2. Sliding-window statistics ==========\n",
    "    print(\"生成滑窗统计特征...\")\n",
    "    \n",
    "    tmp_tr_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "    \n",
    "    # 2.1 Monthly window statistics\n",
    "    for fea1 in [\"date_months_to_now\"]:\n",
    "        # Daily amount trend (8 statistics)\n",
    "        tmp_tr_feature, cols_amt = get_all_id_category_features(\n",
    "            tmp_tr_feature, df_tr_by_day, fea1=fea1, fea2='APSDTRAMT', \n",
    "            stats=['mean', 'max', 'min', 'median', 'std', 'sum', \"skew\", \"kurt\"]\n",
    "        )\n",
    "        \n",
    "        # Daily transaction count\n",
    "        tmp_tr_feature, _ = get_all_id_category_features(\n",
    "            tmp_tr_feature, df_tr_by_day_cod, fea1=fea1, fea2='count',\n",
    "            stats=[\"sum\"]\n",
    "        )\n",
    "        \n",
    "        # Daily distinct transaction codes\n",
    "        tmp_tr_feature, _ = get_all_id_category_features(\n",
    "            tmp_tr_feature, df_tr, fea1=fea1, fea2='APSDTRCOD',\n",
    "            stats=['nunique']\n",
    "        )\n",
    "        \n",
    "        # Daily distinct transaction channels\n",
    "        tmp_tr_feature, _ = get_all_id_category_features(\n",
    "            tmp_tr_feature, df_tr, fea1=fea1, fea2='APSDTRCHL',\n",
    "            stats=[\"nunique\"]\n",
    "        )\n",
    "    \n",
    "    tr_feature = tr_feature.merge(tmp_tr_feature, how=\"left\", on=\"CUST_NO\")\n",
    "    print(f\"滑窗特征完成: 当前特征数 {len(tr_feature.columns)}\")\n",
    "    \n",
    "    # ========== 3. Quantile features ==========\n",
    "    print(\"生成分位数特征...\")\n",
    "    \n",
    "    # Flag whether the monthly total amount exceeds the population 1/4 and 1/2 quantiles\n",
    "    cols_amt_month_sum = [col for col in cols_amt if (\"date_months_to_now\" in col and \"sum\" in col)]\n",
    "    for col in tqdm(cols_amt_month_sum, desc=\"生成分位数特征\"):\n",
    "        # The comparison yields 0/1 floats, so the original trailing .abs() was a\n",
    "        # no-op and has been dropped; redundant str-format indexing simplified.\n",
    "        tr_feature[\"{}_1_4_month\".format(str(col))] = (\n",
    "            tr_feature[col].abs() > tr_feature[col].quantile(0.25)\n",
    "        ).astype(float)\n",
    "        tr_feature[\"{}_1_2_month\".format(str(col))] = (\n",
    "            tr_feature[col].abs() > tr_feature[col].quantile(0.5)\n",
    "        ).astype(float)\n",
    "    \n",
    "    print(f\"分位数特征完成: 当前特征数 {len(tr_feature.columns)}\")\n",
    "    \n",
    "    # ========== 4. Per-channel grouped statistics ==========\n",
    "    print(\"生成交易渠道分组特征...\")\n",
    "    \n",
    "    tmp_tr_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "    \n",
    "    # 4.1 Amount statistics grouped by channel\n",
    "    tmp_tr_feature, cols = get_all_id_category_features(\n",
    "        tmp_tr_feature, df_tr_by_day_cod_amt, fea1='APSDTRCHL', fea2='APSDTRAMT', \n",
    "        stats=['mean', 'max', 'min', 'median', 'std', 'sum', \"skew\", \"kurt\"]\n",
    "    )\n",
    "    \n",
    "    tr_feature = tr_feature.merge(tmp_tr_feature, how=\"left\", on=\"CUST_NO\")\n",
    "    \n",
    "    # 4.2 Record counts grouped by channel\n",
    "    cols_dict = dict()\n",
    "    tmp_tr_feature = tr_feature[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
    "    \n",
    "    tmp_tr_feature, cols = get_id_category_features(\n",
    "        tmp_tr_feature, df_tr, fea1='APSDTRCHL', fea2='CUST_NO', stat='count'\n",
    "    )\n",
    "    cols_dict[\"APSDTRCHL\"] = cols\n",
    "    \n",
    "    tr_feature = tr_feature.merge(tmp_tr_feature, how=\"left\", on=\"CUST_NO\")\n",
    "    \n",
    "    print(f\"渠道分组特征完成: 当前特征数 {len(tr_feature.columns)}\")\n",
    "    \n",
    "    # ========== 5. Channel-preference features ==========\n",
    "    print(\"生成渠道偏好特征...\")\n",
    "    \n",
    "    # Preference = count on one channel / total transaction count\n",
    "    tr_freq_chl = tr_feature[[\"CUST_NO\"] + cols_dict[\"APSDTRCHL\"]]\n",
    "    tr_freq = df_tr.groupby(['CUST_NO']).agg({'CUST_NO': 'count'})\n",
    "    tr_freq.columns = ['tr_freq']\n",
    "    tr_freq = tr_freq.reset_index(drop=False)\n",
    "    \n",
    "    for chl_col in tqdm(cols_dict[\"APSDTRCHL\"], desc=\"生成渠道偏好\"):\n",
    "        tr_prefer_chl = get_division_features(tr_freq_chl, tr_freq, chl_col, 'tr_freq')\n",
    "        # Skip channels whose preference is identically zero\n",
    "        if tr_prefer_chl[tr_prefer_chl.columns[1]].sum() > 0:\n",
    "            tr_feature = tr_feature.merge(tr_prefer_chl, on=\"CUST_NO\", how=\"left\")\n",
    "    \n",
    "    print(f\"渠道偏好特征完成: 当前特征数 {len(tr_feature.columns)}\")\n",
    "    \n",
    "    # ========== 6. Extended advanced features ==========\n",
    "    print(\"生成扩展高级特征...\")\n",
    "    \n",
    "    # 6.1 Time-series features\n",
    "    time_series_features = get_time_series_features(df_tr, tr_feature)\n",
    "    tr_feature = tr_feature.merge(time_series_features, on=\"CUST_NO\", how=\"left\")\n",
    "    \n",
    "    # 6.2 Amount-distribution features\n",
    "    amount_dist_features = get_amount_distribution_features(df_tr, tr_feature)\n",
    "    tr_feature = tr_feature.merge(amount_dist_features, on=\"CUST_NO\", how=\"left\")\n",
    "    \n",
    "    # 6.3 Channel-diversity features\n",
    "    channel_div_features = get_channel_diversity_features(df_tr, tr_feature)\n",
    "    tr_feature = tr_feature.merge(channel_div_features, on=\"CUST_NO\", how=\"left\")\n",
    "    \n",
    "    # 6.4 Transaction-code features\n",
    "    code_features = get_transaction_code_features(df_tr, tr_feature)\n",
    "    tr_feature = tr_feature.merge(code_features, on=\"CUST_NO\", how=\"left\")\n",
    "    \n",
    "    # 6.5 Behavioral-pattern features\n",
    "    behavior_features = get_behavioral_pattern_features(df_tr, tr_feature)\n",
    "    tr_feature = tr_feature.merge(behavior_features, on=\"CUST_NO\", how=\"left\")\n",
    "    \n",
    "    # 6.6 Advanced quantile features\n",
    "    advanced_quantile_features = get_advanced_quantile_features(df_tr, tr_feature)\n",
    "    tr_feature = tr_feature.merge(advanced_quantile_features, on=\"CUST_NO\", how=\"left\")\n",
    "    \n",
    "    # 6.7 Stability features\n",
    "    stability_features = get_stability_features(df_tr, tr_feature)\n",
    "    tr_feature = tr_feature.merge(stability_features, on=\"CUST_NO\", how=\"left\")\n",
    "    \n",
    "    print(f\"扩展特征完成: 当前特征数 {len(tr_feature.columns)}\")\n",
    "    \n",
    "    # ========== 7. Rename features (assumes CUST_NO is the first column) ==========\n",
    "    tr_feature.columns = [\"CUST_NO\"] + [\n",
    "        \"{}_{}\".format(col, postfix) for col in tr_feature.columns if col != \"CUST_NO\"\n",
    "    ]\n",
    "    \n",
    "    print(f\"特征生成完成! 最终特征数: {len(tr_feature.columns) - 1}\")\n",
    "    print(f\"{'='*80}\\n\")\n",
    "    \n",
    "    return tr_feature\n",
    "\n",
    "print(\"核心特征生成函数定义完成\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c8b7e9dc",
   "metadata": {},
   "source": [
    "### 执行特征构建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "bf0f1d0c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "客户数: 5,616\n",
      "交易记录数: 345,313\n",
      "人均交易笔数: 61.49\n"
     ]
    }
   ],
   "source": [
    "# ========== Initialize the feature DataFrame ==========\n",
    "from copy import deepcopy\n",
    "\n",
    "# Seed frame: one row per unique customer; every feature block is later merged onto it\n",
    "aps_feature = tr_aps_dtl[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
    "print(f\"客户数: {aps_feature.shape[0]:,}\")\n",
    "print(f\"交易记录数: {tr_aps_dtl.shape[0]:,}\")\n",
    "print(f\"人均交易笔数: {tr_aps_dtl.shape[0] / aps_feature.shape[0]:.2f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "725fd818",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "================================================================================\n",
      "开始构建流入交易特征\n",
      "================================================================================\n",
      "流入交易记录数: 84,354 (24.43%)\n",
      "流入客户数: 5,582\n",
      "\n",
      "================================================================================\n",
      "开始生成活期交易特征 (postfix=in)\n",
      "================================================================================\n",
      "数据预处理: 按天/代码/渠道聚合...\n",
      "按天聚合完成: 54226 条记录\n",
      "生成滑窗统计特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "生成date_months_to_now分组特征: 100%|██████████| 8/8 [00:00<00:00, 21.29it/s]\n",
      "生成date_months_to_now分组特征: 100%|██████████| 1/1 [00:00<00:00, 97.61it/s]\n",
      "生成date_months_to_now分组特征: 100%|██████████| 1/1 [00:00<00:00, 45.70it/s]\n",
      "生成date_months_to_now分组特征: 100%|██████████| 1/1 [00:00<00:00, 47.97it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "滑窗特征完成: 当前特征数 34\n",
      "生成分位数特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "生成分位数特征: 100%|██████████| 3/3 [00:00<00:00, 1000.07it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "分位数特征完成: 当前特征数 40\n",
      "生成交易渠道分组特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "生成APSDTRCHL分组特征: 100%|██████████| 8/8 [00:00<00:00, 14.85it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "渠道分组特征完成: 当前特征数 239\n",
      "生成渠道偏好特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "生成渠道偏好: 100%|██████████| 24/24 [00:00<00:00, 130.09it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "渠道偏好特征完成: 当前特征数 263\n",
      "生成扩展高级特征...\n",
      "生成时间序列特征...\n",
      "时间序列特征: 6 个\n",
      "生成金额分布特征...\n",
      "金额分布特征: 9 个\n",
      "生成渠道多样性特征...\n",
      "渠道多样性特征: 5 个\n",
      "生成交易代码特征...\n",
      "交易代码特征: 3 个\n",
      "生成行为模式特征...\n",
      "行为模式特征: 5 个\n",
      "生成高级分位数特征...\n",
      "高级分位数特征: 9 个\n",
      "生成稳定性特征...\n",
      "稳定性特征: 4 个\n",
      "扩展特征完成: 当前特征数 304\n",
      "特征生成完成! 最终特征数: 303\n",
      "================================================================================\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# ========== Inflow transaction features (APSDTRAMT >= 0) ==========\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"开始构建流入交易特征\")\n",
    "print(\"=\"*80)\n",
    "# NOTE(review): zero-amount records are classified as inflow here — confirm intended\n",
    "aps_in = deepcopy(tr_aps_dtl[tr_aps_dtl[\"APSDTRAMT\"] >= 0])\n",
    "print(f\"流入交易记录数: {aps_in.shape[0]:,} ({aps_in.shape[0]/tr_aps_dtl.shape[0]*100:.2f}%)\")\n",
    "print(f\"流入客户数: {aps_in['CUST_NO'].nunique():,}\")\n",
    "\n",
    "# Build the full per-day feature set; every column gets an 'in' suffix\n",
    "aps_feature_in = gen_aps_features_by_day(aps_in, aps_feature, postfix='in')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "43b97407",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "================================================================================\n",
      "开始构建流出交易特征\n",
      "================================================================================\n",
      "流出交易记录数: 260,959 (75.57%)\n",
      "流出客户数: 4,857\n",
      "\n",
      "================================================================================\n",
      "开始生成活期交易特征 (postfix=out)\n",
      "================================================================================\n",
      "数据预处理: 按天/代码/渠道聚合...\n",
      "按天聚合完成: 97882 条记录\n",
      "生成滑窗统计特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "生成date_months_to_now分组特征: 100%|██████████| 8/8 [00:00<00:00, 17.75it/s]\n",
      "生成date_months_to_now分组特征: 100%|██████████| 1/1 [00:00<00:00, 72.80it/s]\n",
      "生成date_months_to_now分组特征: 100%|██████████| 1/1 [00:00<00:00, 21.06it/s]\n",
      "生成date_months_to_now分组特征: 100%|██████████| 1/1 [00:00<00:00, 21.07it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "滑窗特征完成: 当前特征数 34\n",
      "生成分位数特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "生成分位数特征: 100%|██████████| 3/3 [00:00<00:00, 993.13it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "分位数特征完成: 当前特征数 40\n",
      "生成交易渠道分组特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "生成APSDTRCHL分组特征: 100%|██████████| 8/8 [00:00<00:00, 16.37it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "渠道分组特征完成: 当前特征数 247\n",
      "生成渠道偏好特征...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "生成渠道偏好: 100%|██████████| 24/24 [00:00<00:00, 119.64it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "渠道偏好特征完成: 当前特征数 271\n",
      "生成扩展高级特征...\n",
      "生成时间序列特征...\n",
      "时间序列特征: 6 个\n",
      "生成金额分布特征...\n",
      "金额分布特征: 9 个\n",
      "生成渠道多样性特征...\n",
      "渠道多样性特征: 5 个\n",
      "生成交易代码特征...\n",
      "交易代码特征: 3 个\n",
      "生成行为模式特征...\n",
      "行为模式特征: 5 个\n",
      "生成高级分位数特征...\n",
      "高级分位数特征: 9 个\n",
      "生成稳定性特征...\n",
      "稳定性特征: 4 个\n",
      "扩展特征完成: 当前特征数 312\n",
      "特征生成完成! 最终特征数: 311\n",
      "================================================================================\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# ========== Outflow transaction features (APSDTRAMT < 0) ==========\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"开始构建流出交易特征\")\n",
    "print(\"=\"*80)\n",
    "aps_out = deepcopy(tr_aps_dtl[tr_aps_dtl[\"APSDTRAMT\"] < 0])\n",
    "print(f\"流出交易记录数: {aps_out.shape[0]:,} ({aps_out.shape[0]/tr_aps_dtl.shape[0]*100:.2f}%)\")\n",
    "print(f\"流出客户数: {aps_out['CUST_NO'].nunique():,}\")\n",
    "\n",
    "# Build the full per-day feature set; every column gets an 'out' suffix\n",
    "aps_feature_out = gen_aps_features_by_day(aps_out, aps_feature, postfix='out')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "8deb4b72",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "================================================================================\n",
      "开始构建RFM特征\n",
      "================================================================================\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "生成流入RFM特征: 100%|██████████| 3/3 [00:00<00:00, 48.35it/s]\n",
      "生成流出RFM特征: 100%|██████████| 3/3 [00:00<00:00, 25.82it/s]\n"
     ]
    }
   ],
   "source": [
    "# ========== RFM特征 (整体+流入流出分别) ==========\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"开始构建RFM特征\")\n",
    "print(\"=\"*80)\n",
    "aps_feature = gen_aps_day_features_by_month(tr_aps_dtl, aps_feature, dual_dir=True, postfix='')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "f93cd584",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "================================================================================\n",
      "合并所有特征\n",
      "================================================================================\n",
      "合并流入特征后: (5616, 334)\n",
      "合并流出特征后: (5616, 645)\n"
     ]
    }
   ],
   "source": [
    "# ========== Merge all feature blocks ==========\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"合并所有特征\")\n",
    "print(\"=\"*80)\n",
    "# Left-join on CUST_NO so every customer in the seed frame is retained\n",
    "aps_feature = aps_feature.merge(aps_feature_in, how=\"left\", on=\"CUST_NO\")\n",
    "print(f\"合并流入特征后: {aps_feature.shape}\")\n",
    "\n",
    "aps_feature = aps_feature.merge(aps_feature_out, how=\"left\", on=\"CUST_NO\")\n",
    "print(f\"合并流出特征后: {aps_feature.shape}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "fb5518c6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "活期交易表特征工程完成!\n",
      "   最终特征数: 644\n",
      "   客户覆盖率: 5616 / 5975\n"
     ]
    }
   ],
   "source": [
    "# Prefix every feature column with 'aps_' (assumes CUST_NO is the first column)\n",
    "aps_feature.columns = [\"CUST_NO\"] + [\n",
    "    \"aps_{}\".format(col) for col in aps_feature.columns if col != \"CUST_NO\"\n",
    "]\n",
    "\n",
    "print(f\"\\n活期交易表特征工程完成!\")\n",
    "print(f\"   最终特征数: {aps_feature.shape[1] - 1}\")\n",
    "print(f\"   客户覆盖率: {aps_feature['CUST_NO'].nunique()} / {TARGET_data['CUST_NO'].nunique()}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "fe6d1fb8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>CUST_NO</th>\n",
       "      <th>aps_max_amt_days_to_now_0_in_</th>\n",
       "      <th>aps_recent_days_to_now_0_in_</th>\n",
       "      <th>aps_max_absamt_0_in_</th>\n",
       "      <th>aps_maxamt_days_to_recent_0_in_</th>\n",
       "      <th>aps_max_amt_days_to_now_1_in_</th>\n",
       "      <th>aps_recent_days_to_now_1_in_</th>\n",
       "      <th>aps_max_absamt_1_in_</th>\n",
       "      <th>aps_maxamt_days_to_recent_1_in_</th>\n",
       "      <th>aps_max_amt_days_to_now_2_in_</th>\n",
       "      <th>...</th>\n",
       "      <th>aps_relative_position_q25_out</th>\n",
       "      <th>aps_relative_position_q50_out</th>\n",
       "      <th>aps_relative_position_q75_out</th>\n",
       "      <th>aps_relative_position_q90_out</th>\n",
       "      <th>aps_relative_position_q95_out</th>\n",
       "      <th>aps_iqr_span_out</th>\n",
       "      <th>aps_monthly_amount_stability_out</th>\n",
       "      <th>aps_monthly_count_stability_out</th>\n",
       "      <th>aps_monthly_growth_rate_out</th>\n",
       "      <th>aps_amount_trend_slope_out</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>3abac600050b2b3ad8876a1caf85beb9</td>\n",
       "      <td>12.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>80000.00</td>\n",
       "      <td>-12.0</td>\n",
       "      <td>31.0</td>\n",
       "      <td>31.0</td>\n",
       "      <td>70000.00</td>\n",
       "      <td>0.0</td>\n",
       "      <td>89.0</td>\n",
       "      <td>...</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>3906.67</td>\n",
       "      <td>-0.122390</td>\n",
       "      <td>0.185265</td>\n",
       "      <td>-0.240408</td>\n",
       "      <td>-3782.860</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>ddcdd6152f2648b5ed0b542fb770928c</td>\n",
       "      <td>17.0</td>\n",
       "      <td>9.0</td>\n",
       "      <td>100000.00</td>\n",
       "      <td>-8.0</td>\n",
       "      <td>31.0</td>\n",
       "      <td>31.0</td>\n",
       "      <td>5000.00</td>\n",
       "      <td>0.0</td>\n",
       "      <td>89.0</td>\n",
       "      <td>...</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>44250.00</td>\n",
       "      <td>-1.645064</td>\n",
       "      <td>0.866025</td>\n",
       "      <td>-27.604180</td>\n",
       "      <td>193249.310</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>73ca3558553b672f53f1a173f46aec24</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>8215.18</td>\n",
       "      <td>0.0</td>\n",
       "      <td>37.0</td>\n",
       "      <td>37.0</td>\n",
       "      <td>7193.55</td>\n",
       "      <td>0.0</td>\n",
       "      <td>88.0</td>\n",
       "      <td>...</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>32.50</td>\n",
       "      <td>-0.462746</td>\n",
       "      <td>0.100791</td>\n",
       "      <td>0.426570</td>\n",
       "      <td>-5469.245</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>889675d1d1b93d771f246b41606562f0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>30000.00</td>\n",
       "      <td>0.0</td>\n",
       "      <td>51.0</td>\n",
       "      <td>37.0</td>\n",
       "      <td>20000.00</td>\n",
       "      <td>-14.0</td>\n",
       "      <td>80.0</td>\n",
       "      <td>...</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>100.00</td>\n",
       "      <td>-0.421163</td>\n",
       "      <td>0.547153</td>\n",
       "      <td>0.352916</td>\n",
       "      <td>4195.655</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>62f69a160f6b618250ba3c8f2b67bb52</td>\n",
       "      <td>30.0</td>\n",
       "      <td>9.0</td>\n",
       "      <td>30000.00</td>\n",
       "      <td>-21.0</td>\n",
       "      <td>33.0</td>\n",
       "      <td>31.0</td>\n",
       "      <td>76500.00</td>\n",
       "      <td>-2.0</td>\n",
       "      <td>83.0</td>\n",
       "      <td>...</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>915.00</td>\n",
       "      <td>-0.921565</td>\n",
       "      <td>1.289518</td>\n",
       "      <td>-180.638723</td>\n",
       "      <td>33573.500</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5 rows × 645 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "                            CUST_NO  aps_max_amt_days_to_now_0_in_  \\\n",
       "0  3abac600050b2b3ad8876a1caf85beb9                           12.0   \n",
       "1  ddcdd6152f2648b5ed0b542fb770928c                           17.0   \n",
       "2  73ca3558553b672f53f1a173f46aec24                            0.0   \n",
       "3  889675d1d1b93d771f246b41606562f0                            4.0   \n",
       "4  62f69a160f6b618250ba3c8f2b67bb52                           30.0   \n",
       "\n",
       "   aps_recent_days_to_now_0_in_  aps_max_absamt_0_in_  \\\n",
       "0                           0.0              80000.00   \n",
       "1                           9.0             100000.00   \n",
       "2                           0.0               8215.18   \n",
       "3                           4.0              30000.00   \n",
       "4                           9.0              30000.00   \n",
       "\n",
       "   aps_maxamt_days_to_recent_0_in_  aps_max_amt_days_to_now_1_in_  \\\n",
       "0                            -12.0                           31.0   \n",
       "1                             -8.0                           31.0   \n",
       "2                              0.0                           37.0   \n",
       "3                              0.0                           51.0   \n",
       "4                            -21.0                           33.0   \n",
       "\n",
       "   aps_recent_days_to_now_1_in_  aps_max_absamt_1_in_  \\\n",
       "0                          31.0              70000.00   \n",
       "1                          31.0               5000.00   \n",
       "2                          37.0               7193.55   \n",
       "3                          37.0              20000.00   \n",
       "4                          31.0              76500.00   \n",
       "\n",
       "   aps_maxamt_days_to_recent_1_in_  aps_max_amt_days_to_now_2_in_  ...  \\\n",
       "0                              0.0                           89.0  ...   \n",
       "1                              0.0                           89.0  ...   \n",
       "2                              0.0                           88.0  ...   \n",
       "3                            -14.0                           80.0  ...   \n",
       "4                             -2.0                           83.0  ...   \n",
       "\n",
       "   aps_relative_position_q25_out  aps_relative_position_q50_out  \\\n",
       "0                            1.0                            1.0   \n",
       "1                            1.0                            1.0   \n",
       "2                            1.0                            1.0   \n",
       "3                            1.0                            1.0   \n",
       "4                            1.0                            1.0   \n",
       "\n",
       "   aps_relative_position_q75_out  aps_relative_position_q90_out  \\\n",
       "0                            1.0                            1.0   \n",
       "1                            1.0                            1.0   \n",
       "2                            0.0                            0.0   \n",
       "3                            0.0                            0.0   \n",
       "4                            1.0                            1.0   \n",
       "\n",
       "   aps_relative_position_q95_out  aps_iqr_span_out  \\\n",
       "0                            0.0           3906.67   \n",
       "1                            1.0          44250.00   \n",
       "2                            0.0             32.50   \n",
       "3                            0.0            100.00   \n",
       "4                            1.0            915.00   \n",
       "\n",
       "   aps_monthly_amount_stability_out  aps_monthly_count_stability_out  \\\n",
       "0                         -0.122390                         0.185265   \n",
       "1                         -1.645064                         0.866025   \n",
       "2                         -0.462746                         0.100791   \n",
       "3                         -0.421163                         0.547153   \n",
       "4                         -0.921565                         1.289518   \n",
       "\n",
       "   aps_monthly_growth_rate_out  aps_amount_trend_slope_out  \n",
       "0                    -0.240408                   -3782.860  \n",
       "1                   -27.604180                  193249.310  \n",
       "2                     0.426570                   -5469.245  \n",
       "3                     0.352916                    4195.655  \n",
       "4                  -180.638723                   33573.500  \n",
       "\n",
       "[5 rows x 645 columns]"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Preview the first 5 rows of the engineered APS feature table (645 columns)\n",
    "aps_feature.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "63c3dc06",
   "metadata": {},
   "source": [
    "### 保存特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "b622ff9e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "特征已保存到: ./feature/tr_aps_dtl_features.pkl\n",
      "文件大小: 27.82 MB\n"
     ]
    }
   ],
   "source": [
    "# Ensure the feature output directory exists before writing\n",
    "feature_dir = 'feature'\n",
    "if not os.path.exists(feature_dir):\n",
    "    os.makedirs(feature_dir)\n",
    "    print(f\"创建目录: {feature_dir}\")\n",
    "\n",
    "# Persist engineered features as pickle (fast to reload, preserves dtypes).\n",
    "# Build the path from feature_dir so the directory check above and the\n",
    "# write target cannot silently diverge if the directory name changes.\n",
    "feature_path = os.path.join(feature_dir, 'tr_aps_dtl_features.pkl')\n",
    "aps_feature.to_pickle(feature_path)\n",
    "print(f\"特征已保存到: {feature_path}\")\n",
    "print(f\"文件大小: {os.path.getsize(feature_path) / 1024 / 1024:.2f} MB\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "starcup",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
