{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "09f7126f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Suppress all warning output to keep the notebook readable.\n",
    "# NOTE(review): a blanket ignore can hide real issues (e.g. pandas dtype warnings) — consider narrowing.\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "c6ab2b72",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import gc\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import re\n",
    "import time\n",
    "from scipy import stats\n",
    "import matplotlib.pyplot as plt\n",
    "import category_encoders as ce\n",
    "import networkx as nx\n",
    "import pickle\n",
    "import lightgbm as lgb\n",
    "import catboost as cat\n",
    "import xgboost as xgb\n",
    "import seaborn as sns\n",
    "from datetime import timedelta\n",
    "from gensim.models import Word2Vec\n",
    "from io import StringIO\n",
    "from tqdm import tqdm\n",
    "from lightgbm import LGBMClassifier\n",
    "from lightgbm import log_evaluation, early_stopping\n",
    "from sklearn.metrics import roc_curve\n",
    "from scipy.stats import chi2_contingency, pearsonr\n",
    "from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n",
    "from sklearn.feature_extraction import FeatureHasher\n",
    "from sklearn.model_selection import StratifiedKFold, KFold, train_test_split, GridSearchCV\n",
    "from category_encoders import TargetEncoder\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from autogluon.tabular import TabularDataset, TabularPredictor, FeatureMetadata\n",
    "from autogluon.features.generators import AsTypeFeatureGenerator, BulkFeatureGenerator, DropUniqueFeatureGenerator, FillNaFeatureGenerator, PipelineFeatureGenerator\n",
    "from autogluon.features.generators import CategoryFeatureGenerator, IdentityFeatureGenerator, AutoMLPipelineFeatureGenerator\n",
    "from autogluon.common.features.types import R_INT, R_FLOAT\n",
    "from autogluon.core.metrics import make_scorer"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a08d6044",
   "metadata": {},
   "source": [
    "## 数据导入"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "df055add",
   "metadata": {},
   "source": [
    "## 通用导入函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "74bcbf7b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data_from_directory(directory):\n",
    "    \"\"\"\n",
    "    Load every CSV file in a directory as a separate global DataFrame.\n",
    "\n",
    "    Parameters:\n",
    "    - directory: path of the input data directory\n",
    "\n",
    "    Returns:\n",
    "    - list of the created dataset variable names\n",
    "    \"\"\"\n",
    "    dataset_names = []\n",
    "    # Bugfix: os.listdir returns entries in arbitrary, platform-dependent order;\n",
    "    # sort for a deterministic, reproducible load order.\n",
    "    for filename in sorted(os.listdir(directory)):\n",
    "        if filename.endswith(\".csv\"):\n",
    "            dataset_name = os.path.splitext(filename)[0] + '_data'  # file stem becomes the variable name\n",
    "            file_path = os.path.join(directory, filename)  # full path to the CSV\n",
    "            # NOTE(review): assigning into globals() is fragile but intentional here;\n",
    "            # downstream cells reference e.g. TRAIN_ASSET_data directly.\n",
    "            globals()[dataset_name] = pd.read_csv(file_path)\n",
    "            dataset_names.append(dataset_name)\n",
    "            print(f\"数据集 {dataset_name} 已加载为 DataFrame\")\n",
    "\n",
    "    return dataset_names"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "993c86ce",
   "metadata": {},
   "source": [
    "## 训练集导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "722d1e40",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集 TRAIN_ASSET_data 已加载为 DataFrame\n",
      "数据集 TRAIN_CCD_TR_DTL_data 已加载为 DataFrame\n",
      "数据集 TRAIN_MB_CUST_INFO_data 已加载为 DataFrame\n",
      "数据集 TRAIN_MB_PAGEVIEW_DTL_data 已加载为 DataFrame\n",
      "数据集 TRAIN_MB_TRNFLW_DTL_data 已加载为 DataFrame\n",
      "数据集 TRAIN_NATURE_data 已加载为 DataFrame\n",
      "数据集 TRAIN_PROD_HOLD_data 已加载为 DataFrame\n",
      "数据集 TRAIN_TR_APS_DTL_data 已加载为 DataFrame\n"
     ]
    }
   ],
   "source": [
    "# Load every training CSV; creates globals such as TRAIN_TR_APS_DTL_data.\n",
    "train_load_dt = './data/Train'\n",
    "train_data_name = load_data_from_directory(train_load_dt)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "82e70968",
   "metadata": {},
   "source": [
    "## 测试集导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c3ef0f15",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集 A_ASSET_data 已加载为 DataFrame\n",
      "数据集 A_CCD_TR_DTL_data 已加载为 DataFrame\n",
      "数据集 A_MB_CUST_INFO_data 已加载为 DataFrame\n",
      "数据集 A_MB_PAGEVIEW_DTL_data 已加载为 DataFrame\n",
      "数据集 A_MB_TRNFLW_DTL_data 已加载为 DataFrame\n",
      "数据集 A_PROD_HOLD_data 已加载为 DataFrame\n",
      "数据集 A_TEST_NATURE_data 已加载为 DataFrame\n",
      "数据集 A_TR_APS_DTL_data 已加载为 DataFrame\n"
     ]
    }
   ],
   "source": [
    "# Load every A test-set CSV; creates globals such as A_TR_APS_DTL_data.\n",
    "A_load_dt = './data/A'\n",
    "A_data_name = load_data_from_directory(A_load_dt)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b6a9129f",
   "metadata": {},
   "source": [
    "# 特征工程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "114e171e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================================================================================\n",
      "活期交易明细表(TR_APS_DTL)特征工程\n",
      "================================================================================\n",
      "\n",
      "训练集形状: (2000, 8)\n",
      "测试集形状: (1000, 8)\n",
      "\n",
      "训练集客户数: 654\n",
      "测试集客户数: 348\n"
     ]
    }
   ],
   "source": [
    "# Feature engineering for the demand-deposit transaction detail table (TR_APS_DTL).\n",
    "print(\"=\" * 80)\n",
    "print(\"活期交易明细表(TR_APS_DTL)特征工程\")\n",
    "print(\"=\" * 80)\n",
    "\n",
    "# Work on copies so the raw loaded tables stay untouched.\n",
    "tr_aps_train = TRAIN_TR_APS_DTL_data.copy()\n",
    "tr_aps_test = A_TR_APS_DTL_data.copy()\n",
    "\n",
    "print(f\"\\n训练集形状: {tr_aps_train.shape}\")\n",
    "print(f\"测试集形状: {tr_aps_test.shape}\")\n",
    "print(f\"\\n训练集客户数: {tr_aps_train['CUST_NO'].nunique():,}\")\n",
    "print(f\"测试集客户数: {tr_aps_test['CUST_NO'].nunique():,}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "55801cad",
   "metadata": {},
   "source": [
    "## 数据预处理函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "3c8ae464",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据预处理函数已定义\n"
     ]
    }
   ],
   "source": [
    "def preprocess_tr_aps_data(df):\n",
    "    \"\"\"\n",
    "    Preprocess the demand-deposit transaction detail table (TR_APS_DTL).\n",
    "\n",
    "    Adds parsed dates, recency ('days_to_now'), calendar fields, an optional\n",
    "    time-of-day bucket, absolute amount / income flag / amount bucket, and\n",
    "    finally sorts rows chronologically per customer. Returns a new frame.\n",
    "    \"\"\"\n",
    "    df = df.copy()\n",
    "    \n",
    "    # 1. Parse the YYYYMMDD integer date column\n",
    "    df['APSDTRDAT'] = pd.to_datetime(df['APSDTRDAT'].astype(str), format='%Y%m%d')\n",
    "    \n",
    "    # 2. Days between each transaction and the latest date in this table\n",
    "    end_date = df['APSDTRDAT'].max()\n",
    "    df['days_to_now'] = (end_date - df['APSDTRDAT']).dt.days\n",
    "    \n",
    "    # 3. Calendar features\n",
    "    df['year'] = df['APSDTRDAT'].dt.year\n",
    "    df['month'] = df['APSDTRDAT'].dt.month\n",
    "    df['day'] = df['APSDTRDAT'].dt.day\n",
    "    df['weekday'] = df['APSDTRDAT'].dt.weekday\n",
    "    df['is_weekend'] = df['weekday'].isin([5, 6]).astype(int)\n",
    "    df['week_of_year'] = df['APSDTRDAT'].dt.isocalendar().week\n",
    "    \n",
    "    # 4. Time-of-day bucket (HHMMSS -> hour -> dawn/morning/afternoon/night)\n",
    "    if 'APSDTRTIME' in df.columns:\n",
    "        df['hour'] = df['APSDTRTIME'].astype(str).str.zfill(6).str[:2].astype(int)\n",
    "        df['time_period'] = pd.cut(df['hour'], bins=[0, 6, 12, 18, 24], \n",
    "                                    labels=['dawn', 'morning', 'afternoon', 'night'], \n",
    "                                    include_lowest=True)\n",
    "    \n",
    "    # 5. Amount handling (>= 0 is treated as income, including zero)\n",
    "    df['APSDTRAMT_abs'] = df['APSDTRAMT'].abs()\n",
    "    df['is_income'] = (df['APSDTRAMT'] >= 0).astype(int)\n",
    "    \n",
    "    # 6. Amount bucket\n",
    "    df['amt_level'] = pd.cut(df['APSDTRAMT_abs'], \n",
    "                              bins=[0, 100, 500, 1000, 5000, 10000, 50000, float('inf')],\n",
    "                              labels=['very_small', 'small', 'medium', 'large', 'very_large', 'huge', 'mega'])\n",
    "    \n",
    "    # 7. Sort chronologically per customer.\n",
    "    # Bugfix: APSDTRTIME is treated as optional in step 4 but was previously\n",
    "    # used unconditionally as a sort key, raising KeyError when it is absent.\n",
    "    sort_cols = ['CUST_NO', 'APSDTRDAT']\n",
    "    if 'APSDTRTIME' in df.columns:\n",
    "        sort_cols.append('APSDTRTIME')\n",
    "    df = df.sort_values(sort_cols).reset_index(drop=True)\n",
    "    \n",
    "    print(f\"预处理完成:\")\n",
    "    print(f\"  - 数据形状: {df.shape}\")\n",
    "    print(f\"  - 日期范围: {df['APSDTRDAT'].min()} 至 {df['APSDTRDAT'].max()}\")\n",
    "    print(f\"  - 总天数: {(df['APSDTRDAT'].max() - df['APSDTRDAT'].min()).days}天\")\n",
    "    print(f\"  - 客户数: {df['CUST_NO'].nunique():,}\")\n",
    "    \n",
    "    return df\n",
    "\n",
    "print(\"数据预处理函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5a72ab5d",
   "metadata": {},
   "source": [
    "## 特征工程函数定义"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "61d46ec7",
   "metadata": {},
   "source": [
    "### 1. 基础统计特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "ef7588e7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "基础统计特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def basic_statistics_features(df, windows=[7, 15, 30, 60, 90]):\n",
    "    \"\"\"\n",
    "    Basic per-customer statistics over look-back windows: transaction\n",
    "    amounts, counts and activity.\n",
    "\n",
    "    Expects the preprocessed frame (CUST_NO, days_to_now, APSDTRAMT_abs,\n",
    "    APSDTRDAT). Returns one row per customer; customers with no activity\n",
    "    in a window keep NaN for that window's features.\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    for window in windows:\n",
    "        sub_df = df[df['days_to_now'] < window].copy()\n",
    "        \n",
    "        if len(sub_df) == 0:\n",
    "            continue\n",
    "        \n",
    "        # Amount statistics (the (name, lambda) tuples label the quantile aggs)\n",
    "        amt_stats = sub_df.groupby('CUST_NO')['APSDTRAMT_abs'].agg([\n",
    "            'sum', 'mean', 'std', 'median', 'max', 'min', \n",
    "            ('q25', lambda x: x.quantile(0.25)),\n",
    "            ('q75', lambda x: x.quantile(0.75))\n",
    "        ]).reset_index()\n",
    "        amt_stats.columns = ['CUST_NO'] + [f'aps_amt_{window}d_{c}' for c in \n",
    "                            ['sum', 'mean', 'std', 'median', 'max', 'min', 'q25', 'q75']]\n",
    "        features = features.merge(amt_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Transaction count\n",
    "        txn_count = sub_df.groupby('CUST_NO').size().reset_index(name=f'aps_count_{window}d')\n",
    "        features = features.merge(txn_count, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Number of distinct days with at least one transaction\n",
    "        active_days = sub_df.groupby('CUST_NO')['APSDTRDAT'].nunique().reset_index()\n",
    "        active_days.columns = ['CUST_NO', f'aps_active_days_{window}d']\n",
    "        features = features.merge(active_days, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Daily average amount and count over the full window length\n",
    "        features[f'aps_daily_amt_avg_{window}d'] = features[f'aps_amt_{window}d_sum'] / window\n",
    "        features[f'aps_daily_count_avg_{window}d'] = features[f'aps_count_{window}d'] / window\n",
    "        \n",
    "        # Share of window days that were active\n",
    "        features[f'aps_activity_rate_{window}d'] = features[f'aps_active_days_{window}d'] / window\n",
    "        \n",
    "        # Coefficient of variation of amounts (epsilon avoids division by zero)\n",
    "        features[f'aps_amt_cv_{window}d'] = features[f'aps_amt_{window}d_std'] / (features[f'aps_amt_{window}d_mean'] + 1e-5)\n",
    "        \n",
    "        # Amount range (max - min)\n",
    "        features[f'aps_amt_range_{window}d'] = features[f'aps_amt_{window}d_max'] - features[f'aps_amt_{window}d_min']\n",
    "        \n",
    "        # Interquartile range\n",
    "        features[f'aps_amt_iqr_{window}d'] = features[f'aps_amt_{window}d_q75'] - features[f'aps_amt_{window}d_q25']\n",
    "        \n",
    "        # Transactions per active day (+1 guards division by zero)\n",
    "        features[f'aps_count_per_active_day_{window}d'] = features[f'aps_count_{window}d'] / (features[f'aps_active_days_{window}d'] + 1)\n",
    "        \n",
    "        # Amount per active day\n",
    "        features[f'aps_amt_per_active_day_{window}d'] = features[f'aps_amt_{window}d_sum'] / (features[f'aps_active_days_{window}d'] + 1)\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"基础统计特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5abb934a",
   "metadata": {},
   "source": [
    "### 2. 流入流出分离特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "c68ac173",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "流入流出分离特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def inflow_outflow_features(df, windows=[7, 15, 30, 60, 90]):\n",
    "    \"\"\"\n",
    "    Inflow/outflow split features per customer and window: direction-wise\n",
    "    amount/count statistics, active days, ratios, net inflow and shares.\n",
    "\n",
    "    Expects the preprocessed frame (CUST_NO, days_to_now, is_income,\n",
    "    APSDTRAMT_abs, APSDTRDAT). Returns one row per customer.\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    df_in = df[df['is_income'] == 1].copy()\n",
    "    df_out = df[df['is_income'] == 0].copy()\n",
    "    \n",
    "    for window in windows:\n",
    "        in_sub = df_in[df_in['days_to_now'] < window]\n",
    "        out_sub = df_out[df_out['days_to_now'] < window]\n",
    "        \n",
    "        # Inflow statistics\n",
    "        if len(in_sub) > 0:\n",
    "            in_stats = in_sub.groupby('CUST_NO')['APSDTRAMT_abs'].agg([\n",
    "                'sum', 'mean', 'count', 'max', 'min', 'std', 'median'\n",
    "            ]).reset_index()\n",
    "            in_stats.columns = ['CUST_NO'] + [f'aps_in_{window}d_{c}' for c in \n",
    "                                ['sum', 'mean', 'count', 'max', 'min', 'std', 'median']]\n",
    "            features = features.merge(in_stats, on='CUST_NO', how='left')\n",
    "            \n",
    "            # Number of distinct inflow days\n",
    "            in_active = in_sub.groupby('CUST_NO')['APSDTRDAT'].nunique().reset_index()\n",
    "            in_active.columns = ['CUST_NO', f'aps_in_active_days_{window}d']\n",
    "            features = features.merge(in_active, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Outflow statistics\n",
    "        if len(out_sub) > 0:\n",
    "            out_stats = out_sub.groupby('CUST_NO')['APSDTRAMT_abs'].agg([\n",
    "                'sum', 'mean', 'count', 'max', 'min', 'std', 'median'\n",
    "            ]).reset_index()\n",
    "            out_stats.columns = ['CUST_NO'] + [f'aps_out_{window}d_{c}' for c in \n",
    "                                ['sum', 'mean', 'count', 'max', 'min', 'std', 'median']]\n",
    "            features = features.merge(out_stats, on='CUST_NO', how='left')\n",
    "            \n",
    "            # Number of distinct outflow days\n",
    "            out_active = out_sub.groupby('CUST_NO')['APSDTRDAT'].nunique().reset_index()\n",
    "            out_active.columns = ['CUST_NO', f'aps_out_active_days_{window}d']\n",
    "            features = features.merge(out_active, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Bugfix: when one direction has no rows in this window, its columns were\n",
    "        # never merged and the derived features below raised KeyError.\n",
    "        # Materialize any missing columns as NaN so the math stays consistent.\n",
    "        for direction in ['in', 'out']:\n",
    "            needed = [f'aps_{direction}_{window}d_{c}' for c in\n",
    "                      ['sum', 'mean', 'count', 'max', 'min', 'std', 'median']]\n",
    "            needed.append(f'aps_{direction}_active_days_{window}d')\n",
    "            for col in needed:\n",
    "                if col not in features.columns:\n",
    "                    features[col] = np.nan\n",
    "        \n",
    "        # Amount / count ratios between directions (epsilon avoids division by zero)\n",
    "        features[f'aps_in_out_amt_ratio_{window}d'] = features[f'aps_in_{window}d_sum'] / (features[f'aps_out_{window}d_sum'] + 1e-5)\n",
    "        features[f'aps_in_out_count_ratio_{window}d'] = features[f'aps_in_{window}d_count'] / (features[f'aps_out_{window}d_count'] + 1e-5)\n",
    "        \n",
    "        # Net inflow\n",
    "        features[f'aps_net_inflow_{window}d'] = features[f'aps_in_{window}d_sum'] - features[f'aps_out_{window}d_sum']\n",
    "        \n",
    "        # Difference in active days between directions\n",
    "        features[f'aps_in_out_active_diff_{window}d'] = features[f'aps_in_active_days_{window}d'] - features[f'aps_out_active_days_{window}d']\n",
    "        \n",
    "        # Difference in per-active-day transaction frequency (+1 guards div-by-zero)\n",
    "        features[f'aps_in_out_freq_diff_{window}d'] = (features[f'aps_in_{window}d_count'] / (features[f'aps_in_active_days_{window}d'] + 1)) - \\\n",
    "                                                        (features[f'aps_out_{window}d_count'] / (features[f'aps_out_active_days_{window}d'] + 1))\n",
    "        \n",
    "        # Inflow share of total amount\n",
    "        total_amt = features[f'aps_in_{window}d_sum'].fillna(0) + features[f'aps_out_{window}d_sum'].fillna(0)\n",
    "        features[f'aps_in_amt_pct_{window}d'] = features[f'aps_in_{window}d_sum'] / (total_amt + 1e-5)\n",
    "        \n",
    "        # Inflow share of total count\n",
    "        total_count = features[f'aps_in_{window}d_count'].fillna(0) + features[f'aps_out_{window}d_count'].fillna(0)\n",
    "        features[f'aps_in_count_pct_{window}d'] = features[f'aps_in_{window}d_count'] / (total_count + 1e-5)\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"流入流出分离特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1ba39549",
   "metadata": {},
   "source": [
    "### 3. 时序趋势特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "766b0b40",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "时序趋势特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def time_trend_features(df):\n",
    "    \"\"\"\n",
    "    Time-trend features: short-vs-long window comparisons, monthly/weekly\n",
    "    variability, recent month-over-month growth and monotone trend flags.\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    # (short, long) window pairs to compare\n",
    "    comparisons = [(7, 30), (7, 60), (7, 90), (30, 60), (30, 90), (60, 90)]\n",
    "    \n",
    "    for short_w, long_w in comparisons:\n",
    "        short_df = df[df['days_to_now'] < short_w]\n",
    "        long_df = df[df['days_to_now'] < long_w]\n",
    "        \n",
    "        # Amount comparison\n",
    "        short_amt = short_df.groupby('CUST_NO')['APSDTRAMT_abs'].sum().reset_index()\n",
    "        short_amt.columns = ['CUST_NO', 'short_amt']\n",
    "        long_amt = long_df.groupby('CUST_NO')['APSDTRAMT_abs'].sum().reset_index()\n",
    "        long_amt.columns = ['CUST_NO', 'long_amt']\n",
    "        \n",
    "        compare = features[['CUST_NO']].merge(short_amt, on='CUST_NO', how='left')\n",
    "        compare = compare.merge(long_amt, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Ratio of daily-average amounts (short window vs long window)\n",
    "        compare[f'aps_amt_ratio_{short_w}d_{long_w}d'] = (compare['short_amt'] / short_w) / ((compare['long_amt'] / long_w) + 1e-5)\n",
    "        features = features.merge(compare[['CUST_NO', f'aps_amt_ratio_{short_w}d_{long_w}d']], on='CUST_NO', how='left')\n",
    "        \n",
    "        # Count comparison\n",
    "        short_cnt = short_df.groupby('CUST_NO').size().reset_index(name='short_cnt')\n",
    "        long_cnt = long_df.groupby('CUST_NO').size().reset_index(name='long_cnt')\n",
    "        \n",
    "        compare = features[['CUST_NO']].merge(short_cnt, on='CUST_NO', how='left')\n",
    "        compare = compare.merge(long_cnt, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Ratio of daily-average counts\n",
    "        compare[f'aps_count_ratio_{short_w}d_{long_w}d'] = (compare['short_cnt'] / short_w) / ((compare['long_cnt'] / long_w) + 1e-5)\n",
    "        features = features.merge(compare[['CUST_NO', f'aps_count_ratio_{short_w}d_{long_w}d']], on='CUST_NO', how='left')\n",
    "    \n",
    "    # Month-to-month variability of total spend\n",
    "    monthly_amt = df.groupby(['CUST_NO', 'month'])['APSDTRAMT_abs'].sum().reset_index()\n",
    "    monthly_std = monthly_amt.groupby('CUST_NO')['APSDTRAMT_abs'].std().reset_index()\n",
    "    monthly_std.columns = ['CUST_NO', 'aps_monthly_amt_std']\n",
    "    features = features.merge(monthly_std, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Week-to-week variability of total spend\n",
    "    weekly_amt = df.groupby(['CUST_NO', 'week_of_year'])['APSDTRAMT_abs'].sum().reset_index()\n",
    "    weekly_std = weekly_amt.groupby('CUST_NO')['APSDTRAMT_abs'].std().reset_index()\n",
    "    weekly_std.columns = ['CUST_NO', 'aps_weekly_amt_std']\n",
    "    features = features.merge(weekly_std, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Totals for the three most recent 30-day periods (month1 = most recent)\n",
    "    month_periods = [(0, 30), (30, 60), (60, 90)]\n",
    "    for i, (start, end) in enumerate(month_periods, 1):\n",
    "        period_df = df[(df['days_to_now'] >= start) & (df['days_to_now'] < end)]\n",
    "        period_amt = period_df.groupby('CUST_NO')['APSDTRAMT_abs'].sum().reset_index()\n",
    "        period_amt.columns = ['CUST_NO', f'aps_month{i}_amt']\n",
    "        features = features.merge(period_amt, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Month-over-month growth rates (epsilon avoids division by zero)\n",
    "    features['aps_month1_month2_growth'] = (features['aps_month1_amt'] - features['aps_month2_amt']) / (features['aps_month2_amt'] + 1e-5)\n",
    "    features['aps_month2_month3_growth'] = (features['aps_month2_amt'] - features['aps_month3_amt']) / (features['aps_month3_amt'] + 1e-5)\n",
    "    \n",
    "    # Monotone trend flags over the three periods (increasing / decreasing)\n",
    "    features['aps_trend_increasing'] = ((features['aps_month1_amt'] > features['aps_month2_amt']) & \n",
    "                                        (features['aps_month2_amt'] > features['aps_month3_amt'])).astype(int)\n",
    "    features['aps_trend_decreasing'] = ((features['aps_month1_amt'] < features['aps_month2_amt']) & \n",
    "                                        (features['aps_month2_amt'] < features['aps_month3_amt'])).astype(int)\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"时序趋势特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5dcc0d34",
   "metadata": {},
   "source": [
    "### 4. 周期性特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "78e77e38",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "周期性特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def periodicity_features(df):\n",
    "    \"\"\"\n",
    "    Periodicity features: weekday vs weekend behavior, early/mid/late month\n",
    "    buckets, per-weekday distribution and (when available) time-of-day shares.\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    # Split rows into weekday vs weekend\n",
    "    weekday_df = df[df['is_weekend'] == 0]\n",
    "    weekend_df = df[df['is_weekend'] == 1]\n",
    "    \n",
    "    # Weekday statistics\n",
    "    if len(weekday_df) > 0:\n",
    "        weekday_stats = weekday_df.groupby('CUST_NO').agg({\n",
    "            'APSDTRAMT_abs': ['sum', 'mean', 'count', 'max'],\n",
    "            'APSDTRDAT': 'nunique'\n",
    "        }).reset_index()\n",
    "        weekday_stats.columns = ['CUST_NO', 'aps_weekday_amt_sum', 'aps_weekday_amt_mean',\n",
    "                                  'aps_weekday_count', 'aps_weekday_amt_max', 'aps_weekday_active_days']\n",
    "        features = features.merge(weekday_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Weekend statistics\n",
    "    if len(weekend_df) > 0:\n",
    "        weekend_stats = weekend_df.groupby('CUST_NO').agg({\n",
    "            'APSDTRAMT_abs': ['sum', 'mean', 'count', 'max'],\n",
    "            'APSDTRDAT': 'nunique'\n",
    "        }).reset_index()\n",
    "        weekend_stats.columns = ['CUST_NO', 'aps_weekend_amt_sum', 'aps_weekend_amt_mean',\n",
    "                                  'aps_weekend_count', 'aps_weekend_amt_max', 'aps_weekend_active_days']\n",
    "        features = features.merge(weekend_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Weekday/weekend ratios (handles data where one side is entirely absent)\n",
    "    if 'aps_weekday_amt_sum' in features.columns and 'aps_weekend_amt_sum' in features.columns:\n",
    "        features['aps_weekday_weekend_amt_ratio'] = features['aps_weekday_amt_sum'] / (features['aps_weekend_amt_sum'] + 1e-5)\n",
    "        features['aps_weekday_weekend_count_ratio'] = features['aps_weekday_count'] / (features['aps_weekend_count'] + 1e-5)\n",
    "        features['aps_weekday_amt_pct'] = features['aps_weekday_amt_sum'] / (features['aps_weekday_amt_sum'].fillna(0) + features['aps_weekend_amt_sum'].fillna(0) + 1e-5)\n",
    "    elif 'aps_weekday_amt_sum' in features.columns:\n",
    "        # Only weekday transactions present in the data\n",
    "        features['aps_weekend_amt_sum'] = 0\n",
    "        features['aps_weekend_amt_mean'] = 0\n",
    "        features['aps_weekend_count'] = 0\n",
    "        features['aps_weekend_amt_max'] = 0\n",
    "        features['aps_weekend_active_days'] = 0\n",
    "        features['aps_weekday_weekend_amt_ratio'] = 0\n",
    "        features['aps_weekday_weekend_count_ratio'] = 0\n",
    "        features['aps_weekday_amt_pct'] = 1.0\n",
    "    elif 'aps_weekend_amt_sum' in features.columns:\n",
    "        # Only weekend transactions present in the data\n",
    "        features['aps_weekday_amt_sum'] = 0\n",
    "        features['aps_weekday_amt_mean'] = 0\n",
    "        features['aps_weekday_count'] = 0\n",
    "        features['aps_weekday_amt_max'] = 0\n",
    "        features['aps_weekday_active_days'] = 0\n",
    "        features['aps_weekday_weekend_amt_ratio'] = 0\n",
    "        features['aps_weekday_weekend_count_ratio'] = 0\n",
    "        features['aps_weekday_amt_pct'] = 0.0\n",
    "    \n",
    "    # Early (day 1-10) / mid (11-20) / late (21-31) month buckets\n",
    "    df_copy = df.copy()\n",
    "    df_copy['month_period'] = pd.cut(df_copy['day'], bins=[0, 10, 20, 31], \n",
    "                                      labels=['early', 'mid', 'late'])\n",
    "    \n",
    "    for period in ['early', 'mid', 'late']:\n",
    "        period_df = df_copy[df_copy['month_period'] == period]\n",
    "        if len(period_df) > 0:\n",
    "            period_stats = period_df.groupby('CUST_NO').agg({\n",
    "                'APSDTRAMT_abs': ['sum', 'mean', 'count']\n",
    "            }).reset_index()\n",
    "            period_stats.columns = ['CUST_NO', f'aps_{period}_month_amt_sum',\n",
    "                                    f'aps_{period}_month_amt_mean', f'aps_{period}_month_count']\n",
    "            features = features.merge(period_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Ensure the bucket count columns exist before computing shares\n",
    "    for period in ['early', 'mid', 'late']:\n",
    "        if f'aps_{period}_month_count' not in features.columns:\n",
    "            features[f'aps_{period}_month_count'] = 0\n",
    "    \n",
    "    total_count = features['aps_early_month_count'].fillna(0) + features['aps_mid_month_count'].fillna(0) + features['aps_late_month_count'].fillna(0)\n",
    "    features['aps_late_month_pct'] = features['aps_late_month_count'] / (total_count + 1e-5)\n",
    "    features['aps_early_month_pct'] = features['aps_early_month_count'] / (total_count + 1e-5)\n",
    "    \n",
    "    # Per-weekday (0=Mon .. 6=Sun) amount and count\n",
    "    for day in range(7):\n",
    "        day_df = df[df['weekday'] == day]\n",
    "        if len(day_df) > 0:\n",
    "            day_amt = day_df.groupby('CUST_NO')['APSDTRAMT_abs'].sum().reset_index()\n",
    "            day_amt.columns = ['CUST_NO', f'aps_weekday{day}_amt']\n",
    "            features = features.merge(day_amt, on='CUST_NO', how='left')\n",
    "            \n",
    "            day_cnt = day_df.groupby('CUST_NO').size().reset_index(name=f'aps_weekday{day}_count')\n",
    "            features = features.merge(day_cnt, on='CUST_NO', how='left')\n",
    "    \n",
    "    # Time-of-day features (requires 'time_period' added during preprocessing)\n",
    "    if 'time_period' in df.columns:\n",
    "        for period in ['dawn', 'morning', 'afternoon', 'night']:\n",
    "            period_df = df[df['time_period'] == period]\n",
    "            if len(period_df) > 0:\n",
    "                period_stats = period_df.groupby('CUST_NO').agg({\n",
    "                    'APSDTRAMT_abs': ['sum', 'count']\n",
    "                }).reset_index()\n",
    "                period_stats.columns = ['CUST_NO', f'aps_{period}_amt_sum', f'aps_{period}_count']\n",
    "                features = features.merge(period_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Share of transactions falling in each time period\n",
    "        total_count_df = df.groupby('CUST_NO').size().reset_index(name='total')\n",
    "        features = features.merge(total_count_df, on='CUST_NO', how='left')\n",
    "        for period in ['dawn', 'morning', 'afternoon', 'night']:\n",
    "            if f'aps_{period}_count' in features.columns:\n",
    "                features[f'aps_{period}_pct'] = features[f'aps_{period}_count'] / (features['total'] + 1e-5)\n",
    "        features.drop('total', axis=1, inplace=True)\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"周期性特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "45ce3692",
   "metadata": {},
   "source": [
    "### 5. 交易分类特征(交易码、渠道、摘要)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "95f6c929",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "交易分类特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def categorical_features(df, windows=[30, 60, 90]):\n",
    "    \"\"\"\n",
    "    交易分类特征:交易码、渠道、摘要、三方标识等\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    for window in windows:\n",
    "        sub_df = df[df['days_to_now'] < window].copy()\n",
    "        \n",
    "        if len(sub_df) == 0:\n",
    "            continue\n",
    "        \n",
    "        # 交易码统计\n",
    "        cod_stats = sub_df.groupby('CUST_NO')['APSDTRCOD'].agg([\n",
    "            'nunique',\n",
    "            ('most_freq', lambda x: x.mode()[0] if len(x.mode()) > 0 else None)\n",
    "        ]).reset_index()\n",
    "        cod_stats.columns = ['CUST_NO', f'aps_unique_cod_{window}d', f'aps_most_freq_cod_{window}d']\n",
    "        features = features.merge(cod_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 交易码频次编码\n",
    "        cod_freq = sub_df.groupby('APSDTRCOD').size().to_dict()\n",
    "        sub_df[f'cod_freq'] = sub_df['APSDTRCOD'].map(cod_freq)\n",
    "        cod_freq_feat = sub_df.groupby('CUST_NO')['cod_freq'].agg(['mean', 'max', 'min']).reset_index()\n",
    "        cod_freq_feat.columns = ['CUST_NO', f'aps_cod_freq_mean_{window}d',\n",
    "                                  f'aps_cod_freq_max_{window}d', f'aps_cod_freq_min_{window}d']\n",
    "        features = features.merge(cod_freq_feat, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Top5交易码\n",
    "        top_codes = sub_df['APSDTRCOD'].value_counts().head(10).index.tolist()\n",
    "        for i, code in enumerate(top_codes[:5], 1):\n",
    "            code_df = sub_df[sub_df['APSDTRCOD'] == code]\n",
    "            code_stats = code_df.groupby('CUST_NO').agg({\n",
    "                'APSDTRAMT_abs': ['sum', 'count']\n",
    "            }).reset_index()\n",
    "            code_stats.columns = ['CUST_NO', f'aps_top{i}_cod_amt_{window}d', f'aps_top{i}_cod_count_{window}d']\n",
    "            features = features.merge(code_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 交易渠道统计\n",
    "        chl_stats = sub_df.groupby('CUST_NO')['APSDTRCHL'].nunique().reset_index()\n",
    "        chl_stats.columns = ['CUST_NO', f'aps_unique_chl_{window}d']\n",
    "        features = features.merge(chl_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 交易渠道频次编码\n",
    "        chl_freq = sub_df.groupby('APSDTRCHL').size().to_dict()\n",
    "        sub_df[f'chl_freq'] = sub_df['APSDTRCHL'].map(chl_freq)\n",
    "        chl_freq_feat = sub_df.groupby('CUST_NO')['chl_freq'].agg(['mean', 'max', 'min']).reset_index()\n",
    "        chl_freq_feat.columns = ['CUST_NO', f'aps_chl_freq_mean_{window}d',\n",
    "                                  f'aps_chl_freq_max_{window}d', f'aps_chl_freq_min_{window}d']\n",
    "        features = features.merge(chl_freq_feat, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Top5交易渠道\n",
    "        top_channels = sub_df['APSDTRCHL'].value_counts().head(10).index.tolist()\n",
    "        for i, chl in enumerate(top_channels[:5], 1):\n",
    "            chl_df = sub_df[sub_df['APSDTRCHL'] == chl]\n",
    "            chl_stats = chl_df.groupby('CUST_NO').agg({\n",
    "                'APSDTRAMT_abs': ['sum', 'count']\n",
    "            }).reset_index()\n",
    "            chl_stats.columns = ['CUST_NO', f'aps_top{i}_chl_amt_{window}d', f'aps_top{i}_chl_count_{window}d']\n",
    "            features = features.merge(chl_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 摘要统计\n",
    "        if 'APSDABS' in sub_df.columns:\n",
    "            abs_stats = sub_df.groupby('CUST_NO')['APSDABS'].nunique().reset_index()\n",
    "            abs_stats.columns = ['CUST_NO', f'aps_unique_abs_{window}d']\n",
    "            features = features.merge(abs_stats, on='CUST_NO', how='left')\n",
    "            \n",
    "            # Top3摘要\n",
    "            top_abs = sub_df['APSDABS'].value_counts().head(5).index.tolist()\n",
    "            for i, abs_val in enumerate(top_abs[:3], 1):\n",
    "                abs_df = sub_df[sub_df['APSDABS'] == abs_val]\n",
    "                abs_stats = abs_df.groupby('CUST_NO').agg({\n",
    "                    'APSDTRAMT_abs': ['sum', 'max', 'min', 'count']\n",
    "                }).reset_index()\n",
    "                abs_stats.columns = ['CUST_NO', f'aps_top{i}_abs_amt_sum_{window}d',\n",
    "                                     f'aps_top{i}_abs_amt_max_{window}d', f'aps_top{i}_abs_amt_min_{window}d',\n",
    "                                     f'aps_top{i}_abs_count_{window}d']\n",
    "                features = features.merge(abs_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 三方交易标识统计\n",
    "        if 'APSDFLAG' in sub_df.columns:\n",
    "            flag_stats = sub_df.groupby('CUST_NO')['APSDFLAG'].nunique().reset_index()\n",
    "            flag_stats.columns = ['CUST_NO', f'aps_unique_flag_{window}d']\n",
    "            features = features.merge(flag_stats, on='CUST_NO', how='left')\n",
    "            \n",
    "            # 有三方标识的交易统计\n",
    "            flag_df = sub_df[sub_df['APSDFLAG'].notna()]\n",
    "            if len(flag_df) > 0:\n",
    "                flag_txn = flag_df.groupby('CUST_NO').agg({\n",
    "                    'APSDTRAMT_abs': ['sum', 'count']\n",
    "                }).reset_index()\n",
    "                flag_txn.columns = ['CUST_NO', f'aps_flag_amt_{window}d', f'aps_flag_count_{window}d']\n",
    "                features = features.merge(flag_txn, on='CUST_NO', how='left')\n",
    "                \n",
    "                # 三方交易占比\n",
    "                total_count = sub_df.groupby('CUST_NO').size().reset_index(name='total')\n",
    "                features = features.merge(total_count, on='CUST_NO', how='left')\n",
    "                features[f'aps_flag_pct_{window}d'] = features[f'aps_flag_count_{window}d'] / (features['total'] + 1e-5)\n",
    "                features.drop('total', axis=1, inplace=True)\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"交易分类特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bd6dde60",
   "metadata": {},
   "source": [
    "### 6. 金额分布特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "3be90992",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "金额分布特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def amount_distribution_features(df, windows=[30, 60, 90]):\n",
    "    \"\"\"\n",
    "    金额分布特征:不同金额段的交易统计\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    amt_bins = [\n",
    "        (0, 100, 'very_small'),\n",
    "        (100, 500, 'small'),\n",
    "        (500, 1000, 'medium'),\n",
    "        (1000, 5000, 'large'),\n",
    "        (5000, 10000, 'very_large'),\n",
    "        (10000, 50000, 'huge'),\n",
    "        (50000, float('inf'), 'mega')\n",
    "    ]\n",
    "    \n",
    "    for window in windows:\n",
    "        sub_df = df[df['days_to_now'] < window].copy()\n",
    "        \n",
    "        if len(sub_df) == 0:\n",
    "            continue\n",
    "        \n",
    "        for low, high, label in amt_bins:\n",
    "            amt_df = sub_df[(sub_df['APSDTRAMT_abs'] >= low) & (sub_df['APSDTRAMT_abs'] < high)]\n",
    "            \n",
    "            if len(amt_df) > 0:\n",
    "                # 该金额段的交易统计\n",
    "                amt_stats = amt_df.groupby('CUST_NO').agg({\n",
    "                    'APSDTRAMT_abs': ['sum', 'count']\n",
    "                }).reset_index()\n",
    "                amt_stats.columns = ['CUST_NO', f'aps_{label}_amt_{window}d', f'aps_{label}_count_{window}d']\n",
    "                features = features.merge(amt_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # 计算各金额段占比\n",
    "        total_count = sub_df.groupby('CUST_NO').size().reset_index(name='total')\n",
    "        features = features.merge(total_count, on='CUST_NO', how='left')\n",
    "        \n",
    "        for _, _, label in amt_bins:\n",
    "            if f'aps_{label}_count_{window}d' in features.columns:\n",
    "                features[f'aps_{label}_pct_{window}d'] = features[f'aps_{label}_count_{window}d'] / (features['total'] + 1e-5)\n",
    "        \n",
    "        features.drop('total', axis=1, inplace=True)\n",
    "        \n",
    "        # 大小额比率\n",
    "        if f'aps_large_count_{window}d' in features.columns and f'aps_small_count_{window}d' in features.columns:\n",
    "            features[f'aps_large_small_ratio_{window}d'] = features[f'aps_large_count_{window}d'] / (features[f'aps_small_count_{window}d'] + 1e-5)\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"金额分布特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e2094345",
   "metadata": {},
   "source": [
    "### 7. 交易稳定性与异常检测特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "35bba258",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "稳定性与异常检测特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def stability_and_anomaly_features(df, windows=[30, 60, 90]):\n",
    "    \"\"\"\n",
    "    Transaction stability and anomaly-detection features.\n",
    "\n",
    "    For each lookback window, derives per customer:\n",
    "      - statistics of the gaps (in days) between consecutive transactions,\n",
    "      - a regularity score (coefficient of variation of those gaps),\n",
    "      - counts/ratios of abnormally large and abnormally small amounts,\n",
    "      - the longest streak of consecutive active calendar days,\n",
    "      - skewness and kurtosis of transaction amounts.\n",
    "\n",
    "    NOTE(review): assumes APSDTRDAT is a datetime column (uses\n",
    "    .diff().dt.days) and APSDTRAMT_abs is the absolute amount --\n",
    "    confirm against the preprocessing step.\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    for window in windows:\n",
    "        sub_df = df[df['days_to_now'] < window].copy()\n",
    "        \n",
    "        if len(sub_df) == 0:\n",
    "            continue\n",
    "        \n",
    "        # Gap in days between consecutive transactions of the same customer;\n",
    "        # each customer's first transaction gets NaN from diff().\n",
    "        sub_df = sub_df.sort_values(['CUST_NO', 'APSDTRDAT'])\n",
    "        sub_df['days_diff'] = sub_df.groupby('CUST_NO')['APSDTRDAT'].diff().dt.days\n",
    "        \n",
    "        interval_stats = sub_df.groupby('CUST_NO')['days_diff'].agg([\n",
    "            'mean', 'std', 'max', 'min', 'median'\n",
    "        ]).reset_index()\n",
    "        interval_stats.columns = ['CUST_NO', f'aps_interval_mean_{window}d', f'aps_interval_std_{window}d',\n",
    "                                   f'aps_interval_max_{window}d', f'aps_interval_min_{window}d',\n",
    "                                   f'aps_interval_median_{window}d']\n",
    "        features = features.merge(interval_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Regularity: coefficient of variation of the gaps (lower = more regular).\n",
    "        features[f'aps_regularity_{window}d'] = features[f'aps_interval_std_{window}d'] / (features[f'aps_interval_mean_{window}d'] + 1e-5)\n",
    "        \n",
    "        # Abnormally large transactions: amount above per-customer mean + 3 * std.\n",
    "        amt_stats = sub_df.groupby('CUST_NO')['APSDTRAMT_abs'].agg(['mean', 'std']).reset_index()\n",
    "        amt_stats['threshold'] = amt_stats['mean'] + 3 * amt_stats['std']\n",
    "        \n",
    "        sub_df_with_threshold = sub_df.merge(amt_stats[['CUST_NO', 'threshold']], on='CUST_NO', how='left')\n",
    "        sub_df_with_threshold['is_outlier'] = (sub_df_with_threshold['APSDTRAMT_abs'] > sub_df_with_threshold['threshold']).astype(int)\n",
    "        \n",
    "        outlier_stats = sub_df_with_threshold.groupby('CUST_NO')['is_outlier'].agg(['sum', 'mean']).reset_index()\n",
    "        outlier_stats.columns = ['CUST_NO', f'aps_outlier_count_{window}d', f'aps_outlier_ratio_{window}d']\n",
    "        features = features.merge(outlier_stats, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Abnormally small transactions: amount below per-customer mean - 2 * std.\n",
    "        amt_stats['threshold_low'] = amt_stats['mean'] - 2 * amt_stats['std']\n",
    "        sub_df_with_threshold = sub_df.merge(amt_stats[['CUST_NO', 'threshold_low']], on='CUST_NO', how='left')\n",
    "        sub_df_with_threshold['is_low_outlier'] = (sub_df_with_threshold['APSDTRAMT_abs'] < sub_df_with_threshold['threshold_low']).astype(int)\n",
    "        \n",
    "        low_outlier = sub_df_with_threshold.groupby('CUST_NO')['is_low_outlier'].sum().reset_index()\n",
    "        low_outlier.columns = ['CUST_NO', f'aps_low_outlier_count_{window}d']\n",
    "        features = features.merge(low_outlier, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Longest streak of consecutive active calendar days.\n",
    "        # One row per (customer, transaction date) pair.\n",
    "        sub_df_daily = sub_df.groupby(['CUST_NO', 'APSDTRDAT']).size().reset_index()\n",
    "        \n",
    "        def max_consecutive_days(group):\n",
    "            # Length of the longest run of day-adjacent transaction dates\n",
    "            # for a single customer; a lone date counts as a streak of 1.\n",
    "            dates = pd.to_datetime(group['APSDTRDAT']).sort_values()\n",
    "            if len(dates) <= 1:\n",
    "                return 1\n",
    "            \n",
    "            max_consec = 1\n",
    "            current_consec = 1\n",
    "            \n",
    "            for i in range(1, len(dates)):\n",
    "                if (dates.iloc[i] - dates.iloc[i-1]).days == 1:\n",
    "                    current_consec += 1\n",
    "                    max_consec = max(max_consec, current_consec)\n",
    "                else:\n",
    "                    current_consec = 1\n",
    "            \n",
    "            return max_consec\n",
    "        \n",
    "        max_consec = sub_df_daily.groupby('CUST_NO').apply(max_consecutive_days).reset_index()\n",
    "        max_consec.columns = ['CUST_NO', f'aps_max_consecutive_days_{window}d']\n",
    "        features = features.merge(max_consec, on='CUST_NO', how='left')\n",
    "        \n",
    "        # Skewness and kurtosis of transaction amounts (shape of the distribution).\n",
    "        amt_skew = sub_df.groupby('CUST_NO')['APSDTRAMT_abs'].skew().reset_index()\n",
    "        amt_skew.columns = ['CUST_NO', f'aps_amt_skew_{window}d']\n",
    "        features = features.merge(amt_skew, on='CUST_NO', how='left')\n",
    "        \n",
    "        amt_kurt = sub_df.groupby('CUST_NO')['APSDTRAMT_abs'].apply(lambda x: x.kurt()).reset_index()\n",
    "        amt_kurt.columns = ['CUST_NO', f'aps_amt_kurt_{window}d']\n",
    "        features = features.merge(amt_kurt, on='CUST_NO', how='left')\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"稳定性与异常检测特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cb107c3e",
   "metadata": {},
   "source": [
    "### 8. 首末次交易特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "2980b715",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "首末次交易特征函数已定义\n"
     ]
    }
   ],
   "source": [
    "def first_last_transaction_features(df):\n",
    "    \"\"\"\n",
    "    首末次交易特征\n",
    "    \"\"\"\n",
    "    features = pd.DataFrame({'CUST_NO': df['CUST_NO'].unique()})\n",
    "    \n",
    "    df_sorted = df.sort_values(['CUST_NO', 'APSDTRDAT', 'APSDTRTIME'])\n",
    "    \n",
    "    # 首次交易特征\n",
    "    first_txn = df_sorted.groupby('CUST_NO').first().reset_index()\n",
    "    features['aps_first_txn_amt'] = features['CUST_NO'].map(first_txn.set_index('CUST_NO')['APSDTRAMT_abs'])\n",
    "    features['aps_first_txn_days_ago'] = features['CUST_NO'].map(first_txn.set_index('CUST_NO')['days_to_now'])\n",
    "    features['aps_first_txn_is_income'] = features['CUST_NO'].map(first_txn.set_index('CUST_NO')['is_income'])\n",
    "    features['aps_first_txn_weekday'] = features['CUST_NO'].map(first_txn.set_index('CUST_NO')['weekday'])\n",
    "    features['aps_first_txn_is_weekend'] = features['CUST_NO'].map(first_txn.set_index('CUST_NO')['is_weekend'])\n",
    "    \n",
    "    # 末次交易特征\n",
    "    last_txn = df_sorted.groupby('CUST_NO').last().reset_index()\n",
    "    features['aps_last_txn_amt'] = features['CUST_NO'].map(last_txn.set_index('CUST_NO')['APSDTRAMT_abs'])\n",
    "    features['aps_last_txn_days_ago'] = features['CUST_NO'].map(last_txn.set_index('CUST_NO')['days_to_now'])\n",
    "    features['aps_last_txn_is_income'] = features['CUST_NO'].map(last_txn.set_index('CUST_NO')['is_income'])\n",
    "    features['aps_last_txn_weekday'] = features['CUST_NO'].map(last_txn.set_index('CUST_NO')['weekday'])\n",
    "    features['aps_last_txn_is_weekend'] = features['CUST_NO'].map(last_txn.set_index('CUST_NO')['is_weekend'])\n",
    "    \n",
    "    # 首末次交易对比\n",
    "    features['aps_first_last_amt_ratio'] = features['aps_first_txn_amt'] / (features['aps_last_txn_amt'] + 1e-5)\n",
    "    features['aps_first_last_amt_diff'] = features['aps_first_txn_amt'] - features['aps_last_txn_amt']\n",
    "    \n",
    "    # 交易生命周期\n",
    "    features['aps_lifecycle_days'] = features['aps_first_txn_days_ago'] - features['aps_last_txn_days_ago']\n",
    "    \n",
    "    # 近期是否有交易\n",
    "    features['aps_has_txn_last_3d'] = (features['aps_last_txn_days_ago'] < 3).astype(int)\n",
    "    features['aps_has_txn_last_7d'] = (features['aps_last_txn_days_ago'] < 7).astype(int)\n",
    "    features['aps_has_txn_last_15d'] = (features['aps_last_txn_days_ago'] < 15).astype(int)\n",
    "    features['aps_has_txn_last_30d'] = (features['aps_last_txn_days_ago'] < 30).astype(int)\n",
    "    \n",
    "    # 最近5笔交易统计\n",
    "    last_n_txns = df_sorted.groupby('CUST_NO').tail(5)\n",
    "    last_n_stats = last_n_txns.groupby('CUST_NO')['APSDTRAMT_abs'].agg([\n",
    "        'mean', 'std', 'max', 'min'\n",
    "    ]).reset_index()\n",
    "    last_n_stats.columns = ['CUST_NO', 'aps_last5_amt_mean', 'aps_last5_amt_std',\n",
    "                            'aps_last5_amt_max', 'aps_last5_amt_min']\n",
    "    features = features.merge(last_n_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 最早5笔交易统计\n",
    "    first_n_txns = df_sorted.groupby('CUST_NO').head(5)\n",
    "    first_n_stats = first_n_txns.groupby('CUST_NO')['APSDTRAMT_abs'].agg([\n",
    "        'mean', 'std', 'max', 'min'\n",
    "    ]).reset_index()\n",
    "    first_n_stats.columns = ['CUST_NO', 'aps_first5_amt_mean', 'aps_first5_amt_std',\n",
    "                             'aps_first5_amt_max', 'aps_first5_amt_min']\n",
    "    features = features.merge(first_n_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 首末5笔平均金额对比\n",
    "    features['aps_first5_last5_amt_ratio'] = features['aps_first5_amt_mean'] / (features['aps_last5_amt_mean'] + 1e-5)\n",
    "    \n",
    "    return features\n",
    "\n",
    "print(\"首末次交易特征函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f7ec848b",
   "metadata": {},
   "source": [
    "## 特征生成主函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "473020a9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "特征生成主函数已定义\n"
     ]
    }
   ],
   "source": [
    "def generate_all_features(df, dataset_name='Train'):\n",
    "    \"\"\"\n",
    "    Generate all current-account (APS) transaction features.\n",
    "\n",
    "    Preprocesses the raw detail table, then runs the eight feature\n",
    "    builders defined earlier in the notebook and left-joins every result\n",
    "    onto one row per customer. Refactored from eight copy-pasted blocks\n",
    "    into a single data-driven loop; printed progress output is unchanged.\n",
    "\n",
    "    Parameters:\n",
    "        df: raw transaction detail DataFrame (must contain CUST_NO).\n",
    "        dataset_name: label used only in the progress banner.\n",
    "\n",
    "    Returns:\n",
    "        DataFrame with one row per CUST_NO and all generated features.\n",
    "    \"\"\"\n",
    "    print(\"\\n\" + \"=\" * 80)\n",
    "    print(f\"开始生成{dataset_name}集活期交易特征\")\n",
    "    print(\"=\" * 80)\n",
    "    \n",
    "    # Preprocessing adds the derived columns the feature builders rely on\n",
    "    print(\"\\n[0/8] 数据预处理...\")\n",
    "    df_processed = preprocess_tr_aps_data(df)\n",
    "    \n",
    "    # One row per customer; every feature set is left-joined onto this\n",
    "    all_features = pd.DataFrame({'CUST_NO': df_processed['CUST_NO'].unique()})\n",
    "    print(f\"\\n客户数: {all_features.shape[0]:,}\")\n",
    "    \n",
    "    # (progress label, feature builder, keyword arguments)\n",
    "    steps = [\n",
    "        ('生成基础统计特征', basic_statistics_features, {'windows': [7, 15, 30, 60, 90]}),\n",
    "        ('生成流入流出分离特征', inflow_outflow_features, {'windows': [7, 15, 30, 60, 90]}),\n",
    "        ('生成时序趋势特征', time_trend_features, {}),\n",
    "        ('生成周期性特征', periodicity_features, {}),\n",
    "        ('生成交易分类特征', categorical_features, {'windows': [30, 60, 90]}),\n",
    "        ('生成金额分布特征', amount_distribution_features, {'windows': [30, 60, 90]}),\n",
    "        ('生成稳定性与异常检测特征', stability_and_anomaly_features, {'windows': [30, 60, 90]}),\n",
    "        ('生成首末次交易特征', first_last_transaction_features, {}),\n",
    "    ]\n",
    "    \n",
    "    for step_no, (desc, builder, kwargs) in enumerate(steps, 1):\n",
    "        print(f\"\\n[{step_no}/{len(steps)}] {desc}...\")\n",
    "        start_time = time.time()\n",
    "        feat = builder(df_processed, **kwargs)\n",
    "        all_features = all_features.merge(feat, on='CUST_NO', how='left')\n",
    "        print(f\"   当前特征数: {all_features.shape[1] - 1}, 耗时: {time.time() - start_time:.2f}秒\")\n",
    "        # Free the intermediate frame; kernel memory persists across cells\n",
    "        del feat\n",
    "        gc.collect()\n",
    "    \n",
    "    print(\"\\n\" + \"=\" * 80)\n",
    "    print(f\"特征生成完成! 总特征数: {all_features.shape[1] - 1}\")\n",
    "    print(\"=\" * 80)\n",
    "    \n",
    "    return all_features\n",
    "\n",
    "print(\"特征生成主函数已定义\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "93cf6454",
   "metadata": {},
   "source": [
    "## 执行特征生成"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ef70b71e",
   "metadata": {},
   "source": [
    "### 训练集特征生成"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "eba9f6fc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "================================================================================\n",
      "开始生成Train集活期交易特征\n",
      "================================================================================\n",
      "\n",
      "[0/8] 数据预处理...\n",
      "预处理完成:\n",
      "  - 数据形状: (2000, 20)\n",
      "  - 日期范围: 2025-04-11 00:00:00 至 2025-04-11 00:00:00\n",
      "  - 总天数: 0天\n",
      "  - 客户数: 654\n",
      "\n",
      "客户数: 654\n",
      "\n",
      "[1/8] 生成基础统计特征...\n",
      "   当前特征数: 90, 耗时: 0.85秒\n",
      "\n",
      "[2/8] 生成流入流出分离特征...\n",
      "   当前特征数: 205, 耗时: 0.03秒\n",
      "\n",
      "[3/8] 生成时序趋势特征...\n",
      "   当前特征数: 90, 耗时: 0.85秒\n",
      "\n",
      "[2/8] 生成流入流出分离特征...\n",
      "   当前特征数: 205, 耗时: 0.03秒\n",
      "\n",
      "[3/8] 生成时序趋势特征...\n",
      "   当前特征数: 226, 耗时: 0.04秒\n",
      "\n",
      "[4/8] 生成周期性特征...\n",
      "   当前特征数: 260, 耗时: 0.02秒\n",
      "\n",
      "[5/8] 生成交易分类特征...\n",
      "   当前特征数: 226, 耗时: 0.04秒\n",
      "\n",
      "[4/8] 生成周期性特征...\n",
      "   当前特征数: 260, 耗时: 0.02秒\n",
      "\n",
      "[5/8] 生成交易分类特征...\n",
      "   当前特征数: 398, 耗时: 0.22秒\n",
      "\n",
      "[6/8] 生成金额分布特征...\n",
      "   当前特征数: 464, 耗时: 0.04秒\n",
      "\n",
      "[7/8] 生成稳定性与异常检测特征...\n",
      "   当前特征数: 398, 耗时: 0.22秒\n",
      "\n",
      "[6/8] 生成金额分布特征...\n",
      "   当前特征数: 464, 耗时: 0.04秒\n",
      "\n",
      "[7/8] 生成稳定性与异常检测特征...\n",
      "   当前特征数: 500, 耗时: 0.25秒\n",
      "\n",
      "[8/8] 生成首末次交易特征...\n",
      "   当前特征数: 526, 耗时: 0.01秒\n",
      "\n",
      "================================================================================\n",
      "特征生成完成! 总特征数: 526\n",
      "================================================================================\n",
      "\n",
      "训练集特征形状: (654, 527)\n",
      "缺失值统计:\n",
      "138562\n",
      "   当前特征数: 500, 耗时: 0.25秒\n",
      "\n",
      "[8/8] 生成首末次交易特征...\n",
      "   当前特征数: 526, 耗时: 0.01秒\n",
      "\n",
      "================================================================================\n",
      "特征生成完成! 总特征数: 526\n",
      "================================================================================\n",
      "\n",
      "训练集特征形状: (654, 527)\n",
      "缺失值统计:\n",
      "138562\n"
     ]
    }
   ],
   "source": [
    "# Build the training-split feature matrix from the raw detail table.\n",
    "# NOTE(review): `tr_aps_train` must already be loaded by an earlier cell\n",
    "# (not visible here) -- confirm under Restart & Run All.\n",
    "train_features = generate_all_features(tr_aps_train, dataset_name='Train')\n",
    "\n",
    "print(f\"\\n训练集特征形状: {train_features.shape}\")\n",
    "print(f\"缺失值统计:\")\n",
    "print(train_features.isnull().sum().sum())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0d665d41",
   "metadata": {},
   "source": [
    "### 测试集特征生成"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "6c67374c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "================================================================================\n",
      "开始生成Test集活期交易特征\n",
      "================================================================================\n",
      "\n",
      "[0/8] 数据预处理...\n",
      "预处理完成:\n",
      "  - 数据形状: (1000, 20)\n",
      "  - 日期范围: 2025-06-11 00:00:00 至 2025-06-11 00:00:00\n",
      "  - 总天数: 0天\n",
      "  - 客户数: 348\n",
      "\n",
      "客户数: 348\n",
      "\n",
      "[1/8] 生成基础统计特征...\n",
      "   当前特征数: 90, 耗时: 0.47秒\n",
      "\n",
      "[2/8] 生成流入流出分离特征...\n",
      "   当前特征数: 205, 耗时: 0.03秒\n",
      "\n",
      "[3/8] 生成时序趋势特征...\n",
      "   当前特征数: 226, 耗时: 0.04秒\n",
      "   当前特征数: 90, 耗时: 0.47秒\n",
      "\n",
      "[2/8] 生成流入流出分离特征...\n",
      "   当前特征数: 205, 耗时: 0.03秒\n",
      "\n",
      "[3/8] 生成时序趋势特征...\n",
      "   当前特征数: 226, 耗时: 0.04秒\n",
      "\n",
      "[4/8] 生成周期性特征...\n",
      "   当前特征数: 260, 耗时: 0.02秒\n",
      "\n",
      "[5/8] 生成交易分类特征...\n",
      "\n",
      "[4/8] 生成周期性特征...\n",
      "   当前特征数: 260, 耗时: 0.02秒\n",
      "\n",
      "[5/8] 生成交易分类特征...\n",
      "   当前特征数: 398, 耗时: 0.16秒\n",
      "\n",
      "[6/8] 生成金额分布特征...\n",
      "   当前特征数: 464, 耗时: 0.04秒\n",
      "\n",
      "[7/8] 生成稳定性与异常检测特征...\n",
      "   当前特征数: 398, 耗时: 0.16秒\n",
      "\n",
      "[6/8] 生成金额分布特征...\n",
      "   当前特征数: 464, 耗时: 0.04秒\n",
      "\n",
      "[7/8] 生成稳定性与异常检测特征...\n",
      "   当前特征数: 500, 耗时: 0.15秒\n",
      "\n",
      "[8/8] 生成首末次交易特征...\n",
      "   当前特征数: 526, 耗时: 0.01秒\n",
      "\n",
      "================================================================================\n",
      "特征生成完成! 总特征数: 526\n",
      "================================================================================\n",
      "\n",
      "测试集特征形状: (348, 527)\n",
      "缺失值统计:\n",
      "73374\n",
      "   当前特征数: 500, 耗时: 0.15秒\n",
      "\n",
      "[8/8] 生成首末次交易特征...\n",
      "   当前特征数: 526, 耗时: 0.01秒\n",
      "\n",
      "================================================================================\n",
      "特征生成完成! 总特征数: 526\n",
      "================================================================================\n",
      "\n",
      "测试集特征形状: (348, 527)\n",
      "缺失值统计:\n",
      "73374\n"
     ]
    }
   ],
   "source": [
    "# Build the test-split feature matrix with the same pipeline as the train split.\n",
    "# NOTE(review): `tr_aps_test` must already be loaded by an earlier cell\n",
    "# (not visible here) -- confirm under Restart & Run All.\n",
    "test_features = generate_all_features(tr_aps_test, dataset_name='Test')\n",
    "\n",
    "print(f\"\\n测试集特征形状: {test_features.shape}\")\n",
    "print(f\"缺失值统计:\")\n",
    "print(test_features.isnull().sum().sum())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c119fc09",
   "metadata": {},
   "source": [
    "## 特征保存"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "fb098c85",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "训练集特征文件已保存: ./feature\\train_tr_aps_dtl_features.pkl\n",
      "文件大小: 2.61 MB\n",
      "\n",
      "测试集特征文件已保存: ./feature\\test_tr_aps_dtl_features.pkl\n",
      "文件大小: 1.40 MB\n",
      "\n",
      "================================================================================\n",
      "活期交易明细表特征工程完成!\n",
      "================================================================================\n"
     ]
    }
   ],
   "source": [
    "feature_dir = './feature'\n",
    "if not os.path.exists(feature_dir):\n",
    "    os.makedirs(feature_dir)\n",
    "    print(f\"创建特征目录: {feature_dir}\")\n",
    "\n",
    "def _save_feature_file(features, filename, desc):\n",
    "    \"\"\"Pickle one feature DataFrame under feature_dir, report its size, return the path.\"\"\"\n",
    "    output_file = os.path.join(feature_dir, filename)\n",
    "    with open(output_file, 'wb') as f:\n",
    "        pickle.dump(features, f)\n",
    "    print(f\"\\n{desc}特征文件已保存: {output_file}\")\n",
    "    print(f\"文件大小: {os.path.getsize(output_file) / 1024 / 1024:.2f} MB\")\n",
    "    return output_file\n",
    "\n",
    "# Same save/report steps for both splits -- de-duplicated into the helper.\n",
    "# Output paths stay in the same variables the original cell defined.\n",
    "train_output_file = _save_feature_file(train_features, 'train_tr_aps_dtl_features.pkl', '训练集')\n",
    "test_output_file = _save_feature_file(test_features, 'test_tr_aps_dtl_features.pkl', '测试集')\n",
    "\n",
    "print(\"\\n\" + \"=\" * 80)\n",
    "print(\"活期交易明细表特征工程完成!\")\n",
    "print(\"=\" * 80)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "00959d9b",
   "metadata": {},
   "source": [
    "## 特征概览"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "98d6caff",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集特征列表:\n",
      "================================================================================\n",
      "  1. aps_amt_7d_sum\n",
      "  2. aps_amt_7d_mean\n",
      "  3. aps_amt_7d_std\n",
      "  4. aps_amt_7d_median\n",
      "  5. aps_amt_7d_max\n",
      "  6. aps_amt_7d_min\n",
      "  7. aps_amt_7d_q25\n",
      "  8. aps_amt_7d_q75\n",
      "  9. aps_count_7d\n",
      " 10. aps_active_days_7d\n",
      " 11. aps_daily_amt_avg_7d\n",
      " 12. aps_daily_count_avg_7d\n",
      " 13. aps_activity_rate_7d\n",
      " 14. aps_amt_cv_7d\n",
      " 15. aps_amt_range_7d\n",
      " 16. aps_amt_iqr_7d\n",
      " 17. aps_count_per_active_day_7d\n",
      " 18. aps_amt_per_active_day_7d\n",
      " 19. aps_amt_15d_sum\n",
      " 20. aps_amt_15d_mean\n",
      " 21. aps_amt_15d_std\n",
      " 22. aps_amt_15d_median\n",
      " 23. aps_amt_15d_max\n",
      " 24. aps_amt_15d_min\n",
      " 25. aps_amt_15d_q25\n",
      " 26. aps_amt_15d_q75\n",
      " 27. aps_count_15d\n",
      " 28. aps_active_days_15d\n",
      " 29. aps_daily_amt_avg_15d\n",
      " 30. aps_daily_count_avg_15d\n",
      " 31. aps_activity_rate_15d\n",
      " 32. aps_amt_cv_15d\n",
      " 33. aps_amt_range_15d\n",
      " 34. aps_amt_iqr_15d\n",
      " 35. aps_count_per_active_day_15d\n",
      " 36. aps_amt_per_active_day_15d\n",
      " 37. aps_amt_30d_sum\n",
      " 38. aps_amt_30d_mean\n",
      " 39. aps_amt_30d_std\n",
      " 40. aps_amt_30d_median\n",
      " 41. aps_amt_30d_max\n",
      " 42. aps_amt_30d_min\n",
      " 43. aps_amt_30d_q25\n",
      " 44. aps_amt_30d_q75\n",
      " 45. aps_count_30d\n",
      " 46. aps_active_days_30d\n",
      " 47. aps_daily_amt_avg_30d\n",
      " 48. aps_daily_count_avg_30d\n",
      " 49. aps_activity_rate_30d\n",
      " 50. aps_amt_cv_30d\n",
      " 51. aps_amt_range_30d\n",
      " 52. aps_amt_iqr_30d\n",
      " 53. aps_count_per_active_day_30d\n",
      " 54. aps_amt_per_active_day_30d\n",
      " 55. aps_amt_60d_sum\n",
      " 56. aps_amt_60d_mean\n",
      " 57. aps_amt_60d_std\n",
      " 58. aps_amt_60d_median\n",
      " 59. aps_amt_60d_max\n",
      " 60. aps_amt_60d_min\n",
      " 61. aps_amt_60d_q25\n",
      " 62. aps_amt_60d_q75\n",
      " 63. aps_count_60d\n",
      " 64. aps_active_days_60d\n",
      " 65. aps_daily_amt_avg_60d\n",
      " 66. aps_daily_count_avg_60d\n",
      " 67. aps_activity_rate_60d\n",
      " 68. aps_amt_cv_60d\n",
      " 69. aps_amt_range_60d\n",
      " 70. aps_amt_iqr_60d\n",
      " 71. aps_count_per_active_day_60d\n",
      " 72. aps_amt_per_active_day_60d\n",
      " 73. aps_amt_90d_sum\n",
      " 74. aps_amt_90d_mean\n",
      " 75. aps_amt_90d_std\n",
      " 76. aps_amt_90d_median\n",
      " 77. aps_amt_90d_max\n",
      " 78. aps_amt_90d_min\n",
      " 79. aps_amt_90d_q25\n",
      " 80. aps_amt_90d_q75\n",
      " 81. aps_count_90d\n",
      " 82. aps_active_days_90d\n",
      " 83. aps_daily_amt_avg_90d\n",
      " 84. aps_daily_count_avg_90d\n",
      " 85. aps_activity_rate_90d\n",
      " 86. aps_amt_cv_90d\n",
      " 87. aps_amt_range_90d\n",
      " 88. aps_amt_iqr_90d\n",
      " 89. aps_count_per_active_day_90d\n",
      " 90. aps_amt_per_active_day_90d\n",
      " 91. aps_in_7d_sum\n",
      " 92. aps_in_7d_mean\n",
      " 93. aps_in_7d_count\n",
      " 94. aps_in_7d_max\n",
      " 95. aps_in_7d_min\n",
      " 96. aps_in_7d_std\n",
      " 97. aps_in_7d_median\n",
      " 98. aps_in_active_days_7d\n",
      " 99. aps_out_7d_sum\n",
      "100. aps_out_7d_mean\n",
      "101. aps_out_7d_count\n",
      "102. aps_out_7d_max\n",
      "103. aps_out_7d_min\n",
      "104. aps_out_7d_std\n",
      "105. aps_out_7d_median\n",
      "106. aps_out_active_days_7d\n",
      "107. aps_in_out_amt_ratio_7d\n",
      "108. aps_in_out_count_ratio_7d\n",
      "109. aps_net_inflow_7d\n",
      "110. aps_in_out_active_diff_7d\n",
      "111. aps_in_out_freq_diff_7d\n",
      "112. aps_in_amt_pct_7d\n",
      "113. aps_in_count_pct_7d\n",
      "114. aps_in_15d_sum\n",
      "115. aps_in_15d_mean\n",
      "116. aps_in_15d_count\n",
      "117. aps_in_15d_max\n",
      "118. aps_in_15d_min\n",
      "119. aps_in_15d_std\n",
      "120. aps_in_15d_median\n",
      "121. aps_in_active_days_15d\n",
      "122. aps_out_15d_sum\n",
      "123. aps_out_15d_mean\n",
      "124. aps_out_15d_count\n",
      "125. aps_out_15d_max\n",
      "126. aps_out_15d_min\n",
      "127. aps_out_15d_std\n",
      "128. aps_out_15d_median\n",
      "129. aps_out_active_days_15d\n",
      "130. aps_in_out_amt_ratio_15d\n",
      "131. aps_in_out_count_ratio_15d\n",
      "132. aps_net_inflow_15d\n",
      "133. aps_in_out_active_diff_15d\n",
      "134. aps_in_out_freq_diff_15d\n",
      "135. aps_in_amt_pct_15d\n",
      "136. aps_in_count_pct_15d\n",
      "137. aps_in_30d_sum\n",
      "138. aps_in_30d_mean\n",
      "139. aps_in_30d_count\n",
      "140. aps_in_30d_max\n",
      "141. aps_in_30d_min\n",
      "142. aps_in_30d_std\n",
      "143. aps_in_30d_median\n",
      "144. aps_in_active_days_30d\n",
      "145. aps_out_30d_sum\n",
      "146. aps_out_30d_mean\n",
      "147. aps_out_30d_count\n",
      "148. aps_out_30d_max\n",
      "149. aps_out_30d_min\n",
      "150. aps_out_30d_std\n",
      "151. aps_out_30d_median\n",
      "152. aps_out_active_days_30d\n",
      "153. aps_in_out_amt_ratio_30d\n",
      "154. aps_in_out_count_ratio_30d\n",
      "155. aps_net_inflow_30d\n",
      "156. aps_in_out_active_diff_30d\n",
      "157. aps_in_out_freq_diff_30d\n",
      "158. aps_in_amt_pct_30d\n",
      "159. aps_in_count_pct_30d\n",
      "160. aps_in_60d_sum\n",
      "161. aps_in_60d_mean\n",
      "162. aps_in_60d_count\n",
      "163. aps_in_60d_max\n",
      "164. aps_in_60d_min\n",
      "165. aps_in_60d_std\n",
      "166. aps_in_60d_median\n",
      "167. aps_in_active_days_60d\n",
      "168. aps_out_60d_sum\n",
      "169. aps_out_60d_mean\n",
      "170. aps_out_60d_count\n",
      "171. aps_out_60d_max\n",
      "172. aps_out_60d_min\n",
      "173. aps_out_60d_std\n",
      "174. aps_out_60d_median\n",
      "175. aps_out_active_days_60d\n",
      "176. aps_in_out_amt_ratio_60d\n",
      "177. aps_in_out_count_ratio_60d\n",
      "178. aps_net_inflow_60d\n",
      "179. aps_in_out_active_diff_60d\n",
      "180. aps_in_out_freq_diff_60d\n",
      "181. aps_in_amt_pct_60d\n",
      "182. aps_in_count_pct_60d\n",
      "183. aps_in_90d_sum\n",
      "184. aps_in_90d_mean\n",
      "185. aps_in_90d_count\n",
      "186. aps_in_90d_max\n",
      "187. aps_in_90d_min\n",
      "188. aps_in_90d_std\n",
      "189. aps_in_90d_median\n",
      "190. aps_in_active_days_90d\n",
      "191. aps_out_90d_sum\n",
      "192. aps_out_90d_mean\n",
      "193. aps_out_90d_count\n",
      "194. aps_out_90d_max\n",
      "195. aps_out_90d_min\n",
      "196. aps_out_90d_std\n",
      "197. aps_out_90d_median\n",
      "198. aps_out_active_days_90d\n",
      "199. aps_in_out_amt_ratio_90d\n",
      "200. aps_in_out_count_ratio_90d\n",
      "201. aps_net_inflow_90d\n",
      "202. aps_in_out_active_diff_90d\n",
      "203. aps_in_out_freq_diff_90d\n",
      "204. aps_in_amt_pct_90d\n",
      "205. aps_in_count_pct_90d\n",
      "206. aps_amt_ratio_7d_30d\n",
      "207. aps_count_ratio_7d_30d\n",
      "208. aps_amt_ratio_7d_60d\n",
      "209. aps_count_ratio_7d_60d\n",
      "210. aps_amt_ratio_7d_90d\n",
      "211. aps_count_ratio_7d_90d\n",
      "212. aps_amt_ratio_30d_60d\n",
      "213. aps_count_ratio_30d_60d\n",
      "214. aps_amt_ratio_30d_90d\n",
      "215. aps_count_ratio_30d_90d\n",
      "216. aps_amt_ratio_60d_90d\n",
      "217. aps_count_ratio_60d_90d\n",
      "218. aps_monthly_amt_std\n",
      "219. aps_weekly_amt_std\n",
      "220. aps_month1_amt\n",
      "221. aps_month2_amt\n",
      "222. aps_month3_amt\n",
      "223. aps_month1_month2_growth\n",
      "224. aps_month2_month3_growth\n",
      "225. aps_trend_increasing\n",
      "226. aps_trend_decreasing\n",
      "227. aps_weekday_amt_sum\n",
      "228. aps_weekday_amt_mean\n",
      "229. aps_weekday_count\n",
      "230. aps_weekday_amt_max\n",
      "231. aps_weekday_active_days\n",
      "232. aps_weekend_amt_sum\n",
      "233. aps_weekend_amt_mean\n",
      "234. aps_weekend_count\n",
      "235. aps_weekend_amt_max\n",
      "236. aps_weekend_active_days\n",
      "237. aps_weekday_weekend_amt_ratio\n",
      "238. aps_weekday_weekend_count_ratio\n",
      "239. aps_weekday_amt_pct\n",
      "240. aps_mid_month_amt_sum\n",
      "241. aps_mid_month_amt_mean\n",
      "242. aps_mid_month_count\n",
      "243. aps_early_month_count\n",
      "244. aps_late_month_count\n",
      "245. aps_late_month_pct\n",
      "246. aps_early_month_pct\n",
      "247. aps_weekday4_amt\n",
      "248. aps_weekday4_count\n",
      "249. aps_dawn_amt_sum\n",
      "250. aps_dawn_count\n",
      "251. aps_morning_amt_sum\n",
      "252. aps_morning_count\n",
      "253. aps_afternoon_amt_sum\n",
      "254. aps_afternoon_count\n",
      "255. aps_night_amt_sum\n",
      "256. aps_night_count\n",
      "257. aps_dawn_pct\n",
      "258. aps_morning_pct\n",
      "259. aps_afternoon_pct\n",
      "260. aps_night_pct\n",
      "261. aps_unique_cod_30d\n",
      "262. aps_most_freq_cod_30d\n",
      "263. aps_cod_freq_mean_30d\n",
      "264. aps_cod_freq_max_30d\n",
      "265. aps_cod_freq_min_30d\n",
      "266. aps_top1_cod_amt_30d\n",
      "267. aps_top1_cod_count_30d\n",
      "268. aps_top2_cod_amt_30d\n",
      "269. aps_top2_cod_count_30d\n",
      "270. aps_top3_cod_amt_30d\n",
      "271. aps_top3_cod_count_30d\n",
      "272. aps_top4_cod_amt_30d\n",
      "273. aps_top4_cod_count_30d\n",
      "274. aps_top5_cod_amt_30d\n",
      "275. aps_top5_cod_count_30d\n",
      "276. aps_unique_chl_30d\n",
      "277. aps_chl_freq_mean_30d\n",
      "278. aps_chl_freq_max_30d\n",
      "279. aps_chl_freq_min_30d\n",
      "280. aps_top1_chl_amt_30d\n",
      "281. aps_top1_chl_count_30d\n",
      "282. aps_top2_chl_amt_30d\n",
      "283. aps_top2_chl_count_30d\n",
      "284. aps_top3_chl_amt_30d\n",
      "285. aps_top3_chl_count_30d\n",
      "286. aps_top4_chl_amt_30d\n",
      "287. aps_top4_chl_count_30d\n",
      "288. aps_top5_chl_amt_30d\n",
      "289. aps_top5_chl_count_30d\n",
      "290. aps_unique_abs_30d\n",
      "291. aps_top1_abs_amt_sum_30d\n",
      "292. aps_top1_abs_amt_max_30d\n",
      "293. aps_top1_abs_amt_min_30d\n",
      "294. aps_top1_abs_count_30d\n",
      "295. aps_top2_abs_amt_sum_30d\n",
      "296. aps_top2_abs_amt_max_30d\n",
      "297. aps_top2_abs_amt_min_30d\n",
      "298. aps_top2_abs_count_30d\n",
      "299. aps_top3_abs_amt_sum_30d\n",
      "300. aps_top3_abs_amt_max_30d\n",
      "301. aps_top3_abs_amt_min_30d\n",
      "302. aps_top3_abs_count_30d\n",
      "303. aps_unique_flag_30d\n",
      "304. aps_flag_amt_30d\n",
      "305. aps_flag_count_30d\n",
      "306. aps_flag_pct_30d\n",
      "307. aps_unique_cod_60d\n",
      "308. aps_most_freq_cod_60d\n",
      "309. aps_cod_freq_mean_60d\n",
      "310. aps_cod_freq_max_60d\n",
      "311. aps_cod_freq_min_60d\n",
      "312. aps_top1_cod_amt_60d\n",
      "313. aps_top1_cod_count_60d\n",
      "314. aps_top2_cod_amt_60d\n",
      "315. aps_top2_cod_count_60d\n",
      "316. aps_top3_cod_amt_60d\n",
      "317. aps_top3_cod_count_60d\n",
      "318. aps_top4_cod_amt_60d\n",
      "319. aps_top4_cod_count_60d\n",
      "320. aps_top5_cod_amt_60d\n",
      "321. aps_top5_cod_count_60d\n",
      "322. aps_unique_chl_60d\n",
      "323. aps_chl_freq_mean_60d\n",
      "324. aps_chl_freq_max_60d\n",
      "325. aps_chl_freq_min_60d\n",
      "326. aps_top1_chl_amt_60d\n",
      "327. aps_top1_chl_count_60d\n",
      "328. aps_top2_chl_amt_60d\n",
      "329. aps_top2_chl_count_60d\n",
      "330. aps_top3_chl_amt_60d\n",
      "331. aps_top3_chl_count_60d\n",
      "332. aps_top4_chl_amt_60d\n",
      "333. aps_top4_chl_count_60d\n",
      "334. aps_top5_chl_amt_60d\n",
      "335. aps_top5_chl_count_60d\n",
      "336. aps_unique_abs_60d\n",
      "337. aps_top1_abs_amt_sum_60d\n",
      "338. aps_top1_abs_amt_max_60d\n",
      "339. aps_top1_abs_amt_min_60d\n",
      "340. aps_top1_abs_count_60d\n",
      "341. aps_top2_abs_amt_sum_60d\n",
      "342. aps_top2_abs_amt_max_60d\n",
      "343. aps_top2_abs_amt_min_60d\n",
      "344. aps_top2_abs_count_60d\n",
      "345. aps_top3_abs_amt_sum_60d\n",
      "346. aps_top3_abs_amt_max_60d\n",
      "347. aps_top3_abs_amt_min_60d\n",
      "348. aps_top3_abs_count_60d\n",
      "349. aps_unique_flag_60d\n",
      "350. aps_flag_amt_60d\n",
      "351. aps_flag_count_60d\n",
      "352. aps_flag_pct_60d\n",
      "353. aps_unique_cod_90d\n",
      "354. aps_most_freq_cod_90d\n",
      "355. aps_cod_freq_mean_90d\n",
      "356. aps_cod_freq_max_90d\n",
      "357. aps_cod_freq_min_90d\n",
      "358. aps_top1_cod_amt_90d\n",
      "359. aps_top1_cod_count_90d\n",
      "360. aps_top2_cod_amt_90d\n",
      "361. aps_top2_cod_count_90d\n",
      "362. aps_top3_cod_amt_90d\n",
      "363. aps_top3_cod_count_90d\n",
      "364. aps_top4_cod_amt_90d\n",
      "365. aps_top4_cod_count_90d\n",
      "366. aps_top5_cod_amt_90d\n",
      "367. aps_top5_cod_count_90d\n",
      "368. aps_unique_chl_90d\n",
      "369. aps_chl_freq_mean_90d\n",
      "370. aps_chl_freq_max_90d\n",
      "371. aps_chl_freq_min_90d\n",
      "372. aps_top1_chl_amt_90d\n",
      "373. aps_top1_chl_count_90d\n",
      "374. aps_top2_chl_amt_90d\n",
      "375. aps_top2_chl_count_90d\n",
      "376. aps_top3_chl_amt_90d\n",
      "377. aps_top3_chl_count_90d\n",
      "378. aps_top4_chl_amt_90d\n",
      "379. aps_top4_chl_count_90d\n",
      "380. aps_top5_chl_amt_90d\n",
      "381. aps_top5_chl_count_90d\n",
      "382. aps_unique_abs_90d\n",
      "383. aps_top1_abs_amt_sum_90d\n",
      "384. aps_top1_abs_amt_max_90d\n",
      "385. aps_top1_abs_amt_min_90d\n",
      "386. aps_top1_abs_count_90d\n",
      "387. aps_top2_abs_amt_sum_90d\n",
      "388. aps_top2_abs_amt_max_90d\n",
      "389. aps_top2_abs_amt_min_90d\n",
      "390. aps_top2_abs_count_90d\n",
      "391. aps_top3_abs_amt_sum_90d\n",
      "392. aps_top3_abs_amt_max_90d\n",
      "393. aps_top3_abs_amt_min_90d\n",
      "394. aps_top3_abs_count_90d\n",
      "395. aps_unique_flag_90d\n",
      "396. aps_flag_amt_90d\n",
      "397. aps_flag_count_90d\n",
      "398. aps_flag_pct_90d\n",
      "399. aps_very_small_amt_30d\n",
      "400. aps_very_small_count_30d\n",
      "401. aps_small_amt_30d\n",
      "402. aps_small_count_30d\n",
      "403. aps_medium_amt_30d\n",
      "404. aps_medium_count_30d\n",
      "405. aps_large_amt_30d\n",
      "406. aps_large_count_30d\n",
      "407. aps_very_large_amt_30d\n",
      "408. aps_very_large_count_30d\n",
      "409. aps_huge_amt_30d\n",
      "410. aps_huge_count_30d\n",
      "411. aps_mega_amt_30d\n",
      "412. aps_mega_count_30d\n",
      "413. aps_very_small_pct_30d\n",
      "414. aps_small_pct_30d\n",
      "415. aps_medium_pct_30d\n",
      "416. aps_large_pct_30d\n",
      "417. aps_very_large_pct_30d\n",
      "418. aps_huge_pct_30d\n",
      "419. aps_mega_pct_30d\n",
      "420. aps_large_small_ratio_30d\n",
      "421. aps_very_small_amt_60d\n",
      "422. aps_very_small_count_60d\n",
      "423. aps_small_amt_60d\n",
      "424. aps_small_count_60d\n",
      "425. aps_medium_amt_60d\n",
      "426. aps_medium_count_60d\n",
      "427. aps_large_amt_60d\n",
      "428. aps_large_count_60d\n",
      "429. aps_very_large_amt_60d\n",
      "430. aps_very_large_count_60d\n",
      "431. aps_huge_amt_60d\n",
      "432. aps_huge_count_60d\n",
      "433. aps_mega_amt_60d\n",
      "434. aps_mega_count_60d\n",
      "435. aps_very_small_pct_60d\n",
      "436. aps_small_pct_60d\n",
      "437. aps_medium_pct_60d\n",
      "438. aps_large_pct_60d\n",
      "439. aps_very_large_pct_60d\n",
      "440. aps_huge_pct_60d\n",
      "441. aps_mega_pct_60d\n",
      "442. aps_large_small_ratio_60d\n",
      "443. aps_very_small_amt_90d\n",
      "444. aps_very_small_count_90d\n",
      "445. aps_small_amt_90d\n",
      "446. aps_small_count_90d\n",
      "447. aps_medium_amt_90d\n",
      "448. aps_medium_count_90d\n",
      "449. aps_large_amt_90d\n",
      "450. aps_large_count_90d\n",
      "451. aps_very_large_amt_90d\n",
      "452. aps_very_large_count_90d\n",
      "453. aps_huge_amt_90d\n",
      "454. aps_huge_count_90d\n",
      "455. aps_mega_amt_90d\n",
      "456. aps_mega_count_90d\n",
      "457. aps_very_small_pct_90d\n",
      "458. aps_small_pct_90d\n",
      "459. aps_medium_pct_90d\n",
      "460. aps_large_pct_90d\n",
      "461. aps_very_large_pct_90d\n",
      "462. aps_huge_pct_90d\n",
      "463. aps_mega_pct_90d\n",
      "464. aps_large_small_ratio_90d\n",
      "465. aps_interval_mean_30d\n",
      "466. aps_interval_std_30d\n",
      "467. aps_interval_max_30d\n",
      "468. aps_interval_min_30d\n",
      "469. aps_interval_median_30d\n",
      "470. aps_regularity_30d\n",
      "471. aps_outlier_count_30d\n",
      "472. aps_outlier_ratio_30d\n",
      "473. aps_low_outlier_count_30d\n",
      "474. aps_max_consecutive_days_30d\n",
      "475. aps_amt_skew_30d\n",
      "476. aps_amt_kurt_30d\n",
      "477. aps_interval_mean_60d\n",
      "478. aps_interval_std_60d\n",
      "479. aps_interval_max_60d\n",
      "480. aps_interval_min_60d\n",
      "481. aps_interval_median_60d\n",
      "482. aps_regularity_60d\n",
      "483. aps_outlier_count_60d\n",
      "484. aps_outlier_ratio_60d\n",
      "485. aps_low_outlier_count_60d\n",
      "486. aps_max_consecutive_days_60d\n",
      "487. aps_amt_skew_60d\n",
      "488. aps_amt_kurt_60d\n",
      "489. aps_interval_mean_90d\n",
      "490. aps_interval_std_90d\n",
      "491. aps_interval_max_90d\n",
      "492. aps_interval_min_90d\n",
      "493. aps_interval_median_90d\n",
      "494. aps_regularity_90d\n",
      "495. aps_outlier_count_90d\n",
      "496. aps_outlier_ratio_90d\n",
      "497. aps_low_outlier_count_90d\n",
      "498. aps_max_consecutive_days_90d\n",
      "499. aps_amt_skew_90d\n",
      "500. aps_amt_kurt_90d\n",
      "501. aps_first_txn_amt\n",
      "502. aps_first_txn_days_ago\n",
      "503. aps_first_txn_is_income\n",
      "504. aps_first_txn_weekday\n",
      "505. aps_first_txn_is_weekend\n",
      "506. aps_last_txn_amt\n",
      "507. aps_last_txn_days_ago\n",
      "508. aps_last_txn_is_income\n",
      "509. aps_last_txn_weekday\n",
      "510. aps_last_txn_is_weekend\n",
      "511. aps_first_last_amt_ratio\n",
      "512. aps_first_last_amt_diff\n",
      "513. aps_lifecycle_days\n",
      "514. aps_has_txn_last_3d\n",
      "515. aps_has_txn_last_7d\n",
      "516. aps_has_txn_last_15d\n",
      "517. aps_has_txn_last_30d\n",
      "518. aps_last5_amt_mean\n",
      "519. aps_last5_amt_std\n",
      "520. aps_last5_amt_max\n",
      "521. aps_last5_amt_min\n",
      "522. aps_first5_amt_mean\n",
      "523. aps_first5_amt_std\n",
      "524. aps_first5_amt_max\n",
      "525. aps_first5_amt_min\n",
      "526. aps_first5_last5_amt_ratio\n",
      "\n",
      "总计: 526 个特征\n",
      "\n",
      "================================================================================\n",
      "特征数据预览:\n",
      "                            CUST_NO  aps_amt_7d_sum  aps_amt_7d_mean  \\\n",
      "0  002ea5de4f0757bcd4b037401fb93301          4768.0           1192.0   \n",
      "1  004ace784736b5d1c930ec7551e678c0          6000.0           3000.0   \n",
      "2  00559388499965d49c943d4a9141d6d9           138.6             23.1   \n",
      "3  0106662c75558f1775215f7397fbc325           900.0            900.0   \n",
      "4  0199629b595a2354c85afe66a85b3bc0          5000.0           5000.0   \n",
      "\n",
      "   aps_amt_7d_std  aps_amt_7d_median  aps_amt_7d_max  aps_amt_7d_min  \\\n",
      "0     1892.691910             374.00          4000.0            20.0   \n",
      "1     1414.213562            3000.00          4000.0          2000.0   \n",
      "2       17.183946              19.95            51.5             7.0   \n",
      "3             NaN             900.00           900.0           900.0   \n",
      "4             NaN            5000.00          5000.0          5000.0   \n",
      "\n",
      "   aps_amt_7d_q25  aps_amt_7d_q75  aps_count_7d  ...  aps_has_txn_last_30d  \\\n",
      "0           80.00        1486.000             4  ...                     1   \n",
      "1         2500.00        3500.000             2  ...                     1   \n",
      "2            9.75          30.375             6  ...                     1   \n",
      "3          900.00         900.000             1  ...                     1   \n",
      "4         5000.00        5000.000             1  ...                     1   \n",
      "\n",
      "   aps_last5_amt_mean  aps_last5_amt_std  aps_last5_amt_max  \\\n",
      "0             1192.00        1892.691910             4000.0   \n",
      "1             3000.00        1414.213562             4000.0   \n",
      "2               26.32          17.068890               51.5   \n",
      "3              900.00                NaN              900.0   \n",
      "4             5000.00                NaN             5000.0   \n",
      "\n",
      "   aps_last5_amt_min  aps_first5_amt_mean  aps_first5_amt_std  \\\n",
      "0               20.0              1192.00         1892.691910   \n",
      "1             2000.0              3000.00         1414.213562   \n",
      "2                9.0                25.92           17.591674   \n",
      "3              900.0               900.00                 NaN   \n",
      "4             5000.0              5000.00                 NaN   \n",
      "\n",
      "   aps_first5_amt_max  aps_first5_amt_min  aps_first5_last5_amt_ratio  \n",
      "0              4000.0                20.0                    1.000000  \n",
      "1              4000.0              2000.0                    1.000000  \n",
      "2                51.5                 7.0                    0.984802  \n",
      "3               900.0               900.0                    1.000000  \n",
      "4              5000.0              5000.0                    1.000000  \n",
      "\n",
      "[5 rows x 527 columns]\n"
     ]
    }
   ],
   "source": [
    "# Collect engineered feature columns (CUST_NO is the customer key, not a feature).\n",
    "feature_cols = [c for c in train_features.columns if c != 'CUST_NO']\n",
    "\n",
    "# Numbered listing of every feature name, right-aligned to 3 digits.\n",
    "print(\"训练集特征列表:\")\n",
    "print(\"=\" * 80)\n",
    "for rank, feat_name in enumerate(feature_cols, 1):\n",
    "    print(f\"{rank:3d}. {feat_name}\")\n",
    "\n",
    "print(f\"\\n总计: {len(feature_cols)} 个特征\")\n",
    "\n",
    "# Quick preview of the first rows of the feature table.\n",
    "print(\"\\n\" + \"=\" * 80)\n",
    "print(\"特征数据预览:\")\n",
    "print(train_features.head())"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "starcup",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
