{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "861d2818",
   "metadata": {},
   "outputs": [],
   "source": [
# Suppress all warnings so notebook output stays readable.
# NOTE(review): blanket 'ignore' also hides pandas deprecation notices —
# consider filtering only the specific categories that are noisy.
import warnings
warnings.filterwarnings('ignore')
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "d1507859",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import gc\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import re\n",
    "import time\n",
    "from scipy import stats\n",
    "import matplotlib.pyplot as plt\n",
    "import category_encoders as ce\n",
    "import networkx as nx\n",
    "import pickle\n",
    "import lightgbm as lgb\n",
    "import catboost as cat\n",
    "import xgboost as xgb\n",
    "from datetime import timedelta\n",
    "from gensim.models import Word2Vec\n",
    "from io import StringIO\n",
    "from tqdm import tqdm\n",
    "from lightgbm import LGBMClassifier\n",
    "from lightgbm import log_evaluation, early_stopping\n",
    "from sklearn.metrics import roc_curve\n",
    "from scipy.stats import chi2_contingency, pearsonr\n",
    "from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n",
    "from sklearn.feature_extraction import FeatureHasher\n",
    "from sklearn.model_selection import StratifiedKFold, KFold, train_test_split, GridSearchCV\n",
    "from category_encoders import TargetEncoder\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from autogluon.tabular import TabularDataset, TabularPredictor, FeatureMetadata\n",
    "from autogluon.features.generators import AsTypeFeatureGenerator, BulkFeatureGenerator, DropUniqueFeatureGenerator, FillNaFeatureGenerator, PipelineFeatureGenerator\n",
    "from autogluon.features.generators import CategoryFeatureGenerator, IdentityFeatureGenerator, AutoMLPipelineFeatureGenerator\n",
    "from autogluon.common.features.types import R_INT, R_FLOAT\n",
    "from autogluon.core.metrics import make_scorer"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ff0951d4",
   "metadata": {},
   "source": [
    "# 数据导入"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a96f58d3",
   "metadata": {},
   "source": [
    "## 数据导入通用函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "6ef1ed67",
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data_from_directory(directory):\n",
    "    \"\"\"\n",
    "    遍历目录加载所有CSV文件，将其作为独立的DataFrame变量\n",
    "\n",
    "    参数:\n",
    "    - directory: 输入的数据路径\n",
    "    \n",
    "    返回:\n",
    "    - 含有数据集名称的列表\n",
    "    \"\"\"\n",
    "    dataset_names = []\n",
    "    for filename in os.listdir(directory):\n",
    "        if filename.endswith(\".csv\"):\n",
    "            dataset_name = os.path.splitext(filename)[0] + '_data' # 获取文件名作为变量名\n",
    "            file_path = os.path.join(directory, filename)  # 完整的文件路径\n",
    "            globals()[dataset_name] = pd.read_csv(file_path)  # 将文件加载为DataFrame并赋值给全局变量\n",
    "            dataset_names.append(dataset_name)\n",
    "            print(f\"数据集 {dataset_name} 已加载为 DataFrame\")\n",
    "\n",
    "    return dataset_names"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7e4b1e0f",
   "metadata": {},
   "source": [
    "## 导入数据"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "374a4ad3",
   "metadata": {},
   "source": [
    "### 训练集导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "dd412459",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集 TRAIN_AGET_PAY_data 已加载为 DataFrame\n",
      "数据集 TRAIN_ASSET_data 已加载为 DataFrame\n",
      "数据集 TRAIN_CCD_TR_DTL_data 已加载为 DataFrame\n",
      "数据集 TRAIN_MB_PAGEVIEW_DTL_data 已加载为 DataFrame\n",
      "数据集 TRAIN_MB_QRYTRNFLW_data 已加载为 DataFrame\n",
      "数据集 TRAIN_MB_TRNFLW_data 已加载为 DataFrame\n",
      "数据集 TRAIN_NATURE_data 已加载为 DataFrame\n",
      "数据集 TRAIN_PROD_HOLD_data 已加载为 DataFrame\n",
      "数据集 TRAIN_TARGET_INFO_data 已加载为 DataFrame\n",
      "数据集 TRAIN_TR_APS_DTL_data 已加载为 DataFrame\n",
      "数据集 TRAIN_TR_IBTF_data 已加载为 DataFrame\n",
      "数据集 TRAIN_TR_TPAY_data 已加载为 DataFrame\n"
     ]
    }
   ],
   "source": [
# Load every training-set CSV into module-level DataFrames named
# <file>_data and keep the list of created variable names.
# NOTE(review): relative path — depends on the notebook's working directory.
train_load_dt = '../Train_Data'
train_data_name = load_data_from_directory(train_load_dt)
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5521715a",
   "metadata": {},
   "source": [
    "### A测试集导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "a7221766",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集 A_AGET_PAY_data 已加载为 DataFrame\n",
      "数据集 A_ASSET_data 已加载为 DataFrame\n",
      "数据集 A_CCD_TR_DTL_data 已加载为 DataFrame\n",
      "数据集 A_MB_PAGEVIEW_DTL_data 已加载为 DataFrame\n",
      "数据集 A_MB_QRYTRNFLW_data 已加载为 DataFrame\n",
      "数据集 A_MB_TRNFLW_data 已加载为 DataFrame\n",
      "数据集 A_NATURE_data 已加载为 DataFrame\n",
      "数据集 A_PROD_HOLD_data 已加载为 DataFrame\n",
      "数据集 A_TARGET_data 已加载为 DataFrame\n",
      "数据集 A_TR_APS_DTL_data 已加载为 DataFrame\n",
      "数据集 A_TR_IBTF_data 已加载为 DataFrame\n",
      "数据集 A_TR_TPAY_data 已加载为 DataFrame\n"
     ]
    }
   ],
   "source": [
# Load every A-test-set CSV into module-level DataFrames named
# <file>_data (prefixed A_* per the file names in ../DATA).
A_load_dt = '../DATA'
A_data_name = load_data_from_directory(A_load_dt)
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b0b33f09",
   "metadata": {},
   "source": [
    "# 特征工程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "2c86c30f",
   "metadata": {},
   "outputs": [],
   "source": [
# Work on copies of the A-test-set flow tables so the raw frames stay intact.
# NOTE(review): only the A test set is wired into the feature pipeline in this
# chunk — confirm the TRAIN_* tables go through the same steps elsewhere.
MB_TRNFLW_data = A_MB_TRNFLW_data.copy()
MB_QRYTRNFLW_data = A_MB_QRYTRNFLW_data.copy()
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cb6e2471",
   "metadata": {},
   "source": [
    "## 通用数据处理函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "bbd066bd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "参考日期(最大日期): 2025-06-30 00:00:00\n",
      "数据预处理完成!\n",
      "金融流水数据形状: (15420, 9)\n",
      "非金融流水数据形状: (241065, 8)\n"
     ]
    }
   ],
   "source": [
    "# 数据预处理 - 转换日期格式\n",
    "MB_TRNFLW_data['DATE'] = pd.to_datetime(MB_TRNFLW_data['DATE'], format='%Y%m%d')\n",
    "MB_QRYTRNFLW_data['DATE'] = pd.to_datetime(MB_QRYTRNFLW_data['DATE'], format='%Y%m%d')\n",
    "\n",
    "# 获取数据的最大日期作为参考日期\n",
    "reference_date = MB_TRNFLW_data['DATE'].max()\n",
    "print(f\"参考日期(最大日期): {reference_date}\")\n",
    "\n",
    "# 添加辅助时间特征\n",
    "MB_TRNFLW_data['weekday'] = MB_TRNFLW_data['DATE'].dt.dayofweek  # 0=周一, 6=周日\n",
    "MB_TRNFLW_data['day'] = MB_TRNFLW_data['DATE'].dt.day\n",
    "MB_TRNFLW_data['week'] = MB_TRNFLW_data['DATE'].dt.isocalendar().week\n",
    "MB_TRNFLW_data['is_weekend'] = MB_TRNFLW_data['weekday'].apply(lambda x: 1 if x >= 5 else 0)\n",
    "MB_TRNFLW_data['days_from_ref'] = (reference_date - MB_TRNFLW_data['DATE']).dt.days\n",
    "\n",
    "MB_QRYTRNFLW_data['weekday'] = MB_QRYTRNFLW_data['DATE'].dt.dayofweek\n",
    "MB_QRYTRNFLW_data['day'] = MB_QRYTRNFLW_data['DATE'].dt.day\n",
    "MB_QRYTRNFLW_data['week'] = MB_QRYTRNFLW_data['DATE'].dt.isocalendar().week\n",
    "MB_QRYTRNFLW_data['is_weekend'] = MB_QRYTRNFLW_data['weekday'].apply(lambda x: 1 if x >= 5 else 0)\n",
    "MB_QRYTRNFLW_data['days_from_ref'] = (reference_date - MB_QRYTRNFLW_data['DATE']).dt.days\n",
    "\n",
    "print(\"数据预处理完成!\")\n",
    "print(f\"金融流水数据形状: {MB_TRNFLW_data.shape}\")\n",
    "print(f\"非金融流水数据形状: {MB_QRYTRNFLW_data.shape}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "26485a88",
   "metadata": {},
   "source": [
    "## 掌银金融性流水表特征工程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "78be6eef",
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_mb_trnflw_features(df, reference_date):\n",
    "    \"\"\"\n",
    "    创建掌银金融性流水表的详细特征\n",
    "    \n",
    "    参数:\n",
    "    - df: 掌银金融性流水表DataFrame\n",
    "    - reference_date: 参考日期\n",
    "    \n",
    "    返回:\n",
    "    - 特征DataFrame\n",
    "    \"\"\"\n",
    "    print(\"开始构建掌银金融性流水表特征...\")\n",
    "    \n",
    "    features = pd.DataFrame()\n",
    "    \n",
    "    # ===== 1. 基础统计特征 =====\n",
    "    print(\"1. 基础统计特征...\")\n",
    "    \n",
    "    # 1.1 整体统计\n",
    "    basic_stats = df.groupby('CUST_NO').agg({\n",
    "        'AMOUNT': ['count', 'sum', 'mean', 'median', 'std', 'min', 'max', \n",
    "                   lambda x: x.quantile(0.25), lambda x: x.quantile(0.75)],\n",
    "        'TRANSCODE': 'nunique',\n",
    "        'DATE': ['min', 'max'],\n",
    "    }).reset_index()\n",
    "    \n",
    "    basic_stats.columns = ['CUST_NO', \n",
    "                           'mb_trnflw_count', 'mb_trnflw_amount_sum', 'mb_trnflw_amount_mean',\n",
    "                           'mb_trnflw_amount_median', 'mb_trnflw_amount_std', 'mb_trnflw_amount_min',\n",
    "                           'mb_trnflw_amount_max', 'mb_trnflw_amount_q25', 'mb_trnflw_amount_q75',\n",
    "                           'mb_trnflw_transcode_nunique', 'mb_trnflw_first_date', 'mb_trnflw_last_date']\n",
    "    \n",
    "    # 1.2 派生特征\n",
    "    basic_stats['mb_trnflw_amount_range'] = basic_stats['mb_trnflw_amount_max'] - basic_stats['mb_trnflw_amount_min']\n",
    "    basic_stats['mb_trnflw_amount_iqr'] = basic_stats['mb_trnflw_amount_q75'] - basic_stats['mb_trnflw_amount_q25']\n",
    "    basic_stats['mb_trnflw_amount_cv'] = basic_stats['mb_trnflw_amount_std'] / (basic_stats['mb_trnflw_amount_mean'] + 1)\n",
    "    \n",
    "    # 日期特征\n",
    "    basic_stats['mb_trnflw_days_since_first'] = (reference_date - basic_stats['mb_trnflw_first_date']).dt.days\n",
    "    basic_stats['mb_trnflw_days_since_last'] = (reference_date - basic_stats['mb_trnflw_last_date']).dt.days\n",
    "    basic_stats['mb_trnflw_active_days'] = (basic_stats['mb_trnflw_last_date'] - basic_stats['mb_trnflw_first_date']).dt.days + 1\n",
    "    basic_stats['mb_trnflw_freq_per_day'] = basic_stats['mb_trnflw_count'] / (basic_stats['mb_trnflw_active_days'] + 1)\n",
    "    \n",
    "    features = features.merge(basic_stats.drop(['mb_trnflw_first_date', 'mb_trnflw_last_date'], axis=1), \n",
    "                              on='CUST_NO', how='outer') if not features.empty else basic_stats.drop(['mb_trnflw_first_date', 'mb_trnflw_last_date'], axis=1)\n",
    "    \n",
    "    # ===== 2. 时间窗口特征 =====\n",
    "    print(\"2. 时间窗口特征...\")\n",
    "    \n",
    "    time_windows = [1, 3, 7, 14, 30, 60, 90]\n",
    "    for window in time_windows:\n",
    "        df_window = df[df['days_from_ref'] < window]\n",
    "        \n",
    "        if len(df_window) > 0:\n",
    "            window_stats = df_window.groupby('CUST_NO').agg({\n",
    "                'AMOUNT': ['count', 'sum', 'mean', 'max'],\n",
    "                'TRANSCODE': 'nunique'\n",
    "            }).reset_index()\n",
    "            \n",
    "            window_stats.columns = ['CUST_NO',\n",
    "                                    f'mb_trnflw_count_last_{window}d',\n",
    "                                    f'mb_trnflw_amount_sum_last_{window}d',\n",
    "                                    f'mb_trnflw_amount_mean_last_{window}d',\n",
    "                                    f'mb_trnflw_amount_max_last_{window}d',\n",
    "                                    f'mb_trnflw_transcode_nunique_last_{window}d']\n",
    "            \n",
    "            features = features.merge(window_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # ===== 3. 周末vs工作日特征 =====\n",
    "    print(\"3. 周末vs工作日特征...\")\n",
    "    \n",
    "    # 周末特征\n",
    "    df_weekend = df[df['is_weekend'] == 1]\n",
    "    if len(df_weekend) > 0:\n",
    "        weekend_stats = df_weekend.groupby('CUST_NO').agg({\n",
    "            'AMOUNT': ['count', 'sum', 'mean'],\n",
    "            'TRANSCODE': 'nunique'\n",
    "        }).reset_index()\n",
    "        weekend_stats.columns = ['CUST_NO', 'mb_trnflw_weekend_count', 'mb_trnflw_weekend_amount_sum',\n",
    "                                 'mb_trnflw_weekend_amount_mean', 'mb_trnflw_weekend_transcode_nunique']\n",
    "        features = features.merge(weekend_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 工作日特征\n",
    "    df_weekday = df[df['is_weekend'] == 0]\n",
    "    if len(df_weekday) > 0:\n",
    "        weekday_stats = df_weekday.groupby('CUST_NO').agg({\n",
    "            'AMOUNT': ['count', 'sum', 'mean'],\n",
    "            'TRANSCODE': 'nunique'\n",
    "        }).reset_index()\n",
    "        weekday_stats.columns = ['CUST_NO', 'mb_trnflw_weekday_count', 'mb_trnflw_weekday_amount_sum',\n",
    "                                 'mb_trnflw_weekday_amount_mean', 'mb_trnflw_weekday_transcode_nunique']\n",
    "        features = features.merge(weekday_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 周末占比\n",
    "    if 'mb_trnflw_weekend_count' in features.columns:\n",
    "        features['mb_trnflw_weekend_ratio'] = features['mb_trnflw_weekend_count'] / (features['mb_trnflw_count'] + 1)\n",
    "        features['mb_trnflw_weekend_amount_ratio'] = features['mb_trnflw_weekend_amount_sum'] / (features['mb_trnflw_amount_sum'] + 1)\n",
    "    \n",
    "    # ===== 4. 交易码(TRANSCODE)深度特征 =====\n",
    "    print(\"4. 交易码深度特征...\")\n",
    "    \n",
    "    # 最常用的交易码\n",
    "    top_transcodes = df['TRANSCODE'].value_counts().head(20).index.tolist()\n",
    "    for idx, transcode in enumerate(top_transcodes):\n",
    "        df_transcode = df[df['TRANSCODE'] == transcode]\n",
    "        if len(df_transcode) > 0:\n",
    "            transcode_stats = df_transcode.groupby('CUST_NO').agg({\n",
    "                'AMOUNT': ['count', 'sum', 'mean']\n",
    "            }).reset_index()\n",
    "            transcode_stats.columns = ['CUST_NO', \n",
    "                                       f'mb_trnflw_top{idx+1}_transcode_count',\n",
    "                                       f'mb_trnflw_top{idx+1}_transcode_amount_sum',\n",
    "                                       f'mb_trnflw_top{idx+1}_transcode_amount_mean']\n",
    "            features = features.merge(transcode_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # ===== 5. 金额分布特征 =====\n",
    "    print(\"5. 金额分布特征...\")\n",
    "    \n",
    "    # 不同金额区间的交易次数\n",
    "    amount_bins = [0, 100, 500, 1000, 5000, 10000, 50000, float('inf')]\n",
    "    amount_labels = ['0_100', '100_500', '500_1k', '1k_5k', '5k_10k', '10k_50k', '50k_plus']\n",
    "    \n",
    "    df['amount_bin'] = pd.cut(df['AMOUNT'], bins=amount_bins, labels=amount_labels)\n",
    "    \n",
    "    for label in amount_labels:\n",
    "        df_bin = df[df['amount_bin'] == label]\n",
    "        if len(df_bin) > 0:\n",
    "            amount_bin_count = df_bin.groupby('CUST_NO').size().reset_index(name=f'mb_trnflw_amount_{label}_count')\n",
    "            features = features.merge(amount_bin_count, on='CUST_NO', how='left')\n",
    "    \n",
    "    # ===== 6. 时间趋势特征 =====\n",
    "    print(\"6. 时间趋势特征...\")\n",
    "    \n",
    "    # 按周统计\n",
    "    df_weekly = df.groupby(['CUST_NO', 'week']).agg({\n",
    "        'AMOUNT': ['count', 'sum']\n",
    "    }).reset_index()\n",
    "    \n",
    "    df_weekly.columns = ['CUST_NO', 'week', 'count', 'sum']\n",
    "    \n",
    "    # 计算周交易次数的趋势\n",
    "    weekly_trend = df_weekly.groupby('CUST_NO')['count'].agg(['mean', 'std', 'min', 'max']).reset_index()\n",
    "    weekly_trend.columns = ['CUST_NO', 'mb_trnflw_weekly_count_mean', 'mb_trnflw_weekly_count_std',\n",
    "                            'mb_trnflw_weekly_count_min', 'mb_trnflw_weekly_count_max']\n",
    "    \n",
    "    features = features.merge(weekly_trend, on='CUST_NO', how='left')\n",
    "    \n",
    "    # ===== 7. 活跃度特征 =====\n",
    "    print(\"7. 活跃度特征...\")\n",
    "    \n",
    "    # 活跃天数\n",
    "    active_days = df.groupby('CUST_NO')['DATE'].nunique().reset_index(name='mb_trnflw_active_days_count')\n",
    "    features = features.merge(active_days, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 活跃度 = 活跃天数 / 总天数\n",
    "    features['mb_trnflw_activity_rate'] = features['mb_trnflw_active_days_count'] / (features['mb_trnflw_active_days'] + 1)\n",
    "    \n",
    "    # 连续活跃天数\n",
    "    def calc_consecutive_days(group):\n",
    "        dates = sorted(group['DATE'].unique())\n",
    "        if len(dates) == 0:\n",
    "            return 0\n",
    "        max_consecutive = 1\n",
    "        current_consecutive = 1\n",
    "        for i in range(1, len(dates)):\n",
    "            if (dates[i] - dates[i-1]).days == 1:\n",
    "                current_consecutive += 1\n",
    "                max_consecutive = max(max_consecutive, current_consecutive)\n",
    "            else:\n",
    "                current_consecutive = 1\n",
    "        return max_consecutive\n",
    "    \n",
    "    consecutive_days = df.groupby('CUST_NO').apply(calc_consecutive_days).reset_index(name='mb_trnflw_max_consecutive_days')\n",
    "    features = features.merge(consecutive_days, on='CUST_NO', how='left')\n",
    "    \n",
    "    # ===== 8. 高级统计特征 =====\n",
    "    print(\"8. 高级统计特征...\")\n",
    "    \n",
    "    # 偏度和峰度\n",
    "    from scipy.stats import skew, kurtosis\n",
    "    \n",
    "    skew_kurt = df.groupby('CUST_NO')['AMOUNT'].agg([\n",
    "        ('mb_trnflw_amount_skew', lambda x: skew(x) if len(x) > 1 else 0),\n",
    "        ('mb_trnflw_amount_kurt', lambda x: kurtosis(x) if len(x) > 1 else 0)\n",
    "    ]).reset_index()\n",
    "    \n",
    "    features = features.merge(skew_kurt, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 0金额交易占比\n",
    "    df_zero = df[df['AMOUNT'] == 0]\n",
    "    if len(df_zero) > 0:\n",
    "        zero_amount = df_zero.groupby('CUST_NO').size().reset_index(name='mb_trnflw_zero_amount_count')\n",
    "        features = features.merge(zero_amount, on='CUST_NO', how='left')\n",
    "        features['mb_trnflw_zero_amount_ratio'] = features['mb_trnflw_zero_amount_count'] / (features['mb_trnflw_count'] + 1)\n",
    "    \n",
    "    # ===== 填充缺失值 =====\n",
    "    features = features.fillna(0)\n",
    "    \n",
    "    print(f\"掌银金融性流水表特征构建完成! 特征数量: {len(features.columns) - 1}\")\n",
    "    \n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ac42f10c",
   "metadata": {},
   "source": [
    "### 掌银非金融性流水表特征工程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "7f265a94",
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_mb_qrytrnflw_features(df, reference_date):\n",
    "    \"\"\"\n",
    "    创建掌银非金融性流水表的详细特征\n",
    "    \n",
    "    参数:\n",
    "    - df: 掌银非金融性流水表DataFrame\n",
    "    - reference_date: 参考日期\n",
    "    \n",
    "    返回:\n",
    "    - 特征DataFrame\n",
    "    \"\"\"\n",
    "    print(\"开始构建掌银非金融性流水表特征...\")\n",
    "    \n",
    "    features = pd.DataFrame()\n",
    "    \n",
    "    # ===== 1. 基础统计特征 =====\n",
    "    print(\"1. 基础统计特征...\")\n",
    "    \n",
    "    # 1.1 整体统计\n",
    "    basic_stats = df.groupby('CUST_NO').agg({\n",
    "        'TRANSCODE': ['count', 'nunique'],\n",
    "        'DATE': ['min', 'max'],\n",
    "    }).reset_index()\n",
    "    \n",
    "    basic_stats.columns = ['CUST_NO', \n",
    "                           'mb_qrytrnflw_count', 'mb_qrytrnflw_transcode_nunique',\n",
    "                           'mb_qrytrnflw_first_date', 'mb_qrytrnflw_last_date']\n",
    "    \n",
    "    # 1.2 派生特征\n",
    "    basic_stats['mb_qrytrnflw_days_since_first'] = (reference_date - basic_stats['mb_qrytrnflw_first_date']).dt.days\n",
    "    basic_stats['mb_qrytrnflw_days_since_last'] = (reference_date - basic_stats['mb_qrytrnflw_last_date']).dt.days\n",
    "    basic_stats['mb_qrytrnflw_active_days'] = (basic_stats['mb_qrytrnflw_last_date'] - basic_stats['mb_qrytrnflw_first_date']).dt.days + 1\n",
    "    basic_stats['mb_qrytrnflw_freq_per_day'] = basic_stats['mb_qrytrnflw_count'] / (basic_stats['mb_qrytrnflw_active_days'] + 1)\n",
    "    \n",
    "    features = features.merge(basic_stats.drop(['mb_qrytrnflw_first_date', 'mb_qrytrnflw_last_date'], axis=1), \n",
    "                              on='CUST_NO', how='outer') if not features.empty else basic_stats.drop(['mb_qrytrnflw_first_date', 'mb_qrytrnflw_last_date'], axis=1)\n",
    "    \n",
    "    # ===== 2. 时间窗口特征 =====\n",
    "    print(\"2. 时间窗口特征...\")\n",
    "    \n",
    "    time_windows = [1, 3, 7, 14, 30, 60, 90]\n",
    "    for window in time_windows:\n",
    "        df_window = df[df['days_from_ref'] < window]\n",
    "        \n",
    "        if len(df_window) > 0:\n",
    "            window_stats = df_window.groupby('CUST_NO').agg({\n",
    "                'TRANSCODE': ['count', 'nunique']\n",
    "            }).reset_index()\n",
    "            \n",
    "            window_stats.columns = ['CUST_NO',\n",
    "                                    f'mb_qrytrnflw_count_last_{window}d',\n",
    "                                    f'mb_qrytrnflw_transcode_nunique_last_{window}d']\n",
    "            \n",
    "            features = features.merge(window_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # ===== 3. 周末vs工作日特征 =====\n",
    "    print(\"3. 周末vs工作日特征...\")\n",
    "    \n",
    "    # 周末特征\n",
    "    df_weekend = df[df['is_weekend'] == 1]\n",
    "    if len(df_weekend) > 0:\n",
    "        weekend_stats = df_weekend.groupby('CUST_NO').agg({\n",
    "            'TRANSCODE': ['count', 'nunique']\n",
    "        }).reset_index()\n",
    "        weekend_stats.columns = ['CUST_NO', 'mb_qrytrnflw_weekend_count', 'mb_qrytrnflw_weekend_transcode_nunique']\n",
    "        features = features.merge(weekend_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 工作日特征\n",
    "    df_weekday = df[df['is_weekend'] == 0]\n",
    "    if len(df_weekday) > 0:\n",
    "        weekday_stats = df_weekday.groupby('CUST_NO').agg({\n",
    "            'TRANSCODE': ['count', 'nunique']\n",
    "        }).reset_index()\n",
    "        weekday_stats.columns = ['CUST_NO', 'mb_qrytrnflw_weekday_count', 'mb_qrytrnflw_weekday_transcode_nunique']\n",
    "        features = features.merge(weekday_stats, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 周末占比\n",
    "    if 'mb_qrytrnflw_weekend_count' in features.columns:\n",
    "        features['mb_qrytrnflw_weekend_ratio'] = features['mb_qrytrnflw_weekend_count'] / (features['mb_qrytrnflw_count'] + 1)\n",
    "    \n",
    "    # ===== 4. 交易码(TRANSCODE)深度特征 =====\n",
    "    print(\"4. 交易码深度特征...\")\n",
    "    \n",
    "    # 最常用的交易码\n",
    "    top_transcodes = df['TRANSCODE'].value_counts().head(30).index.tolist()\n",
    "    for idx, transcode in enumerate(top_transcodes):\n",
    "        df_transcode = df[df['TRANSCODE'] == transcode]\n",
    "        if len(df_transcode) > 0:\n",
    "            transcode_count = df_transcode.groupby('CUST_NO').size().reset_index(name=f'mb_qrytrnflw_top{idx+1}_transcode_count')\n",
    "            features = features.merge(transcode_count, on='CUST_NO', how='left')\n",
    "    \n",
    "    # ===== 5. 时间趋势特征 =====\n",
    "    print(\"5. 时间趋势特征...\")\n",
    "    \n",
    "    # 按周统计\n",
    "    df_weekly = df.groupby(['CUST_NO', 'week']).size().reset_index(name='count')\n",
    "    \n",
    "    # 计算周查询次数的趋势\n",
    "    weekly_trend = df_weekly.groupby('CUST_NO')['count'].agg(['mean', 'std', 'min', 'max']).reset_index()\n",
    "    weekly_trend.columns = ['CUST_NO', 'mb_qrytrnflw_weekly_count_mean', 'mb_qrytrnflw_weekly_count_std',\n",
    "                            'mb_qrytrnflw_weekly_count_min', 'mb_qrytrnflw_weekly_count_max']\n",
    "    \n",
    "    features = features.merge(weekly_trend, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 按日统计\n",
    "    df_daily = df.groupby(['CUST_NO', 'DATE']).size().reset_index(name='count')\n",
    "    \n",
    "    # 计算日查询次数的趋势\n",
    "    daily_trend = df_daily.groupby('CUST_NO')['count'].agg(['mean', 'std', 'min', 'max']).reset_index()\n",
    "    daily_trend.columns = ['CUST_NO', 'mb_qrytrnflw_daily_count_mean', 'mb_qrytrnflw_daily_count_std',\n",
    "                           'mb_qrytrnflw_daily_count_min', 'mb_qrytrnflw_daily_count_max']\n",
    "    \n",
    "    features = features.merge(daily_trend, on='CUST_NO', how='left')\n",
    "    \n",
    "    # ===== 6. 活跃度特征 =====\n",
    "    print(\"6. 活跃度特征...\")\n",
    "    \n",
    "    # 活跃天数\n",
    "    active_days = df.groupby('CUST_NO')['DATE'].nunique().reset_index(name='mb_qrytrnflw_active_days_count')\n",
    "    features = features.merge(active_days, on='CUST_NO', how='left')\n",
    "    \n",
    "    # 活跃度 = 活跃天数 / 总天数\n",
    "    features['mb_qrytrnflw_activity_rate'] = features['mb_qrytrnflw_active_days_count'] / (features['mb_qrytrnflw_active_days'] + 1)\n",
    "    \n",
    "    # 连续活跃天数\n",
    "    def calc_consecutive_days(group):\n",
    "        dates = sorted(group['DATE'].unique())\n",
    "        if len(dates) == 0:\n",
    "            return 0\n",
    "        max_consecutive = 1\n",
    "        current_consecutive = 1\n",
    "        for i in range(1, len(dates)):\n",
    "            if (dates[i] - dates[i-1]).days == 1:\n",
    "                current_consecutive += 1\n",
    "                max_consecutive = max(max_consecutive, current_consecutive)\n",
    "            else:\n",
    "                current_consecutive = 1\n",
    "        return max_consecutive\n",
    "    \n",
    "    consecutive_days = df.groupby('CUST_NO').apply(calc_consecutive_days).reset_index(name='mb_qrytrnflw_max_consecutive_days')\n",
    "    features = features.merge(consecutive_days, on='CUST_NO', how='left')\n",
    "    \n",
    "    # ===== 7. 查询模式特征(如果有时间字段) =====\n",
    "    print(\"7. 查询模式特征...\")\n",
    "    \n",
    "    # 注意: 数据中没有小时字段,这里添加每个时段的分布(按天的时段简化处理)\n",
    "    # 按一天分4个时段: 0-6点,6-12点,12-18点,18-24点\n",
    "    # 由于原始数据没有小时,这里简化处理,仅统计早中晚数量分布\n",
    "    \n",
    "    # ===== 8. 查询频率变化特征 =====\n",
    "    print(\"8. 查询频率变化特征...\")\n",
    "    \n",
    "    # 计算前后两个时间窗口的变化率\n",
    "    # 最近7天 vs 前7天\n",
    "    if 'mb_qrytrnflw_count_last_7d' in features.columns and 'mb_qrytrnflw_count_last_14d' in features.columns:\n",
    "        features['mb_qrytrnflw_count_7d_change'] = (features['mb_qrytrnflw_count_last_7d'] - \n",
    "                                                     (features['mb_qrytrnflw_count_last_14d'] - features['mb_qrytrnflw_count_last_7d']))\n",
    "        features['mb_qrytrnflw_count_7d_change_ratio'] = features['mb_qrytrnflw_count_7d_change'] / (\n",
    "            features['mb_qrytrnflw_count_last_14d'] - features['mb_qrytrnflw_count_last_7d'] + 1)\n",
    "    \n",
    "    # 最近30天 vs 前30天\n",
    "    if 'mb_qrytrnflw_count_last_30d' in features.columns and 'mb_qrytrnflw_count_last_60d' in features.columns:\n",
    "        features['mb_qrytrnflw_count_30d_change'] = (features['mb_qrytrnflw_count_last_30d'] - \n",
    "                                                      (features['mb_qrytrnflw_count_last_60d'] - features['mb_qrytrnflw_count_last_30d']))\n",
    "        features['mb_qrytrnflw_count_30d_change_ratio'] = features['mb_qrytrnflw_count_30d_change'] / (\n",
    "            features['mb_qrytrnflw_count_last_60d'] - features['mb_qrytrnflw_count_last_30d'] + 1)\n",
    "    \n",
    "    # ===== 9. 交易码集中度特征 =====\n",
    "    print(\"9. 交易码集中度特征...\")\n",
    "    \n",
    "    # 最常用交易码的占比\n",
    "    transcode_dist = df.groupby(['CUST_NO', 'TRANSCODE']).size().reset_index(name='count')\n",
    "    transcode_max = transcode_dist.groupby('CUST_NO')['count'].max().reset_index(name='mb_qrytrnflw_top1_transcode_count')\n",
    "    \n",
    "    features = features.merge(transcode_max, on='CUST_NO', how='left')\n",
    "    \n",
    "    if 'mb_qrytrnflw_top1_transcode_count' in features.columns:\n",
    "        features['mb_qrytrnflw_top1_transcode_ratio'] = features['mb_qrytrnflw_top1_transcode_count'] / (features['mb_qrytrnflw_count'] + 1)\n",
    "    \n",
    "    # 交易码熵(衡量多样性)\n",
    "    def calc_entropy(group):\n",
    "        counts = group['TRANSCODE'].value_counts()\n",
    "        probs = counts / counts.sum()\n",
    "        entropy = -np.sum(probs * np.log(probs + 1e-10))\n",
    "        return entropy\n",
    "    \n",
    "    transcode_entropy = df.groupby('CUST_NO').apply(calc_entropy).reset_index(name='mb_qrytrnflw_transcode_entropy')\n",
    "    features = features.merge(transcode_entropy, on='CUST_NO', how='left')\n",
    "    \n",
    "    # ===== 填充缺失值 =====\n",
    "    features = features.fillna(0)\n",
    "    \n",
    "    print(f\"掌银非金融性流水表特征构建完成! 特征数量: {len(features.columns) - 1}\")\n",
    "    \n",
    "    return features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "587b0769",
   "metadata": {},
   "source": [
    "### 金融与非金融流水交叉特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "7dc0f08c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_cross_features(trnflw_features, qrytrnflw_features):\n",
    "    \"\"\"\n",
    "    创建金融与非金融流水的交叉特征\n",
    "    \n",
    "    参数:\n",
    "    - trnflw_features: 金融流水特征DataFrame\n",
    "    - qrytrnflw_features: 非金融流水特征DataFrame\n",
    "    \n",
    "    返回:\n",
    "    - 交叉特征DataFrame\n",
    "    \"\"\"\n",
    "    print(\"开始构建交叉特征...\")\n",
    "    \n",
    "    # 合并两个特征表\n",
    "    cross_features = trnflw_features.merge(qrytrnflw_features, on='CUST_NO', how='outer').fillna(0)\n",
    "    \n",
    "    # ===== 1. 比率特征 =====\n",
    "    print(\"1. 比率特征...\")\n",
    "    \n",
    "    # 金融交易次数 / 非金融查询次数\n",
    "    cross_features['mb_trnflw_qrytrnflw_count_ratio'] = cross_features['mb_trnflw_count'] / (cross_features['mb_qrytrnflw_count'] + 1)\n",
    "    \n",
    "    # 金融交易金额 / 非金融查询次数\n",
    "    cross_features['mb_trnflw_amount_per_qrytrnflw'] = cross_features['mb_trnflw_amount_sum'] / (cross_features['mb_qrytrnflw_count'] + 1)\n",
    "    \n",
    "    # 交易码多样性比较\n",
    "    cross_features['mb_trnflw_qrytrnflw_transcode_ratio'] = cross_features['mb_trnflw_transcode_nunique'] / (cross_features['mb_qrytrnflw_transcode_nunique'] + 1)\n",
    "    \n",
    "    # ===== 2. 活跃度差异特征 =====\n",
    "    print(\"2. 活跃度差异特征...\")\n",
    "    \n",
    "    # 活跃天数差异\n",
    "    cross_features['mb_trnflw_qrytrnflw_active_days_diff'] = cross_features['mb_qrytrnflw_active_days_count'] - cross_features['mb_trnflw_active_days_count']\n",
    "    \n",
    "    # 活跃率差异\n",
    "    cross_features['mb_trnflw_qrytrnflw_activity_rate_diff'] = cross_features['mb_qrytrnflw_activity_rate'] - cross_features['mb_trnflw_activity_rate']\n",
    "    \n",
    "    # 频率差异\n",
    "    cross_features['mb_trnflw_qrytrnflw_freq_diff'] = cross_features['mb_qrytrnflw_freq_per_day'] - cross_features['mb_trnflw_freq_per_day']\n",
    "    \n",
    "    # ===== 3. 时间窗口交叉特征 =====\n",
    "    print(\"3. 时间窗口交叉特征...\")\n",
    "    \n",
    "    time_windows = [7, 14, 30, 60, 90]\n",
    "    for window in time_windows:\n",
    "        if f'mb_trnflw_count_last_{window}d' in cross_features.columns and f'mb_qrytrnflw_count_last_{window}d' in cross_features.columns:\n",
    "            # 比率\n",
    "            cross_features[f'mb_trnflw_qrytrnflw_count_ratio_last_{window}d'] = (\n",
    "                cross_features[f'mb_trnflw_count_last_{window}d'] / (cross_features[f'mb_qrytrnflw_count_last_{window}d'] + 1)\n",
    "            )\n",
    "            \n",
    "            # 差异\n",
    "            cross_features[f'mb_trnflw_qrytrnflw_count_diff_last_{window}d'] = (\n",
    "                cross_features[f'mb_qrytrnflw_count_last_{window}d'] - cross_features[f'mb_trnflw_count_last_{window}d']\n",
    "            )\n",
    "    \n",
    "    # ===== 4. 周末行为交叉特征 =====\n",
    "    print(\"4. 周末行为交叉特征...\")\n",
    "    \n",
    "    # 周末金融交易 / 周末非金融查询\n",
    "    cross_features['mb_trnflw_qrytrnflw_weekend_count_ratio'] = (\n",
    "        cross_features['mb_trnflw_weekend_count'] / (cross_features['mb_qrytrnflw_weekend_count'] + 1)\n",
    "    )\n",
    "    \n",
    "    # 周末占比差异\n",
    "    cross_features['mb_trnflw_qrytrnflw_weekend_ratio_diff'] = (\n",
    "        cross_features['mb_qrytrnflw_weekend_ratio'] - cross_features['mb_trnflw_weekend_ratio']\n",
    "    )\n",
    "    \n",
    "    # ===== 5. 最后活跃时间差异 =====\n",
    "    print(\"5. 最后活跃时间差异...\")\n",
    "    \n",
    "    # 最后交易日期差异\n",
    "    cross_features['mb_trnflw_qrytrnflw_last_date_diff'] = (\n",
    "        cross_features['mb_trnflw_days_since_last'] - cross_features['mb_qrytrnflw_days_since_last']\n",
    "    )\n",
    "    \n",
    "    # 首次活跃日期差异\n",
    "    cross_features['mb_trnflw_qrytrnflw_first_date_diff'] = (\n",
    "        cross_features['mb_trnflw_days_since_first'] - cross_features['mb_qrytrnflw_days_since_first']\n",
    "    )\n",
    "    \n",
    "    # ===== 6. 行为一致性特征 =====\n",
    "    print(\"6. 行为一致性特征...\")\n",
    "    \n",
    "    # 周末行为一致性(都在周末活跃或都不在周末活跃)\n",
    "    cross_features['mb_trnflw_qrytrnflw_weekend_consistency'] = (\n",
    "        (cross_features['mb_trnflw_weekend_ratio'] > 0) & (cross_features['mb_qrytrnflw_weekend_ratio'] > 0)\n",
    "    ).astype(int)\n",
    "    \n",
    "    # 活跃度一致性(都活跃或都不活跃)\n",
    "    cross_features['mb_trnflw_qrytrnflw_activity_consistency'] = (\n",
    "        (cross_features['mb_trnflw_activity_rate'] > 0.5) == (cross_features['mb_qrytrnflw_activity_rate'] > 0.5)\n",
    "    ).astype(int)\n",
    "    \n",
    "    # ===== 7. 复合指标 =====\n",
    "    print(\"7. 复合指标...\")\n",
    "    \n",
    "    # 总活跃度指数 = 金融交易次数 + 非金融查询次数\n",
    "    cross_features['mb_total_activity_index'] = cross_features['mb_trnflw_count'] + cross_features['mb_qrytrnflw_count']\n",
    "    \n",
    "    # 金融活跃度占比\n",
    "    cross_features['mb_trnflw_activity_proportion'] = cross_features['mb_trnflw_count'] / (cross_features['mb_total_activity_index'] + 1)\n",
    "    \n",
    "    # 综合活跃天数\n",
    "    cross_features['mb_total_active_days'] = cross_features['mb_trnflw_active_days_count'] + cross_features['mb_qrytrnflw_active_days_count']\n",
    "    \n",
    "    # ===== 8. 趋势一致性特征 =====\n",
    "    print(\"8. 趋势一致性特征...\")\n",
    "    \n",
    "    # 周统计趋势一致性\n",
    "    if 'mb_trnflw_weekly_count_mean' in cross_features.columns and 'mb_qrytrnflw_weekly_count_mean' in cross_features.columns:\n",
    "        cross_features['mb_weekly_trend_ratio'] = (\n",
    "            cross_features['mb_trnflw_weekly_count_mean'] / (cross_features['mb_qrytrnflw_weekly_count_mean'] + 1)\n",
    "        )\n",
    "    \n",
    "    print(f\"交叉特征构建完成! 新增特征数量: {len(cross_features.columns) - len(trnflw_features.columns) - len(qrytrnflw_features.columns) + 1}\")\n",
    "    \n",
    "    return cross_features"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e74f88ec",
   "metadata": {},
   "source": [
    "## 执行特征工程并保存"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "639c253c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================================================================================\n",
      "开始执行特征工程\n",
      "================================================================================\n",
      "\n",
      "【步骤1/3】生成掌银金融性流水表特征...\n",
      "开始构建掌银金融性流水表特征...\n",
      "1. 基础统计特征...\n",
      "2. 时间窗口特征...\n",
      "3. 周末vs工作日特征...\n",
      "4. 交易码深度特征...\n",
      "5. 金额分布特征...\n",
      "6. 时间趋势特征...\n",
      "7. 活跃度特征...\n",
      "8. 高级统计特征...\n",
      "掌银金融性流水表特征构建完成! 特征数量: 140\n",
      "✓ 金融流水特征维度: (1400, 141)\n",
      "\n",
      "【步骤2/3】生成掌银非金融性流水表特征...\n",
      "开始构建掌银非金融性流水表特征...\n",
      "1. 基础统计特征...\n",
      "2. 时间窗口特征...\n",
      "3. 周末vs工作日特征...\n",
      "4. 交易码深度特征...\n",
      "5. 时间趋势特征...\n",
      "6. 活跃度特征...\n",
      "7. 查询模式特征...\n",
      "8. 查询频率变化特征...\n",
      "9. 交易码集中度特征...\n",
      "掌银非金融性流水表特征构建完成! 特征数量: 72\n",
      "✓ 非金融流水特征维度: (3128, 73)\n",
      "\n",
      "【步骤3/3】生成交叉特征...\n",
      "开始构建交叉特征...\n",
      "1. 比率特征...\n",
      "2. 活跃度差异特征...\n",
      "3. 时间窗口交叉特征...\n",
      "4. 周末行为交叉特征...\n",
      "5. 最后活跃时间差异...\n",
      "6. 行为一致性特征...\n",
      "7. 复合指标...\n",
      "8. 趋势一致性特征...\n",
      "交叉特征构建完成! 新增特征数量: 26\n",
      "✓ 最终特征维度: (3128, 239)\n",
      "\n",
      "================================================================================\n",
      "特征工程完成!\n",
      "================================================================================\n",
      "总特征数量: 238\n",
      "客户数量: 3128\n"
     ]
    }
   ],
   "source": [
    "# Run the full feature-engineering pipeline: two per-table feature\n",
    "# builders followed by a cross-feature join between their outputs.\n",
    "banner = \"=\" * 80\n",
    "print(banner)\n",
    "print(\"开始执行特征工程\")\n",
    "print(banner)\n",
    "\n",
    "# Step 1/3: features from the mobile-banking financial transaction table\n",
    "print(\"\\n【步骤1/3】生成掌银金融性流水表特征...\")\n",
    "mb_trnflw_features = create_mb_trnflw_features(MB_TRNFLW_data, reference_date)\n",
    "print(f\"✓ 金融流水特征维度: {mb_trnflw_features.shape}\")\n",
    "\n",
    "# Step 2/3: features from the mobile-banking non-financial (query) table\n",
    "print(\"\\n【步骤2/3】生成掌银非金融性流水表特征...\")\n",
    "mb_qrytrnflw_features = create_mb_qrytrnflw_features(MB_QRYTRNFLW_data, reference_date)\n",
    "print(f\"✓ 非金融流水特征维度: {mb_qrytrnflw_features.shape}\")\n",
    "\n",
    "# Step 3/3: cross features combining both per-table feature frames\n",
    "print(\"\\n【步骤3/3】生成交叉特征...\")\n",
    "final_features = create_cross_features(mb_trnflw_features, mb_qrytrnflw_features)\n",
    "print(f\"✓ 最终特征维度: {final_features.shape}\")\n",
    "\n",
    "print(\"\\n\" + banner)\n",
    "print(\"特征工程完成!\")\n",
    "print(banner)\n",
    "# -1 excludes the customer-id key column — TODO confirm against the builders\n",
    "print(f\"总特征数量: {len(final_features.columns) - 1}\")\n",
    "print(f\"客户数量: {len(final_features)}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "11e3a92e",
   "metadata": {},
   "source": [
    "### 训练集保存"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "2e4f3753",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "特征文件已保存: ./feature/Train\\TRAIN_MB_TRNFLW_QRYTRNFLW_features.pkl\n",
      "文件大小: 55.26 MB\n"
     ]
    }
   ],
   "source": [
    "# Persist the training-set feature table to disk as a pickle file.\n",
    "feature_dir = './feature/Train'\n",
    "# exist_ok=True removes the check-then-create race of a bare\n",
    "# os.path.exists()/os.makedirs() pair; the existence test is kept only\n",
    "# to reproduce the one-time directory-creation message.\n",
    "if not os.path.exists(feature_dir):\n",
    "    print(f\"创建特征目录: {feature_dir}\")\n",
    "os.makedirs(feature_dir, exist_ok=True)\n",
    "\n",
    "# Save in pickle format. NOTE(review): pickle is Python-specific and\n",
    "# unsafe to load from untrusted sources; parquet would be more portable.\n",
    "output_file = os.path.join(feature_dir, 'TRAIN_MB_TRNFLW_QRYTRNFLW_features.pkl')\n",
    "with open(output_file, 'wb') as f:\n",
    "    pickle.dump(final_features, f)\n",
    "\n",
    "print(f\"\\n特征文件已保存: {output_file}\")\n",
    "print(f\"文件大小: {os.path.getsize(output_file) / 1024 / 1024:.2f} MB\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1a557ed2",
   "metadata": {},
   "source": [
    "### A测试集保存"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "b558d9ab",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "特征文件已保存: ./feature/A\\A_MB_TRNFLW_QRYTRNFLW_features.pkl\n",
      "文件大小: 5.77 MB\n"
     ]
    }
   ],
   "source": [
    "# Persist the A test-set feature table to disk as a pickle file.\n",
    "# NOTE(review): this cell pickles the same `final_features` variable as the\n",
    "# training-save cell above — it relies on the pipeline cell having been\n",
    "# re-run on the A-set inputs first (hidden kernel state; the saved\n",
    "# execution counts are out of order). Confirm before a fresh Run-All.\n",
    "feature_dir = './feature/A'\n",
    "# exist_ok=True removes the check-then-create race of a bare\n",
    "# os.path.exists()/os.makedirs() pair; the existence test is kept only\n",
    "# to reproduce the one-time directory-creation message.\n",
    "if not os.path.exists(feature_dir):\n",
    "    print(f\"创建特征目录: {feature_dir}\")\n",
    "os.makedirs(feature_dir, exist_ok=True)\n",
    "\n",
    "# Save in pickle format. NOTE(review): pickle is Python-specific and\n",
    "# unsafe to load from untrusted sources; parquet would be more portable.\n",
    "output_file = os.path.join(feature_dir, 'A_MB_TRNFLW_QRYTRNFLW_features.pkl')\n",
    "with open(output_file, 'wb') as f:\n",
    "    pickle.dump(final_features, f)\n",
    "\n",
    "print(f\"\\n特征文件已保存: {output_file}\")\n",
    "print(f\"文件大小: {os.path.getsize(output_file) / 1024 / 1024:.2f} MB\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8e9143f8",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "starcup",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
