{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "7776be7f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-06-09 11:00:18,062 - INFO - ==================================================\n",
      "2025-06-09 11:00:18,062 - INFO - 商家重复购买预测系统 - 启动\n",
      "2025-06-09 11:00:18,063 - INFO - ==================================================\n"
     ]
    }
   ],
   "source": [
    "# =============================================================================\n",
    "# 模块1：导入库和基础配置\n",
    "# 功能：导入所需的Python库并配置日志系统\n",
    "# =============================================================================\n",
    "\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import os\n",
    "import logging\n",
    "import time\n",
    "from datetime import datetime\n",
    "from sklearn.model_selection import train_test_split, StratifiedKFold\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from imblearn.over_sampling import SMOTE\n",
    "import lightgbm as lgb\n",
    "import gc\n",
    "import xgboost as xgb\n",
    "from catboost import CatBoostClassifier\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.feature_selection import SelectKBest, f_classif, mutual_info_classif\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from scipy import stats\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "\n",
    "# 配置日志系统\n",
    "logging.basicConfig(\n",
    "    level=logging.INFO,\n",
    "    format='%(asctime)s - %(levelname)s - %(message)s',\n",
    "    handlers=[logging.StreamHandler()]\n",
    ")\n",
    "logger = logging.getLogger(__name__)\n",
    "\n",
    "logger.info(\"=\"*50)\n",
    "logger.info(\"商家重复购买预测系统 - 启动\")\n",
    "logger.info(\"=\"*50)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "e9d17a1e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# =============================================================================\n",
    "# 模块2：内存优化工具\n",
    "# 功能：减少DataFrame内存使用，提高数据处理效率\n",
    "# =============================================================================\n",
    "\n",
    "def reduce_mem_usage(df):\n",
    "    \"\"\"\n",
    "    优化DataFrame内存使用\n",
    "    通过调整数据类型来减少内存占用\n",
    "    \"\"\"\n",
    "    start_mem = df.memory_usage().sum() / 1024**2\n",
    "    logger.info(f\"内存优化前: {start_mem:.2f} MB\")\n",
    "    \n",
    "    for col in df.columns:\n",
    "        col_type = df[col].dtype\n",
    "        \n",
    "        if col_type != object:\n",
    "            c_min = df[col].min()\n",
    "            c_max = df[col].max()\n",
    "            \n",
    "            if str(col_type)[:3] == 'int':\n",
    "                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n",
    "                    df[col] = df[col].astype(np.int8)\n",
    "                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n",
    "                    df[col] = df[col].astype(np.int16)\n",
    "                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n",
    "                    df[col] = df[col].astype(np.int32)\n",
    "                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n",
    "                    df[col] = df[col].astype(np.int64)\n",
    "            else:\n",
    "                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\n",
    "                    df[col] = df[col].astype(np.float16)\n",
    "                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\n",
    "                    df[col] = df[col].astype(np.float32)\n",
    "                else:\n",
    "                    df[col] = df[col].astype(np.float64)\n",
    "    \n",
    "    end_mem = df.memory_usage().sum() / 1024**2\n",
    "    logger.info(f\"内存优化后: {end_mem:.2f} MB (减少 {100*(start_mem-end_mem)/start_mem:.1f}%)\")\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "4e4efe0d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# =============================================================================\n",
    "# 模块3：数据加载与预处理类\n",
    "# 功能：处理原始数据，生成基础特征，支持缓存机制\n",
    "# =============================================================================\n",
    "\n",
    "class DataLoader:\n",
    "    \"\"\"\n",
    "    数据加载和预处理类\n",
    "    - 加载原始数据文件\n",
    "    - 处理用户日志数据\n",
    "    - 生成基础特征\n",
    "    - 支持特征缓存机制\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, data_dir):\n",
    "        self.data_dir = data_dir\n",
    "        self.file_paths = {\n",
    "            'train': os.path.join(data_dir, 'train.csv'),\n",
    "            'test': os.path.join(data_dir, 'test_without_label.csv'),\n",
    "            'user_log': os.path.join(data_dir, 'user_log_format2.csv'),\n",
    "            'user_info': os.path.join(data_dir, 'user_info_format1.csv')\n",
    "        }\n",
    "        \n",
    "        # 创建缓存目录\n",
    "        self.cache_dir = os.path.join(os.path.dirname(data_dir), 'feature_cache')\n",
    "        os.makedirs(self.cache_dir, exist_ok=True)\n",
    "        \n",
    "        logger.info(f\"数据加载器初始化完成，数据目录: {data_dir}\")\n",
    "        logger.info(f\"缓存目录: {self.cache_dir}\")\n",
    "        \n",
    "    def validate_files(self):\n",
    "        \"\"\"检查所有必需文件是否存在\"\"\"\n",
    "        missing_files = []\n",
    "        for name, path in self.file_paths.items():\n",
    "            if not os.path.exists(path):\n",
    "                missing_files.append((name, path))\n",
    "        \n",
    "        if missing_files:\n",
    "            logger.error(\"以下文件缺失:\")\n",
    "            for name, path in missing_files:\n",
    "                logger.error(f\"- {name}: {path}\")\n",
    "            raise FileNotFoundError(\"必需数据文件缺失，请检查路径\")\n",
    "        \n",
    "        logger.info(\"所有数据文件验证通过\")\n",
    "    \n",
    "    def load_data(self, file_type):\n",
    "        \"\"\"加载指定类型的数据\"\"\"\n",
    "        path = self.file_paths.get(file_type)\n",
    "        if not path:\n",
    "            raise ValueError(f\"未知文件类型: {file_type}\")\n",
    "        \n",
    "        if not os.path.exists(path):\n",
    "            raise FileNotFoundError(f\"文件不存在: {path}\")\n",
    "        \n",
    "        logger.info(f\"正在加载文件: {file_type}\")\n",
    "        return pd.read_csv(path)\n",
    "    \n",
    "    def preprocess(self):\n",
    "        \"\"\"执行完整的数据预处理流程 - 优化版本\"\"\"\n",
    "        start_time = time.time()\n",
    "        \n",
    "        # 检查主要特征缓存\n",
    "        train_cache_file = os.path.join(self.cache_dir, 'train_features_optimized.pkl')\n",
    "        test_cache_file = os.path.join(self.cache_dir, 'test_features_optimized.pkl')\n",
    "        \n",
    "        # 尝试加载缓存\n",
    "        if os.path.exists(train_cache_file) and os.path.exists(test_cache_file):\n",
    "            logger.info(\"发现优化特征缓存，正在加载...\")\n",
    "            try:\n",
    "                train = pd.read_pickle(train_cache_file)\n",
    "                test = pd.read_pickle(test_cache_file)\n",
    "                logger.info(f\"缓存加载成功! 耗时: {time.time()-start_time:.2f}秒\")\n",
    "                return train, test\n",
    "            except Exception as e:\n",
    "                logger.warning(f\"缓存加载失败: {e}\")\n",
    "                logger.info(\"将重新生成特征...\")\n",
    "        \n",
    "        # 加载原始数据\n",
    "        train = self.load_data('train')\n",
    "        test = self.load_data('test')\n",
    "        user_info = self.load_data('user_info')\n",
    "        \n",
    "        # 处理用户信息\n",
    "        user_info['age_range'] = user_info['age_range'].fillna(0).astype(np.int8)\n",
    "        user_info['gender'] = user_info['gender'].fillna(2).astype(np.int8)\n",
    "        \n",
    "        # 合并用户信息\n",
    "        train = train.merge(user_info, on='user_id', how='left')\n",
    "        test = test.merge(user_info, on='user_id', how='left')\n",
    "        \n",
    "        # 处理用户日志 - 使用缓存机制\n",
    "        logger.info(\"开始处理用户日志数据...\")\n",
    "        user_log = self.process_user_log_optimized(self.file_paths['user_log'])\n",
    "        \n",
    "        # 生成特征\n",
    "        logger.info(\"开始生成基础特征...\")\n",
    "        train = self.generate_features_optimized(train, user_log)\n",
    "        test = self.generate_features_optimized(test, user_log)\n",
    "        \n",
    "        # 内存优化\n",
    "        logger.info(\"开始内存优化...\")\n",
    "        train = reduce_mem_usage(train)\n",
    "        test = reduce_mem_usage(test)\n",
    "        \n",
    "        # 保存缓存\n",
    "        logger.info(\"保存优化特征缓存...\")\n",
    "        try:\n",
    "            train.to_pickle(train_cache_file)\n",
    "            test.to_pickle(test_cache_file)\n",
    "            logger.info(f\"缓存保存成功: {self.cache_dir}\")\n",
    "        except Exception as e:\n",
    "            logger.warning(f\"缓存保存失败: {e}\")\n",
    "        \n",
    "        logger.info(f\"数据预处理完成! 总耗时: {time.time()-start_time:.2f}秒\")\n",
    "        return train, test\n",
    "\n",
    "    def clear_cache(self):\n",
    "        \"\"\"清除特征缓存文件\"\"\"\n",
    "        cache_dir = os.path.join(os.path.dirname(self.data_dir), 'feature_cache')\n",
    "        cache_files = ['train_features.pkl', 'test_features.pkl']\n",
    "        \n",
    "        for cache_file in cache_files:\n",
    "            file_path = os.path.join(cache_dir, cache_file)\n",
    "            if os.path.exists(file_path):\n",
    "                os.remove(file_path)\n",
    "                logger.info(f\"已删除缓存文件: {file_path}\")\n",
    "        \n",
    "        if os.path.exists(cache_dir) and not os.listdir(cache_dir):\n",
    "            os.rmdir(cache_dir)\n",
    "            logger.info(f\"已删除空缓存目录: {cache_dir}\")\n",
    "            \n",
    "    def process_user_log_optimized(self, log_path):\n",
    "        \"\"\"优化的用户日志处理 - 支持缓存\"\"\"\n",
    "        cache_file = os.path.join(self.cache_dir, 'processed_user_log.pkl')\n",
    "        \n",
    "        # 检查缓存\n",
    "        if os.path.exists(cache_file):\n",
    "            logger.info(\"发现用户日志缓存，正在加载...\")\n",
    "            try:\n",
    "                return pd.read_pickle(cache_file)\n",
    "            except Exception as e:\n",
    "                logger.warning(f\"日志缓存加载失败: {e}\")\n",
    "        \n",
    "        logger.info(\"开始处理用户日志...\")\n",
    "        chunks = []\n",
    "        chunk_size = 1000000  # 增大chunk size\n",
    "        total_rows = 0\n",
    "        \n",
    "        for i, chunk in enumerate(pd.read_csv(log_path, chunksize=chunk_size)):\n",
    "            # 批量处理缺失值和异常值\n",
    "            chunk = chunk.fillna(0).replace([np.inf, -np.inf], 0)\n",
    "            \n",
    "            # 类型转换\n",
    "            chunk = chunk.astype({\n",
    "                'user_id': 'int32', 'item_id': 'int32', 'cat_id': 'int32',\n",
    "                'merchant_id': 'int32', 'brand_id': 'int32', \n",
    "                'time_stamp': 'int32', 'action_type': 'int8'\n",
    "            })\n",
    "            \n",
    "            # 添加时间特征\n",
    "            chunk['hour'] = (chunk['time_stamp'] % 86400) // 3600\n",
    "            chunk['day_of_week'] = (chunk['time_stamp'] // 86400) % 7\n",
    "            chunk['hour'] = chunk['hour'].astype(np.int8)\n",
    "            chunk['day_of_week'] = chunk['day_of_week'].astype(np.int8)\n",
    "            \n",
    "            chunks.append(chunk)\n",
    "            total_rows += len(chunk)\n",
    "            \n",
    "            if i % 2 == 0:\n",
    "                logger.info(f\"已处理 {total_rows} 行\")\n",
    "        \n",
    "        result = pd.concat(chunks, ignore_index=True)\n",
    "        \n",
    "        # 保存缓存\n",
    "        try:\n",
    "            result.to_pickle(cache_file)\n",
    "            logger.info(\"用户日志缓存保存成功\")\n",
    "        except Exception as e:\n",
    "            logger.warning(f\"日志缓存保存失败: {e}\")\n",
    "        \n",
    "        logger.info(f\"用户日志处理完成，总计 {total_rows} 行\")\n",
    "        return result\n",
    "    \n",
    "    def generate_features_optimized(self, df, user_log):\n",
    "        \"\"\"优化的特征生成\"\"\"\n",
    "        logger.info(\"开始生成优化特征...\")\n",
    "        \n",
    "        # 缓存各类特征\n",
    "        behavior_cache = os.path.join(self.cache_dir, f'behavior_features_{hash(str(df.shape))}.pkl')\n",
    "        user_cache = os.path.join(self.cache_dir, f'user_features_{hash(str(df.shape))}.pkl')\n",
    "        merchant_cache = os.path.join(self.cache_dir, f'merchant_features_{hash(str(df.shape))}.pkl')\n",
    "        \n",
    "        # 1. 用户-商家行为特征\n",
    "        logger.info(\"  - 计算行为特征...\")\n",
    "        if os.path.exists(behavior_cache):\n",
    "            try:\n",
    "                behavior_features = pd.read_pickle(behavior_cache)\n",
    "                logger.info(\"    从缓存加载行为特征\")\n",
    "            except:\n",
    "                behavior_features = self._generate_behavior_features(user_log)\n",
    "                behavior_features.to_pickle(behavior_cache)\n",
    "        else:\n",
    "            behavior_features = self._generate_behavior_features(user_log)\n",
    "            try:\n",
    "                behavior_features.to_pickle(behavior_cache)\n",
    "            except:\n",
    "                pass\n",
    "        \n",
    "        # 2. 用户特征\n",
    "        logger.info(\"  - 计算用户特征...\")\n",
    "        if os.path.exists(user_cache):\n",
    "            try:\n",
    "                user_features = pd.read_pickle(user_cache)\n",
    "                logger.info(\"    从缓存加载用户特征\")\n",
    "            except:\n",
    "                user_features = self._generate_user_features(user_log)\n",
    "                user_features.to_pickle(user_cache)\n",
    "        else:\n",
    "            user_features = self._generate_user_features(user_log)\n",
    "            try:\n",
    "                user_features.to_pickle(user_cache)\n",
    "            except:\n",
    "                pass\n",
    "        \n",
    "        # 3. 商家特征\n",
    "        logger.info(\"  - 计算商家特征...\")\n",
    "        if os.path.exists(merchant_cache):\n",
    "            try:\n",
    "                merchant_features = pd.read_pickle(merchant_cache)\n",
    "                logger.info(\"    从缓存加载商家特征\")\n",
    "            except:\n",
    "                merchant_features = self._generate_merchant_features(user_log)\n",
    "                merchant_features.to_pickle(merchant_cache)\n",
    "        else:\n",
    "            merchant_features = self._generate_merchant_features(user_log)\n",
    "            try:\n",
    "                merchant_features.to_pickle(merchant_cache)\n",
    "            except:\n",
    "                pass\n",
    "        \n",
    "        # 合并特征\n",
    "        logger.info(\"  - 合并特征...\")\n",
    "        df = df.merge(behavior_features, on=['user_id', 'merchant_id'], how='left')\n",
    "        df = df.merge(user_features, on='user_id', how='left')\n",
    "        df = df.merge(merchant_features, on='merchant_id', how='left')\n",
    "        \n",
    "        # 填充缺失值\n",
    "        numeric_columns = df.select_dtypes(include=[np.number]).columns\n",
    "        df[numeric_columns] = df[numeric_columns].fillna(0)\n",
    "        \n",
    "        logger.info(f\"优化特征生成完成，共 {df.shape[1]} 个特征\")\n",
    "        return df\n",
    "\n",
    "    def _generate_behavior_features(self, user_log):\n",
    "        \"\"\"生成行为特征\"\"\"\n",
    "        behavior_features = user_log.groupby(['user_id', 'merchant_id']).agg({\n",
    "            'action_type': [\n",
    "                'count',\n",
    "                lambda x: (x == 0).sum(),  # 点击\n",
    "                lambda x: (x == 1).sum(),  # 购买\n",
    "                lambda x: (x == 2).sum(),  # 收藏\n",
    "                lambda x: (x == 3).sum()   # 加购物车\n",
    "            ],\n",
    "            'time_stamp': ['max', 'min'],\n",
    "            'hour': lambda x: x.mode().iloc[0] if len(x) > 0 else 12,\n",
    "            'day_of_week': lambda x: x.mode().iloc[0] if len(x) > 0 else 3\n",
    "        }).reset_index()\n",
    "        \n",
    "        # 修正：需要flatten多级列名\n",
    "        behavior_features.columns = [\n",
    "            'user_id', 'merchant_id', 'total_actions', 'clicks', 'buys', \n",
    "            'collects', 'carts', 'last_action', 'first_action', \n",
    "            'favorite_hour', 'favorite_day'\n",
    "        ]\n",
    "        \n",
    "        # 时间特征\n",
    "        behavior_features['action_span'] = behavior_features['last_action'] - behavior_features['first_action']\n",
    "        behavior_features['buy_ratio'] = behavior_features['buys'] / (behavior_features['total_actions'] + 1e-5)\n",
    "        \n",
    "        return behavior_features\n",
    "\n",
    "    def _generate_user_features(self, user_log):\n",
    "        \"\"\"生成用户特征\"\"\"\n",
    "        user_features = user_log.groupby('user_id').agg({\n",
    "            'action_type': [\n",
    "                'count', 'mean',\n",
    "                lambda x: (x == 1).sum()  # 购买次数\n",
    "            ],\n",
    "            'merchant_id': 'nunique',\n",
    "            'item_id': 'nunique',\n",
    "            'cat_id': 'nunique',\n",
    "            'time_stamp': ['min', 'max']\n",
    "        }).reset_index()\n",
    "        \n",
    "        user_features.columns = [\n",
    "            'user_id', 'user_total_actions', 'user_avg_actions', 'user_purchase_count',\n",
    "            'user_unique_merchants', 'user_unique_items', 'user_unique_categories',\n",
    "            'user_first_action_time', 'user_last_action_time'\n",
    "        ]\n",
    "        \n",
    "        user_features['user_action_duration'] = (\n",
    "            user_features['user_last_action_time'] - user_features['user_first_action_time']\n",
    "        )\n",
    "        \n",
    "        return user_features\n",
    "\n",
    "    def _generate_merchant_features(self, user_log):\n",
    "        \"\"\"生成商家特征\"\"\"\n",
    "        merchant_features = user_log.groupby('merchant_id').agg({\n",
    "            'action_type': [\n",
    "                'count',\n",
    "                lambda x: (x == 1).sum(),  # 购买次数\n",
    "                lambda x: (x == 0).sum()   # 点击次数\n",
    "            ],\n",
    "            'user_id': 'nunique',\n",
    "            'item_id': 'nunique',\n",
    "            'cat_id': 'nunique'\n",
    "        }).reset_index()\n",
    "        \n",
    "        merchant_features.columns = [\n",
    "            'merchant_id', 'merchant_total_actions', 'merchant_purchase_count',\n",
    "            'merchant_click_count', 'merchant_unique_users', 'merchant_unique_items',\n",
    "            'merchant_unique_categories'\n",
    "        ]\n",
    "        \n",
    "        merchant_features['merchant_purchase_rate'] = (\n",
    "            merchant_features['merchant_purchase_count'] / (merchant_features['merchant_total_actions'] + 1e-5)\n",
    "        )\n",
    "        \n",
    "        return merchant_features\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "09fa142e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# =============================================================================\n",
    "# 模块4：基础特征工程类\n",
    "# 功能：提供基础的特征选择功能\n",
    "# =============================================================================\n",
    "\n",
    "class FeatureEngineer:\n",
    "    \"\"\"\n",
    "    基础特征工程类\n",
    "    - 基于模型重要性的特征选择\n",
    "    - 支持自定义特征数量\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self):\n",
    "        self.selected_features = None\n",
    "        logger.info(\"基础特征工程器初始化完成\")\n",
    "    \n",
    "    def select_features(self, train_data, target='label', num_features=30):\n",
    "        \"\"\"基于重要性选择特征\"\"\"\n",
    "        logger.info(f\"开始基础特征选择，目标特征数: {num_features}\")\n",
    "        \n",
    "        # 分离特征和标签\n",
    "        X = train_data.drop(columns=[target, 'user_id', 'merchant_id'], errors='ignore')\n",
    "        y = train_data[target]\n",
    "        \n",
    "        # 训练LightGBM模型获取特征重要性\n",
    "        model = lgb.LGBMClassifier(\n",
    "            n_estimators=100,\n",
    "            random_state=42,\n",
    "            verbose=-1,\n",
    "            class_weight='balanced'\n",
    "        )\n",
    "        model.fit(X, y)\n",
    "        \n",
    "        # 获取特征重要性\n",
    "        importance = pd.DataFrame({\n",
    "            'feature': X.columns,\n",
    "            'importance': model.feature_importances_\n",
    "        }).sort_values('importance', ascending=False)\n",
    "        \n",
    "        # 选择最重要的特征\n",
    "        self.selected_features = importance.head(num_features)['feature'].tolist()\n",
    "        \n",
    "        logger.info(f\"基础特征选择完成，已选择 {len(self.selected_features)} 个特征\")\n",
    "        logger.info(\"重要性前5的特征:\")\n",
    "        for i, row in importance.head(5).iterrows():\n",
    "            logger.info(f\"  {row['feature']}: {row['importance']:.4f}\")\n",
    "            \n",
    "        return self.selected_features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "b2e125d3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# =============================================================================\n",
    "# 模块5：增强特征工程类\n",
    "# 功能：生成高级特征，多重特征选择策略\n",
    "# =============================================================================\n",
    "\n",
    "class EnhancedFeatureEngineer(FeatureEngineer):\n",
    "    \"\"\"\n",
    "    增强特征工程类\n",
    "    - 继承基础特征工程功能\n",
    "    - 生成时间窗口特征\n",
    "    - 生成行为序列特征\n",
    "    - 生成交叉特征\n",
    "    - 生成统计特征\n",
    "    - 多重特征选择策略\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.feature_interactions = []\n",
    "        self.cache_dir = None\n",
    "        logger.info(\"增强特征工程器初始化完成\")\n",
    "    \n",
    "    def create_advanced_features(self, df, user_log):\n",
    "        \"\"\"创建高级特征 - 优化版本\"\"\"\n",
    "        logger.info(\"开始创建高级特征...\")\n",
    "        \n",
    "        # 设置缓存目录\n",
    "        if self.cache_dir is None:\n",
    "            self.cache_dir = os.path.join(os.path.dirname(os.getcwd()), 'feature_cache')\n",
    "            os.makedirs(self.cache_dir, exist_ok=True)\n",
    "        \n",
    "        # 时间窗口特征 - 优化版本\n",
    "        df = self._create_time_window_features_optimized(df, user_log)\n",
    "        \n",
    "        # 行为序列特征\n",
    "        df = self._create_sequence_features(df, user_log)\n",
    "        \n",
    "        # 交叉特征\n",
    "        df = self._create_interaction_features(df)\n",
    "        \n",
    "        # 统计特征 - 大幅优化\n",
    "        df = self._create_statistical_features_optimized(df, user_log)\n",
    "        \n",
    "        logger.info(f\"高级特征创建完成，当前特征数: {df.shape[1]}\")\n",
    "        return df\n",
    "        \n",
    "    def _create_time_window_features_optimized(self, df, user_log):\n",
    "        \"\"\"优化的时间窗口特征\"\"\"\n",
    "        logger.info(\"  - 生成时间窗口特征（优化版本）...\")\n",
    "        \n",
    "        cache_file = os.path.join(self.cache_dir, 'time_window_features.pkl')\n",
    "        \n",
    "        if os.path.exists(cache_file):\n",
    "            try:\n",
    "                time_features = pd.read_pickle(cache_file)\n",
    "                logger.info(\"    从缓存加载时间窗口特征\")\n",
    "                df = df.merge(time_features, on=['user_id', 'merchant_id'], how='left')\n",
    "                return df\n",
    "            except:\n",
    "                logger.info(\"    缓存加载失败，重新计算\")\n",
    "        \n",
    "        current_time = user_log['time_stamp'].max()\n",
    "        \n",
    "        # 批量处理多个时间窗口\n",
    "        all_time_features = []\n",
    "        \n",
    "        for days in [3, 7, 14, 30]:\n",
    "            time_threshold = current_time - days * 24 * 3600\n",
    "            recent_log = user_log[user_log['time_stamp'] >= time_threshold]\n",
    "            \n",
    "            if len(recent_log) == 0:\n",
    "                continue\n",
    "            \n",
    "            recent_features = recent_log.groupby(['user_id', 'merchant_id']).agg({\n",
    "                'action_type': [\n",
    "                    'count',\n",
    "                    lambda x: (x == 1).sum(),  # 购买\n",
    "                    lambda x: (x == 0).sum(),  # 点击\n",
    "                    lambda x: len(set(x))      # 行为类型数\n",
    "                ],\n",
    "                'item_id': 'nunique'\n",
    "            }).reset_index()\n",
    "            \n",
    "            recent_features.columns = [\n",
    "                'user_id', 'merchant_id',\n",
    "                f'recent_{days}d_actions', f'recent_{days}d_purchases',\n",
    "                f'recent_{days}d_clicks', f'recent_{days}d_action_types',\n",
    "                f'recent_{days}d_items'\n",
    "            ]\n",
    "            \n",
    "            all_time_features.append(recent_features)\n",
    "        \n",
    "        # 合并所有时间窗口特征\n",
    "        if all_time_features:\n",
    "            from functools import reduce\n",
    "            time_features = reduce(\n",
    "                lambda left, right: pd.merge(left, right, on=['user_id', 'merchant_id'], how='outer'),\n",
    "                all_time_features\n",
    "            )\n",
    "            \n",
    "            # 保存缓存\n",
    "            try:\n",
    "                time_features.to_pickle(cache_file)\n",
    "            except:\n",
    "                pass\n",
    "            \n",
    "            df = df.merge(time_features, on=['user_id', 'merchant_id'], how='left')\n",
    "        \n",
    "        # 填充缺失值\n",
    "        time_cols = [col for col in df.columns if 'recent_' in col]\n",
    "        df[time_cols] = df[time_cols].fillna(0)\n",
    "        \n",
    "        return df\n",
    "\n",
    "    def _create_sequence_features(self, df, user_log):\n",
    "        \"\"\"创建行为序列特征\"\"\"\n",
    "        logger.info(\"  - 生成行为序列特征...\")\n",
    "        \n",
    "        # 用户行为序列长度和变化\n",
    "        user_sequence = user_log.groupby(['user_id', 'merchant_id']).agg({\n",
    "            'action_type': lambda x: len(x.unique()),  # 行为类型多样性\n",
    "            'time_stamp': lambda x: len(x) / (x.max() - x.min() + 1) if x.max() != x.min() else 0  # 行为频率\n",
    "        }).reset_index()\n",
    "        \n",
    "        user_sequence.columns = ['user_id', 'merchant_id', 'action_diversity', 'action_frequency']\n",
    "        df = df.merge(user_sequence, on=['user_id', 'merchant_id'], how='left')\n",
    "        df['action_diversity'] = df['action_diversity'].fillna(0)\n",
    "        df['action_frequency'] = df['action_frequency'].fillna(0)\n",
    "        \n",
    "        return df\n",
    "\n",
    "    def _create_interaction_features(self, df):\n",
    "        \"\"\"创建交叉特征\"\"\"\n",
    "        logger.info(\"  - 生成交叉特征...\")\n",
    "        \n",
    "        # 数值特征交叉\n",
    "        numeric_cols = ['total_actions', 'user_total_actions', 'merchant_total_actions', \n",
    "                    'age_range', 'clicks', 'buys']\n",
    "        \n",
    "        interaction_count = 0\n",
    "        for i, col1 in enumerate(numeric_cols):\n",
    "            for col2 in numeric_cols[i+1:]:\n",
    "                if col1 in df.columns and col2 in df.columns:\n",
    "                    df[f'{col1}_{col2}_ratio'] = df[col1] / (df[col2] + 1e-5)\n",
    "                    df[f'{col1}_{col2}_product'] = df[col1] * df[col2]\n",
    "                    interaction_count += 2\n",
    "        \n",
    "        # 类别特征交叉\n",
    "        if 'age_range' in df.columns and 'gender' in df.columns:\n",
    "            df['age_gender'] = df['age_range'].astype(str) + '_' + df['gender'].astype(str)\n",
    "            interaction_count += 1\n",
    "        \n",
    "        logger.info(f\"    创建了 {interaction_count} 个交叉特征\")\n",
    "        return df\n",
    "\n",
    "    def _create_statistical_features_optimized(self, df, user_log):\n",
    "        \"\"\"大幅优化的统计特征生成\"\"\"\n",
    "        logger.info(\"  - 生成统计特征（优化版本）...\")\n",
    "        \n",
    "        cache_file = os.path.join(self.cache_dir, 'statistical_features.pkl')\n",
    "        \n",
    "        if os.path.exists(cache_file):\n",
    "            try:\n",
    "                user_stats = pd.read_pickle(cache_file)\n",
    "                logger.info(\"    从缓存加载统计特征\")\n",
    "                df = df.merge(user_stats, on='user_id', how='left')\n",
    "                return df\n",
    "            except:\n",
    "                logger.info(\"    缓存加载失败，重新计算\")\n",
    "        \n",
    "        # 使用向量化操作替代循环\n",
    "        logger.info(\"    使用向量化方法计算统计特征...\")\n",
    "        \n",
    "        # 用户行为统计 - 向量化计算\n",
    "        user_action_stats = user_log.groupby('user_id')['action_type'].agg([\n",
    "            'std', 'var', 'nunique',\n",
    "            lambda x: x.max() - x.min(),  # 行为范围\n",
    "            lambda x: stats.skew(x) if len(x) > 2 else 0  # 偏度\n",
    "        ]).reset_index()\n",
    "        \n",
    "        user_action_stats.columns = [\n",
    "            'user_id', 'action_std', 'action_var', 'action_nunique',\n",
    "            'action_range', 'action_skew'\n",
    "        ]\n",
    "        \n",
    "        # 时间间隔统计 - 向量化计算\n",
    "        user_time_stats = []\n",
    "        for user_id in user_log['user_id'].unique():\n",
    "            user_data = user_log[user_log['user_id'] == user_id]['time_stamp'].sort_values()\n",
    "            if len(user_data) > 1:\n",
    "                time_diffs = user_data.diff().dropna()\n",
    "                time_interval_mean = time_diffs.mean()\n",
    "                time_interval_std = time_diffs.std()\n",
    "            else:\n",
    "                time_interval_mean = 0\n",
    "                time_interval_std = 0\n",
    "            \n",
    "            user_time_stats.append({\n",
    "                'user_id': user_id,\n",
    "                'time_interval_mean': time_interval_mean,\n",
    "                'time_interval_std': time_interval_std\n",
    "            })\n",
    "        \n",
    "        user_time_df = pd.DataFrame(user_time_stats)\n",
    "        \n",
    "        # 合并统计特征\n",
    "        user_stats = user_action_stats.merge(user_time_df, on='user_id', how='left')\n",
    "        \n",
    "        # 填充NaN值\n",
    "        numeric_cols = user_stats.select_dtypes(include=[np.number]).columns\n",
    "        user_stats[numeric_cols] = user_stats[numeric_cols].fillna(0)\n",
    "        \n",
    "        # 保存缓存\n",
    "        try:\n",
    "            user_stats.to_pickle(cache_file)\n",
    "            logger.info(\"    统计特征缓存保存成功\")\n",
    "        except:\n",
    "            pass\n",
    "        \n",
    "        df = df.merge(user_stats, on='user_id', how='left')\n",
    "        return df\n",
    "\n",
    "    def select_features_advanced(self, train_data, target='label', num_features=50):\n",
    "        \"\"\"Multi-criteria feature selection.\n",
    "\n",
    "        Ranks candidate features by a weighted blend of three signals\n",
    "        (LightGBM importance, ANOVA F-score, mutual information) and keeps\n",
    "        the top ``num_features``.\n",
    "\n",
    "        Args:\n",
    "            train_data: DataFrame containing features plus the target column.\n",
    "            target: Name of the binary label column.\n",
    "            num_features: Number of features to retain.\n",
    "\n",
    "        Returns:\n",
    "            list[str]: Selected feature names (also stored on\n",
    "            ``self.selected_features``).\n",
    "        \"\"\"\n",
    "        logger.info(f\"开始高级特征选择，目标特征数: {num_features}\")\n",
    "        \n",
    "        # Drop identifier and target columns; they must not leak into scoring.\n",
    "        X = train_data.drop(columns=[target, 'user_id', 'merchant_id'], errors='ignore')\n",
    "        y = train_data[target]\n",
    "        \n",
    "        # Categorical columns that need integer encoding before scoring.\n",
    "        categorical_features = ['age_gender'] if 'age_gender' in X.columns else []\n",
    "        \n",
    "        X_processed = X.copy()\n",
    "        label_encoders = {}\n",
    "        \n",
    "        for cat_col in categorical_features:\n",
    "            if cat_col in X_processed.columns:\n",
    "                le = LabelEncoder()\n",
    "                X_processed[cat_col] = le.fit_transform(X_processed[cat_col].astype(str))\n",
    "                label_encoders[cat_col] = le\n",
    "        \n",
    "        # Criterion 1: ANOVA F statistic. Only the per-feature scores are\n",
    "        # needed, so fit() suffices (the transformed matrix was never used).\n",
    "        logger.info(\"  - 执行F统计量特征选择...\")\n",
    "        selector_f = SelectKBest(f_classif, k=min(num_features, X_processed.shape[1]))\n",
    "        selector_f.fit(X_processed, y)\n",
    "        # f_classif yields NaN for constant features; treat those as score 0\n",
    "        # so the min-max normalization below stays finite.\n",
    "        f_scores = np.nan_to_num(selector_f.scores_)\n",
    "        \n",
    "        # Criterion 2: mutual information.\n",
    "        logger.info(\"  - 执行互信息特征选择...\")\n",
    "        selector_mi = SelectKBest(mutual_info_classif, k=min(num_features, X_processed.shape[1]))\n",
    "        selector_mi.fit(X_processed, y)\n",
    "        mi_scores = np.nan_to_num(selector_mi.scores_)\n",
    "        \n",
    "        # Criterion 3: importance from a quick LightGBM fit.\n",
    "        logger.info(\"  - 执行模型重要性特征选择...\")\n",
    "        model = lgb.LGBMClassifier(\n",
    "            n_estimators=100,\n",
    "            random_state=42,\n",
    "            verbose=-1,\n",
    "            class_weight='balanced'\n",
    "        )\n",
    "        model.fit(X_processed, y)\n",
    "        \n",
    "        # Collect the three raw score vectors side by side.\n",
    "        feature_scores = pd.DataFrame({\n",
    "            'feature': X_processed.columns,\n",
    "            'lgb_importance': model.feature_importances_,\n",
    "            'f_score': f_scores,\n",
    "            'mi_score': mi_scores\n",
    "        })\n",
    "        \n",
    "        # Min-max normalize each criterion so the weights below are comparable.\n",
    "        for col in ['lgb_importance', 'f_score', 'mi_score']:\n",
    "            feature_scores[col] = (\n",
    "                (feature_scores[col] - feature_scores[col].min()) / \n",
    "                (feature_scores[col].max() - feature_scores[col].min() + 1e-8)\n",
    "            )\n",
    "        \n",
    "        # Weighted blend: model importance dominates slightly.\n",
    "        feature_scores['combined_score'] = (\n",
    "            0.4 * feature_scores['lgb_importance'] + \n",
    "            0.3 * feature_scores['f_score'] + \n",
    "            0.3 * feature_scores['mi_score']\n",
    "        )\n",
    "        \n",
    "        # Keep the highest-scoring features.\n",
    "        feature_scores = feature_scores.sort_values('combined_score', ascending=False)\n",
    "        self.selected_features = feature_scores.head(num_features)['feature'].tolist()\n",
    "        \n",
    "        logger.info(f\"高级特征选择完成，已选择 {len(self.selected_features)} 个特征\")\n",
    "        logger.info(\"综合评分前10的特征:\")\n",
    "        print(feature_scores.head(10)[['feature', 'combined_score']])\n",
    "        \n",
    "        return self.selected_features\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "daa1f0fa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# =============================================================================\n",
    "# 模块6：集成模型系统\n",
    "# 功能：整合多种机器学习模型，提供集成预测能力\n",
    "# =============================================================================\n",
    "\n",
    "class EnsembleModel:\n",
    "    \"\"\"\n",
    "    Weighted soft-voting ensemble.\n",
    "    - Supports LightGBM / XGBoost / CatBoost / RandomForest (plus\n",
    "      ExtraTrees and GradientBoosting variants)\n",
    "    - Derives per-model weights from validation AUC\n",
    "    - Produces weighted-average probability predictions\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self):\n",
    "        # name -> {'class', 'params', 'trained_model'} registry.\n",
    "        self.models = {}\n",
    "        # name -> ensemble weight (filled in by _calculate_weights).\n",
    "        self.weights = {}\n",
    "        # name -> validation AUC for each trained model.\n",
    "        self.model_scores = {}\n",
    "        # Reserved for per-model feature importances (not populated here).\n",
    "        self.feature_importance = {}\n",
    "        logger.info(\"增强集成模型系统初始化完成\")\n",
    "    \n",
    "    def add_model(self, name, model_class, params=None):\n",
    "        \"\"\"Register a model configuration under ``name`` (training happens later).\"\"\"\n",
    "        entry = {\n",
    "            'class': model_class,\n",
    "            'params': params or {},\n",
    "            'trained_model': None\n",
    "        }\n",
    "        self.models[name] = entry\n",
    "        logger.info(f\"已添加模型: {name}\")\n",
    "    \n",
    "    def train_all_models(self, X_train, y_train, X_val=None, y_val=None):\n",
    "        \"\"\"Train every registered model on (optionally SMOTE-resampled) data.\n",
    "\n",
    "        Args:\n",
    "            X_train, y_train: Training split.\n",
    "            X_val, y_val: Optional validation split, used for early stopping\n",
    "                and for scoring each model's AUC (feeds weight calculation).\n",
    "\n",
    "        Raises:\n",
    "            RuntimeError: If every registered model fails to train.\n",
    "        \"\"\"\n",
    "        logger.info(\"开始训练增强集成模型...\")\n",
    "        \n",
    "        # Rebalance the classes with SMOTE; fall back to raw data on failure.\n",
    "        logger.info(\"  - 处理样本不平衡...\")\n",
    "        try:\n",
    "            smote = SMOTE(sampling_strategy=0.4, random_state=42, k_neighbors=3)\n",
    "            X_res, y_res = smote.fit_resample(X_train, y_train)\n",
    "            logger.info(f\"  - SMOTE后样本数: {len(X_res)} (原始: {len(X_train)})\")\n",
    "        except Exception as e:\n",
    "            logger.warning(f\"SMOTE失败，使用原始数据: {e}\")\n",
    "            X_res, y_res = X_train, y_train\n",
    "        \n",
    "        successful_models = 0\n",
    "        \n",
    "        for name, model_info in self.models.items():\n",
    "            logger.info(f\"  - 训练模型: {name}\")\n",
    "            \n",
    "            try:\n",
    "                # Dispatch to the trainer that matches the model-name prefix.\n",
    "                if 'lightgbm' in name.lower():\n",
    "                    model = self._train_lightgbm_robust(X_res, y_res, X_val, y_val, model_info['params'])\n",
    "                elif 'xgboost' in name.lower():\n",
    "                    model = self._train_xgboost_robust(X_res, y_res, X_val, y_val, model_info['params'])\n",
    "                elif 'catboost' in name.lower():\n",
    "                    model = self._train_catboost_robust(X_res, y_res, X_val, y_val, model_info['params'])\n",
    "                elif 'random_forest' in name.lower():\n",
    "                    model = self._train_random_forest_robust(X_res, y_res, X_val, y_val, model_info['params'])\n",
    "                elif 'extra_trees' in name.lower():\n",
    "                    model = self._train_extra_trees(X_res, y_res, X_val, y_val, model_info['params'])\n",
    "                elif 'gradient_boost' in name.lower():\n",
    "                    model = self._train_gradient_boost(X_res, y_res, X_val, y_val, model_info['params'])\n",
    "                else:\n",
    "                    logger.warning(f\"    未知模型类型: {name}\")\n",
    "                    continue\n",
    "                \n",
    "                if model is not None:\n",
    "                    self.models[name]['trained_model'] = model\n",
    "                    successful_models += 1\n",
    "                    \n",
    "                    # Score this model on the held-out split; the AUC later\n",
    "                    # determines its weight in the ensemble.\n",
    "                    if X_val is not None and y_val is not None:\n",
    "                        val_preds = self._predict_single_model(model, X_val, name)\n",
    "                        if val_preds is not None and len(val_preds) == len(y_val):\n",
    "                            auc = roc_auc_score(y_val, val_preds)\n",
    "                            self.model_scores[name] = auc\n",
    "                            logger.info(f\"    {name} 验证集AUC: {auc:.4f}\")\n",
    "                        else:\n",
    "                            logger.warning(f\"    {name} 预测结果异常\")\n",
    "                    \n",
    "            except Exception as e:\n",
    "                logger.error(f\"    {name} 训练失败: {str(e)}\")\n",
    "                continue\n",
    "        \n",
    "        logger.info(f\"成功训练 {successful_models}/{len(self.models)} 个模型\")\n",
    "        \n",
    "        if successful_models == 0:\n",
    "            logger.error(\"所有模型训练失败！\")\n",
    "            raise RuntimeError(\"无可用模型\")\n",
    "        \n",
    "        # Convert the recorded AUCs into ensemble weights.\n",
    "        self._calculate_weights()\n",
    "    \n",
    "    def _train_lightgbm_robust(self, X_train, y_train, X_val, y_val, params):\n",
    "        \"\"\"Train a LightGBM booster; returns None on failure.\n",
    "\n",
    "        Early stopping is only enabled when a genuine validation split is\n",
    "        supplied -- monitoring the training set would never trigger it and\n",
    "        would report a misleading best iteration.\n",
    "        \"\"\"\n",
    "        try:\n",
    "            # lgb.Dataset prefers DataFrames so feature names survive.\n",
    "            X_train = pd.DataFrame(X_train) if not isinstance(X_train, pd.DataFrame) else X_train\n",
    "            X_val = pd.DataFrame(X_val) if X_val is not None and not isinstance(X_val, pd.DataFrame) else X_val\n",
    "            \n",
    "            train_data = lgb.Dataset(X_train, label=y_train)\n",
    "            valid_data = lgb.Dataset(X_val, label=y_val, reference=train_data) if X_val is not None else None\n",
    "            \n",
    "            default_params = {\n",
    "                'objective': 'binary',\n",
    "                'metric': 'auc',\n",
    "                'boosting_type': 'gbdt',\n",
    "                'learning_rate': 0.05,\n",
    "                'num_leaves': 31,\n",
    "                'max_depth': 7,\n",
    "                'min_child_samples': 20,\n",
    "                'feature_fraction': 0.8,\n",
    "                'bagging_fraction': 0.8,\n",
    "                'bagging_freq': 5,\n",
    "                'reg_alpha': 0.1,\n",
    "                'reg_lambda': 0.1,\n",
    "                'random_state': 42,\n",
    "                'n_jobs': -1,\n",
    "                'verbose': -1,\n",
    "                'force_col_wise': True  # avoid feature-parallel threading issues\n",
    "            }\n",
    "            default_params.update(params)\n",
    "            \n",
    "            # Only early-stop against a real validation set.\n",
    "            if valid_data is not None:\n",
    "                valid_sets = [train_data, valid_data]\n",
    "                callbacks = [lgb.early_stopping(50, verbose=False)]\n",
    "            else:\n",
    "                valid_sets = [train_data]\n",
    "                callbacks = []\n",
    "            \n",
    "            model = lgb.train(\n",
    "                default_params,\n",
    "                train_data,\n",
    "                num_boost_round=1000,\n",
    "                valid_sets=valid_sets,\n",
    "                callbacks=callbacks\n",
    "            )\n",
    "            \n",
    "            return model\n",
    "            \n",
    "        except Exception as e:\n",
    "            logger.error(f\"LightGBM训练失败: {e}\")\n",
    "            return None\n",
    "    \n",
    "    def _train_xgboost_robust(self, X_train, y_train, X_val, y_val, params):\n",
    "        \"\"\"Train an XGBoost booster; returns None on failure.\n",
    "\n",
    "        Early stopping is only applied when a real validation split exists;\n",
    "        stopping on the training metric would be meaningless. Also uses an\n",
    "        explicit ``is not None`` check instead of relying on DMatrix\n",
    "        truthiness.\n",
    "        \"\"\"\n",
    "        try:\n",
    "            default_params = {\n",
    "                'objective': 'binary:logistic',\n",
    "                'eval_metric': 'auc',\n",
    "                'learning_rate': 0.05,\n",
    "                'max_depth': 6,\n",
    "                'min_child_weight': 1,\n",
    "                'subsample': 0.8,\n",
    "                'colsample_bytree': 0.8,\n",
    "                'reg_alpha': 0.1,\n",
    "                'reg_lambda': 0.1,\n",
    "                'random_state': 42,\n",
    "                'n_jobs': -1,\n",
    "                'verbosity': 0,\n",
    "                'tree_method': 'hist'  # fast, stable histogram algorithm\n",
    "            }\n",
    "            default_params.update(params)\n",
    "            \n",
    "            dtrain = xgb.DMatrix(X_train, label=y_train)\n",
    "            dval = xgb.DMatrix(X_val, label=y_val) if X_val is not None else None\n",
    "            \n",
    "            if dval is not None:\n",
    "                evals = [(dtrain, 'train'), (dval, 'valid')]\n",
    "                early_stopping_rounds = 50\n",
    "            else:\n",
    "                evals = [(dtrain, 'train')]\n",
    "                early_stopping_rounds = None\n",
    "            \n",
    "            model = xgb.train(\n",
    "                default_params,\n",
    "                dtrain,\n",
    "                num_boost_round=1000,\n",
    "                evals=evals,\n",
    "                early_stopping_rounds=early_stopping_rounds,\n",
    "                verbose_eval=False\n",
    "            )\n",
    "            \n",
    "            return model\n",
    "            \n",
    "        except Exception as e:\n",
    "            logger.error(f\"XGBoost训练失败: {e}\")\n",
    "            return None\n",
    "    \n",
    "    def _train_catboost_robust(self, X_train, y_train, X_val, y_val, params):\n",
    "        \"\"\"Train a CatBoost classifier; returns None on failure.\n",
    "\n",
    "        ``use_best_model`` requires an eval set (CatBoost raises otherwise),\n",
    "        so it is only enabled when a validation split is supplied.\n",
    "        \"\"\"\n",
    "        try:\n",
    "            default_params = {\n",
    "                'iterations': 1000,\n",
    "                'learning_rate': 0.05,\n",
    "                'depth': 6,\n",
    "                'l2_leaf_reg': 3,\n",
    "                'bootstrap_type': 'Bernoulli',\n",
    "                'subsample': 0.8,\n",
    "                'random_seed': 42,\n",
    "                'verbose': False,\n",
    "                'early_stopping_rounds': 50,\n",
    "                'task_type': 'CPU'  # force CPU for reproducibility\n",
    "            }\n",
    "            default_params.update(params)\n",
    "            \n",
    "            model = CatBoostClassifier(**default_params)\n",
    "            \n",
    "            eval_set = (X_val, y_val) if X_val is not None else None\n",
    "            model.fit(\n",
    "                X_train, y_train, \n",
    "                eval_set=eval_set, \n",
    "                use_best_model=eval_set is not None,\n",
    "                verbose=False\n",
    "            )\n",
    "            \n",
    "            return model\n",
    "            \n",
    "        except Exception as e:\n",
    "            logger.error(f\"CatBoost训练失败: {e}\")\n",
    "            return None\n",
    "    \n",
    "    def _train_random_forest_robust(self, X_train, y_train, X_val, y_val, params):\n",
    "        \"\"\"Fit a RandomForestClassifier with balanced class weights.\n",
    "\n",
    "        ``X_val``/``y_val`` are accepted for interface symmetry with the\n",
    "        boosting trainers but are unused (forests have no early stopping).\n",
    "        Returns the fitted model, or None on failure.\n",
    "        \"\"\"\n",
    "        try:\n",
    "            rf_params = dict(\n",
    "                n_estimators=300,\n",
    "                max_depth=12,\n",
    "                min_samples_split=5,\n",
    "                min_samples_leaf=2,\n",
    "                max_features='sqrt',\n",
    "                bootstrap=True,\n",
    "                random_state=42,\n",
    "                n_jobs=-1,\n",
    "                class_weight='balanced',\n",
    "            )\n",
    "            rf_params.update(params)\n",
    "            forest = RandomForestClassifier(**rf_params)\n",
    "            forest.fit(X_train, y_train)\n",
    "            return forest\n",
    "        except Exception as e:\n",
    "            logger.error(f\"RandomForest训练失败: {e}\")\n",
    "            return None\n",
    "    \n",
    "    def _train_extra_trees(self, X_train, y_train, X_val, y_val, params):\n",
    "        \"\"\"Fit an ExtraTreesClassifier (validation args unused; kept for symmetry).\"\"\"\n",
    "        try:\n",
    "            from sklearn.ensemble import ExtraTreesClassifier\n",
    "            \n",
    "            et_params = dict(\n",
    "                n_estimators=300,\n",
    "                max_depth=12,\n",
    "                min_samples_split=5,\n",
    "                min_samples_leaf=2,\n",
    "                max_features='sqrt',\n",
    "                bootstrap=True,\n",
    "                random_state=42,\n",
    "                n_jobs=-1,\n",
    "                class_weight='balanced',\n",
    "            )\n",
    "            et_params.update(params)\n",
    "            trees = ExtraTreesClassifier(**et_params)\n",
    "            trees.fit(X_train, y_train)\n",
    "            return trees\n",
    "        except Exception as e:\n",
    "            logger.error(f\"ExtraTrees训练失败: {e}\")\n",
    "            return None\n",
    "    \n",
    "    def _train_gradient_boost(self, X_train, y_train, X_val, y_val, params):\n",
    "        \"\"\"Fit a GradientBoostingClassifier (validation args unused; kept for symmetry).\"\"\"\n",
    "        try:\n",
    "            from sklearn.ensemble import GradientBoostingClassifier\n",
    "            \n",
    "            gb_params = dict(\n",
    "                n_estimators=200,\n",
    "                learning_rate=0.1,\n",
    "                max_depth=6,\n",
    "                min_samples_split=5,\n",
    "                min_samples_leaf=2,\n",
    "                subsample=0.8,\n",
    "                random_state=42,\n",
    "            )\n",
    "            gb_params.update(params)\n",
    "            booster = GradientBoostingClassifier(**gb_params)\n",
    "            booster.fit(X_train, y_train)\n",
    "            return booster\n",
    "        except Exception as e:\n",
    "            logger.error(f\"GradientBoosting训练失败: {e}\")\n",
    "            return None\n",
    "    \n",
    "    def _predict_single_model(self, model, X, model_name):\n",
    "        \"\"\"Return probability-of-positive predictions for one trained model.\n",
    "\n",
    "        Dispatches on the model name because the ensemble mixes native\n",
    "        boosters (LightGBM/XGBoost) with sklearn-style estimators.\n",
    "        Returns None when the model is missing or prediction fails.\n",
    "        \"\"\"\n",
    "        try:\n",
    "            if model is None:\n",
    "                return None\n",
    "            \n",
    "            if 'lightgbm' in model_name.lower():\n",
    "                return model.predict(X, num_iteration=model.best_iteration)\n",
    "            elif 'xgboost' in model_name.lower():\n",
    "                dtest = xgb.DMatrix(X)\n",
    "                # `ntree_limit`/`best_ntree_limit` were removed in XGBoost 2.0;\n",
    "                # use iteration_range with best_iteration (set by early stopping).\n",
    "                best_it = getattr(model, 'best_iteration', None)\n",
    "                if best_it is not None:\n",
    "                    return model.predict(dtest, iteration_range=(0, best_it + 1))\n",
    "                return model.predict(dtest)\n",
    "            elif 'catboost' in model_name.lower():\n",
    "                return model.predict_proba(X)[:, 1]\n",
    "            elif any(tree_type in model_name.lower() for tree_type in ['random_forest', 'extra_trees', 'gradient_boost']):\n",
    "                return model.predict_proba(X)[:, 1]\n",
    "            else:\n",
    "                logger.error(f\"未知模型类型: {model_name}\")\n",
    "                return None\n",
    "                \n",
    "        except Exception as e:\n",
    "            logger.error(f\"模型 {model_name} 预测失败: {str(e)}\")\n",
    "            return None\n",
    "    \n",
    "    def _calculate_weights(self):\n",
    "        \"\"\"Assign ensemble weights via a softmax over (scaled) validation AUCs.\n",
    "\n",
    "        Falls back to uniform weights over the trained models when no\n",
    "        validation scores were recorded.\n",
    "        \"\"\"\n",
    "        if not self.model_scores:\n",
    "            # No validation scores: spread weight evenly across trained models.\n",
    "            trained = [\n",
    "                name for name, info in self.models.items()\n",
    "                if info['trained_model'] is not None\n",
    "            ]\n",
    "            if trained:\n",
    "                uniform = 1.0 / len(trained)\n",
    "                self.weights = dict.fromkeys(trained, uniform)\n",
    "        else:\n",
    "            # Softmax over scaled AUCs; the x10 scaling sharpens differences\n",
    "            # between otherwise-similar scores.\n",
    "            names = list(self.model_scores.keys())\n",
    "            scaled = np.array([self.model_scores[n] for n in names]) * 10\n",
    "            shifted = np.exp(scaled - scaled.max())  # subtract max for stability\n",
    "            self.weights = dict(zip(names, shifted / shifted.sum()))\n",
    "        \n",
    "        logger.info(\"模型权重分配完成:\")\n",
    "        for name, weight in self.weights.items():\n",
    "            score = self.model_scores.get(name, 0)\n",
    "            logger.info(f\"  {name}: {weight:.4f} (AUC: {score:.4f})\")\n",
    "    \n",
    "    def predict(self, X):\n",
    "        \"\"\"Weighted-average probability prediction across trained models.\n",
    "\n",
    "        Returns an all-zero vector when no model can produce a prediction.\n",
    "        \"\"\"\n",
    "        preds_by_model = {}\n",
    "        for name, info in self.models.items():\n",
    "            if info['trained_model'] is None or name not in self.weights:\n",
    "                continue\n",
    "            single = self._predict_single_model(info['trained_model'], X, name)\n",
    "            if single is not None:\n",
    "                preds_by_model[name] = single\n",
    "        \n",
    "        if not preds_by_model:\n",
    "            logger.error(\"没有可用的训练模型进行预测\")\n",
    "            return np.zeros(len(X))\n",
    "        \n",
    "        # Weighted sum of the individual probability vectors.\n",
    "        blended = np.zeros(len(X))\n",
    "        weight_sum = 0\n",
    "        for name, single in preds_by_model.items():\n",
    "            w = self.weights.get(name, 0)\n",
    "            blended += w * single\n",
    "            weight_sum += w\n",
    "        \n",
    "        # Re-normalize in case some weighted models failed to predict.\n",
    "        if weight_sum > 0:\n",
    "            blended /= weight_sum\n",
    "        \n",
    "        return blended"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "7b35559c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# =============================================================================\n",
    "# 模块7：优化模型训练器\n",
    "# 功能：管理集成模型训练，支持交叉验证\n",
    "# =============================================================================\n",
    "\n",
    "class OptimizedModelTrainer:\n",
    "    \"\"\"\n",
    "    Optimized model trainer.\n",
    "    - Manages multiple model configurations\n",
    "    - Supports cross-validated training\n",
    "    - Evaluates performance automatically\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self):\n",
    "        # The ensemble that will hold every configured model.\n",
    "        self.ensemble_model = EnsembleModel()\n",
    "        # Best validation AUC of the final ensemble (set by train_with_cv).\n",
    "        self.best_auc = 0.0\n",
    "        logger.info(\"优化模型训练器初始化完成\")\n",
    "    \n",
    "    def setup_models(self):\n",
    "        \"\"\"Register the full roster of model configurations on the ensemble.\"\"\"\n",
    "        logger.info(\"设置增强模型配置...\")\n",
    "        \n",
    "        # (name, hyper-parameter overrides) pairs; EnsembleModel picks the\n",
    "        # training routine from the name prefix, so registration order and\n",
    "        # names must be preserved exactly.\n",
    "        model_configs = [\n",
    "            ('lightgbm_conservative', {\n",
    "                'learning_rate': 0.05, 'num_leaves': 31, 'max_depth': 7,\n",
    "                'feature_fraction': 0.8, 'bagging_fraction': 0.8,\n",
    "                'reg_alpha': 0.1, 'reg_lambda': 0.1,\n",
    "            }),\n",
    "            ('lightgbm_aggressive', {\n",
    "                'learning_rate': 0.03, 'num_leaves': 63, 'max_depth': 9,\n",
    "                'feature_fraction': 0.9, 'bagging_fraction': 0.9,\n",
    "                'reg_alpha': 0.05, 'reg_lambda': 0.05,\n",
    "            }),\n",
    "            ('lightgbm_deep', {\n",
    "                'learning_rate': 0.02, 'num_leaves': 127, 'max_depth': 10,\n",
    "                'feature_fraction': 0.85, 'bagging_fraction': 0.85,\n",
    "                'reg_alpha': 0.2, 'reg_lambda': 0.2,\n",
    "            }),\n",
    "            ('xgboost_standard', {\n",
    "                'learning_rate': 0.05, 'max_depth': 6,\n",
    "                'subsample': 0.8, 'colsample_bytree': 0.8,\n",
    "                'reg_alpha': 0.1, 'reg_lambda': 0.1,\n",
    "            }),\n",
    "            ('xgboost_deep', {\n",
    "                'learning_rate': 0.03, 'max_depth': 8,\n",
    "                'subsample': 0.9, 'colsample_bytree': 0.9,\n",
    "                'reg_alpha': 0.05, 'reg_lambda': 0.05,\n",
    "            }),\n",
    "            ('catboost_standard', {\n",
    "                'learning_rate': 0.05, 'depth': 6,\n",
    "                'l2_leaf_reg': 3, 'subsample': 0.8,\n",
    "            }),\n",
    "            ('catboost_deep', {\n",
    "                'learning_rate': 0.03, 'depth': 8,\n",
    "                'l2_leaf_reg': 1, 'subsample': 0.9,\n",
    "            }),\n",
    "            ('random_forest_standard', {\n",
    "                'n_estimators': 300, 'max_depth': 12,\n",
    "                'min_samples_split': 5, 'min_samples_leaf': 2,\n",
    "            }),\n",
    "            ('random_forest_deep', {\n",
    "                'n_estimators': 500, 'max_depth': 15,\n",
    "                'min_samples_split': 3, 'min_samples_leaf': 1,\n",
    "            }),\n",
    "            ('extra_trees_standard', {\n",
    "                'n_estimators': 300, 'max_depth': 12,\n",
    "                'min_samples_split': 5, 'min_samples_leaf': 2,\n",
    "            }),\n",
    "            ('extra_trees_deep', {\n",
    "                'n_estimators': 500, 'max_depth': 15,\n",
    "                'min_samples_split': 3, 'min_samples_leaf': 1,\n",
    "            }),\n",
    "            ('gradient_boost_standard', {\n",
    "                'n_estimators': 200, 'learning_rate': 0.1,\n",
    "                'max_depth': 6, 'subsample': 0.8,\n",
    "            }),\n",
    "            ('gradient_boost_conservative', {\n",
    "                'n_estimators': 300, 'learning_rate': 0.05,\n",
    "                'max_depth': 8, 'subsample': 0.9,\n",
    "            }),\n",
    "        ]\n",
    "        \n",
    "        for model_name, model_params in model_configs:\n",
    "            self.ensemble_model.add_model(model_name, None, model_params)\n",
    "        \n",
    "        logger.info(f\"模型配置完成，共设置 {len(self.ensemble_model.models)} 个模型\")\n",
    "    \n",
    "    def train_with_cv(self, X, y, cv_folds=5):\n",
    "        \"\"\"Cross-validated training, then a final fit on the full data.\n",
    "\n",
    "        Runs stratified K-fold CV (each fold trains a fresh ensemble) to\n",
    "        estimate generalization AUC, then refits ``self.ensemble_model`` on\n",
    "        an 80/20 split of the full data and records its validation AUC in\n",
    "        ``self.best_auc``.\n",
    "\n",
    "        Args:\n",
    "            X: Feature DataFrame.\n",
    "            y: Binary label Series.\n",
    "            cv_folds: Number of stratified folds.\n",
    "\n",
    "        Returns:\n",
    "            EnsembleModel: The final, fully-trained ensemble.\n",
    "        \"\"\"\n",
    "        logger.info(f\"开始 {cv_folds} 折交叉验证训练...\")\n",
    "        \n",
    "        # Sanitize features: NaN and +/-inf would break several trainers.\n",
    "        X = X.fillna(0)\n",
    "        X = X.replace([np.inf, -np.inf], 0)\n",
    "        \n",
    "        skf = StratifiedKFold(n_splits=cv_folds, shuffle=True, random_state=42)\n",
    "        cv_scores = []\n",
    "        \n",
    "        for fold, (train_idx, val_idx) in enumerate(skf.split(X, y)):\n",
    "            logger.info(f\"训练第 {fold + 1}/{cv_folds} 折...\")\n",
    "            \n",
    "            X_train_fold, X_val_fold = X.iloc[train_idx], X.iloc[val_idx]\n",
    "            y_train_fold, y_val_fold = y.iloc[train_idx], y.iloc[val_idx]\n",
    "            \n",
    "            # Each fold gets an independent ensemble with copied configs so\n",
    "            # folds cannot leak trained state into one another.\n",
    "            fold_ensemble = EnsembleModel()\n",
    "            fold_ensemble.models = {\n",
    "                name: {\n",
    "                    'class': model_info['class'],\n",
    "                    'params': model_info['params'].copy(),\n",
    "                    'trained_model': None\n",
    "                } for name, model_info in self.ensemble_model.models.items()\n",
    "            }\n",
    "            \n",
    "            # Train this fold's models.\n",
    "            fold_ensemble.train_all_models(X_train_fold, y_train_fold, X_val_fold, y_val_fold)\n",
    "            \n",
    "            # Score the fold on its held-out part. An all-zero prediction\n",
    "            # vector is the ensemble's failure sentinel, so skip it.\n",
    "            val_preds = fold_ensemble.predict(X_val_fold)\n",
    "            if len(val_preds) > 0 and not np.all(val_preds == 0):\n",
    "                fold_auc = roc_auc_score(y_val_fold, val_preds)\n",
    "                cv_scores.append(fold_auc)\n",
    "                logger.info(f\"  第 {fold + 1} 折 AUC: {fold_auc:.4f}\")\n",
    "            else:\n",
    "                logger.warning(f\"  第 {fold + 1} 折预测失败\")\n",
    "        \n",
    "        if cv_scores:\n",
    "            # Aggregate the cross-validation estimate.\n",
    "            mean_auc = np.mean(cv_scores)\n",
    "            std_auc = np.std(cv_scores)\n",
    "            logger.info(f\"交叉验证结果: {mean_auc:.4f} (±{std_auc:.4f})\")\n",
    "        else:\n",
    "            logger.error(\"所有折的验证都失败了\")\n",
    "            mean_auc = 0.0\n",
    "        \n",
    "        # Final model: refit on the full data with a fresh 80/20 split.\n",
    "        logger.info(\"在全部数据上训练最终集成模型...\")\n",
    "        X_train, X_val, y_train, y_val = train_test_split(\n",
    "            X, y, test_size=0.2, random_state=42, stratify=y\n",
    "        )\n",
    "        \n",
    "        self.ensemble_model.train_all_models(X_train, y_train, X_val, y_val)\n",
    "        \n",
    "        # Validate the final ensemble.\n",
    "        val_preds = self.ensemble_model.predict(X_val)\n",
    "        if len(val_preds) > 0 and not np.all(val_preds == 0):\n",
    "            self.best_auc = roc_auc_score(y_val, val_preds)\n",
    "            logger.info(f\"最终集成模型验证集AUC: {self.best_auc:.4f}\")\n",
    "        else:\n",
    "            logger.error(\"最终模型预测失败\")\n",
    "            self.best_auc = 0.0\n",
    "        \n",
    "        return self.ensemble_model\n",
    "    \n",
    "    def predict(self, X):\n",
    "        \"\"\"Sanitize features (NaN / +-inf -> 0) and delegate to the ensemble.\"\"\"\n",
    "        cleaned = X.fillna(0).replace([np.inf, -np.inf], 0)\n",
    "        return self.ensemble_model.predict(cleaned)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "0f4d4f3d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# =============================================================================\n",
    "# 模块8：预测与结果保存\n",
    "# 功能：处理模型预测结果，保存到文件\n",
    "# =============================================================================\n",
    "\n",
    "class Predictor:\n",
    "    \"\"\"\n",
    "    Prediction post-processing helpers.\n",
    "    - Generates ensemble predictions\n",
    "    - Saves results to a CSV file\n",
    "    - Logs summary statistics of the predictions\n",
    "    \"\"\"\n",
    "    \n",
    "    @staticmethod\n",
    "    def predict_ensemble(ensemble_model, test_data, features):\n",
    "        \"\"\"Score ``test_data`` with the ensemble.\n",
    "\n",
    "        Missing feature columns are filled with 0 without mutating the\n",
    "        caller's DataFrame. Returns a DataFrame with user_id, merchant_id,\n",
    "        prob and the 0.5-thresholded label.\n",
    "        \"\"\"\n",
    "        logger.info(\"开始生成集成预测结果...\")\n",
    "        \n",
    "        predictions = test_data[['user_id', 'merchant_id']].copy()\n",
    "        \n",
    "        missing_features = [f for f in features if f not in test_data.columns]\n",
    "        if missing_features:\n",
    "            logger.warning(f\"以下特征在测试集中缺失，已填充0: {missing_features}\")\n",
    "        \n",
    "        # reindex keeps the features in the expected order and fills any\n",
    "        # missing columns with 0 on a new frame, leaving test_data untouched.\n",
    "        X = test_data.reindex(columns=features, fill_value=0)\n",
    "        \n",
    "        predictions['prob'] = ensemble_model.predict(X)\n",
    "        predictions['label'] = (predictions['prob'] > 0.5).astype(int)\n",
    "        \n",
    "        # Summary statistics of the prediction distribution.\n",
    "        pos_ratio = predictions['label'].mean()\n",
    "        prob_stats = predictions['prob'].describe()\n",
    "        \n",
    "        logger.info(\"预测结果统计:\")\n",
    "        logger.info(f\"  正例比例: {pos_ratio:.4f}\")\n",
    "        logger.info(f\"  概率分布: min={prob_stats['min']:.4f}, max={prob_stats['max']:.4f}\")\n",
    "        logger.info(f\"  概率均值: {prob_stats['mean']:.4f}, 标准差: {prob_stats['std']:.4f}\")\n",
    "        \n",
    "        return predictions\n",
    "    \n",
    "    @staticmethod\n",
    "    def save_results(predictions, file_path):\n",
    "        \"\"\"Write predictions to CSV and log the resulting file size.\"\"\"\n",
    "        logger.info(f\"保存预测结果到: {file_path}\")\n",
    "        \n",
    "        # os.path.dirname returns '' for a bare filename and makedirs('')\n",
    "        # raises, so only create the directory when there is one.\n",
    "        out_dir = os.path.dirname(file_path)\n",
    "        if out_dir:\n",
    "            os.makedirs(out_dir, exist_ok=True)\n",
    "        \n",
    "        predictions.to_csv(file_path, index=False)\n",
    "        \n",
    "        # Confirm the write actually succeeded.\n",
    "        if os.path.exists(file_path):\n",
    "            file_size = os.path.getsize(file_path) / 1024 / 1024  # MB\n",
    "            logger.info(f\"结果保存成功，文件大小: {file_size:.2f} MB\")\n",
    "            logger.info(f\"预测样本数: {len(predictions)}\")\n",
    "        else:\n",
    "            logger.error(\"结果保存失败！\")\n",
    "            \n",
    "        return file_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "ec51845e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# =============================================================================\n",
    "# Module 9: main program executor\n",
    "# Purpose: integrate all modules and run the full prediction pipeline\n",
    "# =============================================================================\n",
    "\n",
    "def main():\n",
    "    \"\"\"\n",
    "    Main program function.\n",
    "\n",
    "    Integrates all modules and executes the complete merchant\n",
    "    repeat-purchase prediction pipeline: path setup, component\n",
    "    initialization, data validation and preprocessing, advanced feature\n",
    "    engineering, feature selection, model training (cross-validation with\n",
    "    a simple train/validation-split fallback), prediction, and result\n",
    "    saving.\n",
    "\n",
    "    Returns:\n",
    "        dict: on success {'status': 'success', 'auc', 'predictions',\n",
    "        'result_file', 'total_time', 'model_count'}; on failure\n",
    "        {'status': 'error', 'error_message', 'total_time'}.\n",
    "    \"\"\"\n",
    "    start_time = time.time()\n",
    "    \n",
    "    try:\n",
    "        logger.info(\"=\"*60)\n",
    "        logger.info(\"开始执行商家重复购买预测任务（优化版本）\")\n",
    "        logger.info(\"=\"*60)\n",
    "        \n",
    "        # Step 1: configure system paths\n",
    "        logger.info(\"第1步：配置系统路径...\")\n",
    "        # NOTE(review): hardcoded absolute Windows path — consider making configurable\n",
    "        BASE_DIR = 'F:/H/jqxx-master/hjh'\n",
    "        DATA_DIR = os.path.join(BASE_DIR, 'data')\n",
    "        OUTPUT_DIR = os.path.join(BASE_DIR, 'results')\n",
    "        os.makedirs(OUTPUT_DIR, exist_ok=True)\n",
    "        logger.info(f\"  数据目录: {DATA_DIR}\")\n",
    "        logger.info(f\"  输出目录: {OUTPUT_DIR}\")\n",
    "        \n",
    "        # Step 2: initialize system components\n",
    "        logger.info(\"第2步：初始化系统组件...\")\n",
    "        data_loader = DataLoader(DATA_DIR)\n",
    "        feature_engineer = EnhancedFeatureEngineer()\n",
    "        model_trainer = OptimizedModelTrainer()\n",
    "        \n",
    "        # Step 3: validate that the expected data files exist\n",
    "        logger.info(\"第3步：验证数据文件...\")\n",
    "        data_loader.validate_files()\n",
    "        \n",
    "        # Step 4: data preprocessing\n",
    "        logger.info(\"第4步：执行数据预处理...\")\n",
    "        # Optional: clear the cache to force feature regeneration\n",
    "        # data_loader.clear_cache()\n",
    "        train_data, test_data = data_loader.preprocess()\n",
    "        logger.info(f\"  训练集形状: {train_data.shape}\")\n",
    "        logger.info(f\"  测试集形状: {test_data.shape}\")\n",
    "        \n",
    "        # Step 5: advanced feature engineering on both train and test sets\n",
    "        logger.info(\"第5步：执行高级特征工程...\")\n",
    "        user_log = data_loader.process_user_log_optimized(data_loader.file_paths['user_log'])\n",
    "        \n",
    "        # Point the feature engineer at the loader's cache directory\n",
    "        feature_engineer.cache_dir = data_loader.cache_dir\n",
    "        \n",
    "        logger.info(\"  处理训练集高级特征...\")\n",
    "        train_data = feature_engineer.create_advanced_features(train_data, user_log)\n",
    "        \n",
    "        logger.info(\"  处理测试集高级特征...\")\n",
    "        test_data = feature_engineer.create_advanced_features(test_data, user_log)\n",
    "        \n",
    "        logger.info(f\"  最终特征数: {train_data.shape[1]}\")\n",
    "        \n",
    "        # Step 6: feature selection\n",
    "        logger.info(\"第6步：执行特征选择...\")\n",
    "        \n",
    "        # Data cleaning - replace missing values and infinities with 0\n",
    "        train_data = train_data.fillna(0)\n",
    "        train_data = train_data.replace([np.inf, -np.inf], 0)\n",
    "        test_data = test_data.fillna(0)\n",
    "        test_data = test_data.replace([np.inf, -np.inf], 0)\n",
    "        \n",
    "        # Remove constant features (note: high-correlation filtering is not done here)\n",
    "        logger.info(\"  - 移除常数特征...\")\n",
    "        constant_features = []\n",
    "        for col in train_data.columns:\n",
    "            if col not in ['user_id', 'merchant_id', 'label']:\n",
    "                if train_data[col].nunique() <= 1:\n",
    "                    constant_features.append(col)\n",
    "        \n",
    "        if constant_features:\n",
    "            train_data = train_data.drop(columns=constant_features)\n",
    "            test_data = test_data.drop(columns=constant_features, errors='ignore')\n",
    "            logger.info(f\"  - 移除了 {len(constant_features)} 个常数特征\")\n",
    "        \n",
    "        # Feature selection: advanced selector first, basic selector as fallback\n",
    "        try:\n",
    "            features = feature_engineer.select_features_advanced(train_data, num_features=60)\n",
    "            logger.info(f\"  选择特征数: {len(features)}\")\n",
    "        except Exception as e:\n",
    "            logger.error(f\"高级特征选择失败: {e}\")\n",
    "            # Fall back to basic feature selection with fewer features\n",
    "            logger.info(\"  使用基础特征选择...\")\n",
    "            features = feature_engineer.select_features(train_data, num_features=40)\n",
    "            logger.info(f\"  选择特征数: {len(features)}\")\n",
    "        \n",
    "        # Step 7: prepare training data\n",
    "        logger.info(\"第7步：准备训练数据...\")\n",
    "        \n",
    "        # Make sure every selected feature exists in both datasets (fill 0 if not)\n",
    "        missing_features = []\n",
    "        for feature in features:\n",
    "            if feature not in train_data.columns:\n",
    "                train_data[feature] = 0\n",
    "                missing_features.append(feature)\n",
    "            if feature not in test_data.columns:\n",
    "                test_data[feature] = 0\n",
    "        \n",
    "        if missing_features:\n",
    "            logger.warning(f\"  缺失特征已填充0: {missing_features}\")\n",
    "        \n",
    "        X = train_data[features].copy()\n",
    "        y = train_data['label'].copy()\n",
    "        \n",
    "        # Final data sanity pass: no NaN or infinite values in the feature matrix\n",
    "        X = X.fillna(0).replace([np.inf, -np.inf], 0)\n",
    "        \n",
    "        logger.info(f\"  特征矩阵形状: {X.shape}\")\n",
    "        logger.info(f\"  标签分布: 正例={y.sum()}, 负例={len(y)-y.sum()}\")\n",
    "        logger.info(f\"  正例比例: {y.mean():.4f}\")\n",
    "        \n",
    "        # Step 8: model training (3-fold CV, with a simple split as fallback)\n",
    "        logger.info(\"第8步：执行模型训练...\")\n",
    "        model_trainer.setup_models()\n",
    "        \n",
    "        try:\n",
    "            ensemble_model = model_trainer.train_with_cv(X, y, cv_folds=3)\n",
    "        except Exception as e:\n",
    "            logger.error(f\"交叉验证训练失败: {e}\")\n",
    "            logger.info(\"尝试简单训练...\")\n",
    "            \n",
    "            # Fall back to a single stratified 80/20 train/validation split\n",
    "            X_train, X_val, y_train, y_val = train_test_split(\n",
    "                X, y, test_size=0.2, random_state=42, stratify=y\n",
    "            )\n",
    "            model_trainer.ensemble_model.train_all_models(X_train, y_train, X_val, y_val)\n",
    "            ensemble_model = model_trainer.ensemble_model\n",
    "            \n",
    "            val_preds = ensemble_model.predict(X_val)\n",
    "            if len(val_preds) > 0 and not np.all(val_preds == 0):\n",
    "                model_trainer.best_auc = roc_auc_score(y_val, val_preds)\n",
    "            else:\n",
    "                model_trainer.best_auc = 0.0\n",
    "        \n",
    "        # Step 9: generate predictions on the test set\n",
    "        logger.info(\"第9步：生成预测结果...\")\n",
    "        \n",
    "        # Ensure the test set has every required feature before predicting\n",
    "        for feature in features:\n",
    "            if feature not in test_data.columns:\n",
    "                test_data[feature] = 0\n",
    "        \n",
    "        predictions = Predictor.predict_ensemble(ensemble_model, test_data, features)\n",
    "        \n",
    "        # Step 10: save results with a timestamped filename\n",
    "        logger.info(\"第10步：保存预测结果...\")\n",
    "        timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "        result_file = os.path.join(OUTPUT_DIR, f'enhanced_ensemble_predictions_{timestamp}.csv')\n",
    "        Predictor.save_results(predictions, result_file)\n",
    "        \n",
    "        # Task completion statistics\n",
    "        total_time = time.time() - start_time\n",
    "        logger.info(\"=\"*60)\n",
    "        logger.info(\"任务执行完成！\")\n",
    "        logger.info(\"=\"*60)\n",
    "        logger.info(f\"总耗时: {total_time:.2f} 秒 ({total_time/60:.1f} 分钟)\")\n",
    "        logger.info(f\"最终AUC: {model_trainer.best_auc:.4f}\")\n",
    "        logger.info(f\"预测样本数: {len(predictions)}\")\n",
    "        logger.info(f\"结果文件: {result_file}\")\n",
    "        logger.info(\"=\"*60)\n",
    "        \n",
    "        # Show a sample of the predictions\n",
    "        logger.info(\"预测结果样例:\")\n",
    "        print(predictions.head(10))\n",
    "        \n",
    "        # Show a per-model performance summary, best score first\n",
    "        if model_trainer.ensemble_model.model_scores:\n",
    "            logger.info(\"各模型性能摘要:\")\n",
    "            for name, score in sorted(model_trainer.ensemble_model.model_scores.items(), \n",
    "                                    key=lambda x: x[1], reverse=True):\n",
    "                logger.info(f\"  {name}: {score:.4f}\")\n",
    "        \n",
    "        return {\n",
    "            'status': 'success',\n",
    "            'auc': model_trainer.best_auc,\n",
    "            'predictions': len(predictions),\n",
    "            'result_file': result_file,\n",
    "            'total_time': total_time,\n",
    "            'model_count': len([m for m in model_trainer.ensemble_model.models.values() \n",
    "                              if m['trained_model'] is not None])\n",
    "        }\n",
    "        \n",
    "    except Exception as e:\n",
    "        logger.error(\"=\"*60)\n",
    "        logger.error(\"程序执行出错！\")\n",
    "        logger.error(\"=\"*60)\n",
    "        logger.exception(f\"错误详情: {str(e)}\")\n",
    "        \n",
    "        return {\n",
    "            'status': 'error',\n",
    "            'error_message': str(e),\n",
    "            'total_time': time.time() - start_time\n",
    "        }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0092a696",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-06-09 11:00:18,301 - INFO - ============================================================\n",
      "2025-06-09 11:00:18,301 - INFO - 开始执行商家重复购买预测任务（优化版本）\n",
      "2025-06-09 11:00:18,301 - INFO - ============================================================\n",
      "2025-06-09 11:00:18,302 - INFO - 第1步：配置系统路径...\n",
      "2025-06-09 11:00:18,303 - INFO -   数据目录: F:/H/jqxx-master/hjh\\data\n",
      "2025-06-09 11:00:18,303 - INFO -   输出目录: F:/H/jqxx-master/hjh\\results\n",
      "2025-06-09 11:00:18,303 - INFO - 第2步：初始化系统组件...\n",
      "2025-06-09 11:00:18,304 - INFO - 数据加载器初始化完成，数据目录: F:/H/jqxx-master/hjh\\data\n",
      "2025-06-09 11:00:18,304 - INFO - 缓存目录: F:/H/jqxx-master/hjh\\feature_cache\n",
      "2025-06-09 11:00:18,305 - INFO - 基础特征工程器初始化完成\n",
      "2025-06-09 11:00:18,305 - INFO - 增强特征工程器初始化完成\n",
      "2025-06-09 11:00:18,305 - INFO - 增强集成模型系统初始化完成\n",
      "2025-06-09 11:00:18,305 - INFO - 优化模型训练器初始化完成\n",
      "2025-06-09 11:00:18,306 - INFO - 第3步：验证数据文件...\n",
      "2025-06-09 11:00:18,307 - INFO - 所有数据文件验证通过\n",
      "2025-06-09 11:00:18,307 - INFO - 第4步：执行数据预处理...\n",
      "2025-06-09 11:00:18,308 - INFO - 正在加载文件: train\n",
      "2025-06-09 11:00:18,378 - INFO - 正在加载文件: test\n",
      "2025-06-09 11:00:18,390 - INFO - 正在加载文件: user_info\n",
      "2025-06-09 11:00:18,550 - INFO - 开始处理用户日志数据...\n",
      "2025-06-09 11:00:18,550 - INFO - 开始处理用户日志...\n",
      "2025-06-09 11:00:18,974 - INFO - 已处理 1000000 行\n",
      "2025-06-09 11:00:19,764 - INFO - 已处理 3000000 行\n",
      "2025-06-09 11:00:20,248 - INFO - 已处理 4070859 行\n",
      "2025-06-09 11:00:20,359 - INFO - 用户日志缓存保存成功\n",
      "2025-06-09 11:00:20,360 - INFO - 用户日志处理完成，总计 4070859 行\n",
      "2025-06-09 11:00:20,368 - INFO - 开始生成基础特征...\n",
      "2025-06-09 11:00:20,369 - INFO - 开始生成优化特征...\n",
      "2025-06-09 11:00:20,370 - INFO -   - 计算行为特征...\n"
     ]
    }
   ],
   "source": [
    "# Program entry point: run the pipeline and print a short result summary.\n",
    "if __name__ == \"__main__\":\n",
    "    result = main()\n",
    "    \n",
    "    succeeded = result['status'] == 'success'\n",
    "    if not succeeded:\n",
    "        # Failure path: show the error message and the elapsed time.\n",
    "        print(f\"\\n❌ 任务执行失败！\")\n",
    "        print(f\"🔍 错误信息: {result['error_message']}\")\n",
    "        print(f\"⏱️ 耗时: {result['total_time']/60:.1f} 分钟\")\n",
    "    else:\n",
    "        # Success path: report AUC, trained-model count, output file and timing.\n",
    "        print(f\"\\n🎉 任务成功完成！\")\n",
    "        print(f\"📊 模型AUC: {result['auc']:.4f}\")\n",
    "        print(f\"🤖 成功训练模型数: {result.get('model_count', 0)}\")\n",
    "        print(f\"📁 结果文件: {result['result_file']}\")\n",
    "        print(f\"⏱️ 总耗时: {result['total_time']/60:.1f} 分钟\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
