{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "7776be7f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import os\n",
    "import logging\n",
    "import time\n",
    "from datetime import datetime\n",
    "from sklearn.model_selection import train_test_split, StratifiedKFold\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from imblearn.over_sampling import SMOTE\n",
    "import lightgbm as lgb\n",
    "import gc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "e9d17a1e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configure logging: INFO level, timestamped messages to stderr via StreamHandler\n",
    "logging.basicConfig(\n",
    "    level=logging.INFO,\n",
    "    format='%(asctime)s - %(levelname)s - %(message)s',\n",
    "    handlers=[logging.StreamHandler()]\n",
    ")\n",
    "logger = logging.getLogger(__name__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "4e4efe0d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Memory optimization helper\n",
    "def reduce_mem_usage(df):\n",
    "    \"\"\"Downcast numeric columns to the smallest dtype that fits their range.\n",
    "\n",
    "    Integers shrink to the narrowest int type covering the column's min/max.\n",
    "    Floats shrink to float32 at most: float16 is deliberately skipped because\n",
    "    its ~3 significant decimal digits corrupt counts, IDs and timestamps\n",
    "    (e.g. 2049 rounds to 2048), which silently distorts downstream features.\n",
    "\n",
    "    Mutates ``df`` in place and returns it.\n",
    "    \"\"\"\n",
    "    start_mem = df.memory_usage().sum() / 1024**2\n",
    "    logger.info(f\"优化前内存使用: {start_mem:.2f} MB\")\n",
    "    \n",
    "    for col in df.columns:\n",
    "        col_type = df[col].dtype\n",
    "        \n",
    "        if col_type != object:\n",
    "            c_min = df[col].min()\n",
    "            c_max = df[col].max()\n",
    "            if str(col_type)[:3] == 'int':\n",
    "                # Inclusive bounds so values exactly at a type's limit still fit\n",
    "                if c_min >= np.iinfo(np.int8).min and c_max <= np.iinfo(np.int8).max:\n",
    "                    df[col] = df[col].astype(np.int8)\n",
    "                elif c_min >= np.iinfo(np.int16).min and c_max <= np.iinfo(np.int16).max:\n",
    "                    df[col] = df[col].astype(np.int16)\n",
    "                elif c_min >= np.iinfo(np.int32).min and c_max <= np.iinfo(np.int32).max:\n",
    "                    df[col] = df[col].astype(np.int32)\n",
    "                else:\n",
    "                    df[col] = df[col].astype(np.int64)\n",
    "            else:\n",
    "                # float16 intentionally not used: its precision loss is unsafe here.\n",
    "                # An all-NaN column fails both comparisons and lands on float64.\n",
    "                if c_min >= np.finfo(np.float32).min and c_max <= np.finfo(np.float32).max:\n",
    "                    df[col] = df[col].astype(np.float32)\n",
    "                else:\n",
    "                    df[col] = df[col].astype(np.float64)\n",
    "    \n",
    "    end_mem = df.memory_usage().sum() / 1024**2\n",
    "    logger.info(f\"优化后内存使用: {end_mem:.2f} MB ({100*(start_mem-end_mem)/start_mem:.1f}% 减少)\")\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "09fa142e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data loading and preprocessing\n",
    "class DataLoader:\n",
    "    \"\"\"Loads the raw competition files, engineers features and caches results.\"\"\"\n",
    "\n",
    "    def __init__(self, data_dir):\n",
    "        self.data_dir = data_dir\n",
    "        # Expected raw input files inside data_dir\n",
    "        self.file_paths = {\n",
    "            'train': os.path.join(data_dir, 'train.csv'),\n",
    "            'test': os.path.join(data_dir, 'test_without_label.csv'),\n",
    "            'user_log': os.path.join(data_dir, 'user_log_format2.csv'),\n",
    "            'user_info': os.path.join(data_dir, 'user_info_format1.csv')\n",
    "        }\n",
    "        \n",
    "    def validate_files(self):\n",
    "        \"\"\"Raise FileNotFoundError if any required data file is missing.\"\"\"\n",
    "        missing_files = []\n",
    "        for name, path in self.file_paths.items():\n",
    "            if not os.path.exists(path):\n",
    "                missing_files.append((name, path))\n",
    "        \n",
    "        if missing_files:\n",
    "            logger.error(\"以下文件缺失:\")\n",
    "            for name, path in missing_files:\n",
    "                logger.error(f\"- {name}: {path}\")\n",
    "            raise FileNotFoundError(\"必需数据文件缺失，请检查路径\")\n",
    "    \n",
    "    def load_data(self, file_type):\n",
    "        \"\"\"Load one known file type ('train', 'test', 'user_log', 'user_info').\"\"\"\n",
    "        path = self.file_paths.get(file_type)\n",
    "        if not path:\n",
    "            raise ValueError(f\"未知文件类型: {file_type}\")\n",
    "        \n",
    "        if not os.path.exists(path):\n",
    "            raise FileNotFoundError(f\"文件不存在: {path}\")\n",
    "        \n",
    "        logger.info(f\"加载文件: {path}\")\n",
    "        return pd.read_csv(path)\n",
    "    \n",
    "    def preprocess(self):\n",
    "        \"\"\"Run the full preprocessing pipeline and return (train, test) frames.\n",
    "\n",
    "        Generated features are cached as pickles under <parent>/feature_cache;\n",
    "        later runs load the cache instead of recomputing.\n",
    "        \"\"\"\n",
    "        start_time = time.time()\n",
    "        \n",
    "        # Look for cached feature files first\n",
    "        cache_dir = os.path.join(os.path.dirname(self.data_dir), 'feature_cache')\n",
    "        os.makedirs(cache_dir, exist_ok=True)\n",
    "        \n",
    "        train_cache_file = os.path.join(cache_dir, 'train_features.pkl')\n",
    "        test_cache_file = os.path.join(cache_dir, 'test_features.pkl')\n",
    "        \n",
    "        # Load directly from cache when both pickles exist\n",
    "        if os.path.exists(train_cache_file) and os.path.exists(test_cache_file):\n",
    "            logger.info(\"发现特征缓存文件，正在加载...\")\n",
    "            try:\n",
    "                train = pd.read_pickle(train_cache_file)\n",
    "                test = pd.read_pickle(test_cache_file)\n",
    "                logger.info(f\"特征缓存加载完成! 耗时: {time.time()-start_time:.2f}秒\")\n",
    "                return train, test\n",
    "            except Exception as e:\n",
    "                # Corrupt or stale cache: log and rebuild features from scratch\n",
    "                logger.warning(f\"加载缓存文件失败: {e}\")\n",
    "                logger.info(\"将重新生成特征...\")\n",
    "        \n",
    "        # Load the base tables\n",
    "        train = self.load_data('train')\n",
    "        test = self.load_data('test')\n",
    "        user_info = self.load_data('user_info')\n",
    "        \n",
    "        # Fill missing user attributes (0 = unknown age range, 2 = unknown gender)\n",
    "        user_info['age_range'] = user_info['age_range'].fillna(0).astype(np.int8)\n",
    "        user_info['gender'] = user_info['gender'].fillna(2).astype(np.int8)\n",
    "        \n",
    "        # Attach user attributes to train/test\n",
    "        train = train.merge(user_info, on='user_id', how='left')\n",
    "        test = test.merge(user_info, on='user_id', how='left')\n",
    "        \n",
    "        # Process the interaction log\n",
    "        logger.info(\"处理用户日志数据...\")\n",
    "        user_log = self.process_user_log(self.file_paths['user_log'])\n",
    "        \n",
    "        # Build features\n",
    "        logger.info(\"生成用户-商家特征...\")\n",
    "        train = self.generate_features(train, user_log)\n",
    "        test = self.generate_features(test, user_log)\n",
    "        \n",
    "        # Shrink dtypes to save memory\n",
    "        logger.info(\"优化内存使用...\")\n",
    "        train = reduce_mem_usage(train)\n",
    "        test = reduce_mem_usage(test)\n",
    "        \n",
    "        # Persist features (best-effort: a failure only costs time next run)\n",
    "        logger.info(\"保存特征到缓存...\")\n",
    "        try:\n",
    "            train.to_pickle(train_cache_file)\n",
    "            test.to_pickle(test_cache_file)\n",
    "            logger.info(f\"特征已保存到缓存: {cache_dir}\")\n",
    "        except Exception as e:\n",
    "            logger.warning(f\"保存缓存文件失败: {e}\")\n",
    "        \n",
    "        logger.info(f\"数据预处理完成! 耗时: {time.time()-start_time:.2f}秒\")\n",
    "        return train, test\n",
    "\n",
    "    def clear_cache(self):\n",
    "        \"\"\"Delete cached feature pickles and the cache dir if it becomes empty.\"\"\"\n",
    "        cache_dir = os.path.join(os.path.dirname(self.data_dir), 'feature_cache')\n",
    "        cache_files = ['train_features.pkl', 'test_features.pkl']\n",
    "        \n",
    "        for cache_file in cache_files:\n",
    "            file_path = os.path.join(cache_dir, cache_file)\n",
    "            if os.path.exists(file_path):\n",
    "                os.remove(file_path)\n",
    "                logger.info(f\"已删除缓存文件: {file_path}\")\n",
    "        \n",
    "        if os.path.exists(cache_dir) and not os.listdir(cache_dir):\n",
    "            os.rmdir(cache_dir)\n",
    "            logger.info(f\"已删除空缓存目录: {cache_dir}\")\n",
    "            \n",
    "    def process_user_log(self, log_path):\n",
    "        \"\"\"Read the user log in chunks, fill missing values and add time parts.\"\"\"\n",
    "        chunks = []\n",
    "        chunk_size = 500000\n",
    "        total_rows = 0\n",
    "        \n",
    "        logger.info(f\"分块读取日志文件: {log_path}\")\n",
    "        for i, chunk in enumerate(pd.read_csv(log_path, chunksize=chunk_size)):\n",
    "            # Fill missing values (per the data description, missing brand_id -> 0)\n",
    "            chunk.fillna({\n",
    "                'user_id': 0,\n",
    "                'item_id': 0,\n",
    "                'cat_id': 0,\n",
    "                'merchant_id': 0,\n",
    "                'brand_id': 0,\n",
    "                'time_stamp': 0,\n",
    "                'action_type': 0\n",
    "            }, inplace=True)\n",
    "            \n",
    "            # Guard against infinities before integer casts\n",
    "            chunk.replace([np.inf, -np.inf], 0, inplace=True)\n",
    "            \n",
    "            # Narrow dtypes early to keep the concatenated log small\n",
    "            chunk = chunk.astype({\n",
    "                'user_id': 'int32',\n",
    "                'item_id': 'int32',\n",
    "                'cat_id': 'int32',\n",
    "                'merchant_id': 'int32',\n",
    "                'brand_id': 'int32',\n",
    "                'time_stamp': 'int32',\n",
    "                'action_type': 'int8'\n",
    "            })\n",
    "            \n",
    "            # Derived time parts.\n",
    "            # NOTE(review): unit='s' assumes time_stamp is a unix epoch in seconds;\n",
    "            # if the source encodes it differently (e.g. mmdd), hour/day_of_week\n",
    "            # are meaningless -- confirm the format against the data description.\n",
    "            chunk['timestamp'] = pd.to_datetime(chunk['time_stamp'], unit='s')\n",
    "            chunk['hour'] = chunk['timestamp'].dt.hour.astype(np.int8)\n",
    "            chunk['day_of_week'] = chunk['timestamp'].dt.dayofweek.astype(np.int8)\n",
    "            \n",
    "            chunks.append(chunk)\n",
    "            total_rows += len(chunk)\n",
    "            logger.info(f\"已处理 {total_rows} 行\")\n",
    "        \n",
    "        return pd.concat(chunks)\n",
    "    \n",
    "    def generate_features(self, df, user_log):\n",
    "        \"\"\"Merge behaviour, user, merchant and interaction features onto df.\"\"\"\n",
    "        # User-merchant behaviour features\n",
    "        logger.info(\"计算行为特征...\")\n",
    "        behavior_features = user_log.groupby(['user_id', 'merchant_id']).agg(\n",
    "            total_actions=('action_type', 'count'),\n",
    "            clicks=('action_type', lambda x: (x == 0).sum()),\n",
    "            buys=('action_type', lambda x: (x == 1).sum()),\n",
    "            collects=('action_type', lambda x: (x == 2).sum()),\n",
    "            carts=('action_type', lambda x: (x == 3).sum()),\n",
    "            last_action=('time_stamp', 'max'),\n",
    "            first_action=('time_stamp', 'min'),\n",
    "            favorite_hour=('hour', lambda x: x.mode()[0] if not x.empty else 12),\n",
    "            favorite_day=('day_of_week', lambda x: x.mode()[0] if not x.empty else 3)\n",
    "        ).reset_index()\n",
    "        \n",
    "        # Activity span and conversion ratio (epsilon avoids division by zero)\n",
    "        behavior_features['action_span'] = behavior_features['last_action'] - behavior_features['first_action']\n",
    "        behavior_features['buy_ratio'] = behavior_features['buys'] / (behavior_features['total_actions'] + 1e-5)\n",
    "        \n",
    "        # User-level features\n",
    "        logger.info(\"计算用户特征...\")\n",
    "        user_features = user_log.groupby('user_id').agg(\n",
    "            user_total_actions=('action_type', 'count'),\n",
    "            user_unique_merchants=('merchant_id', 'nunique'),\n",
    "            user_unique_items=('item_id', 'nunique'),\n",
    "            user_unique_categories=('cat_id', 'nunique'),\n",
    "            user_avg_actions=('action_type', 'mean')\n",
    "        ).reset_index()\n",
    "        # Bug fix: purchase times must come from purchase rows only.  The old\n",
    "        # code aggregated 'time_stamp' with (x == 1).any(), which compared\n",
    "        # timestamps -- not action_type -- to 1, so it almost always yielded 0.\n",
    "        purchases = user_log.loc[user_log['action_type'] == 1]\n",
    "        purchase_times = purchases.groupby('user_id').agg(\n",
    "            user_first_purchase_time=('time_stamp', 'min'),\n",
    "            user_last_purchase_time=('time_stamp', 'max')\n",
    "        ).reset_index()\n",
    "        user_features = user_features.merge(purchase_times, on='user_id', how='left')\n",
    "        purchase_cols = ['user_first_purchase_time', 'user_last_purchase_time']\n",
    "        user_features[purchase_cols] = user_features[purchase_cols].fillna(0)\n",
    "        user_features['user_purchase_duration'] = user_features['user_last_purchase_time'] - user_features['user_first_purchase_time']\n",
    "        \n",
    "        # Merchant-level features\n",
    "        logger.info(\"计算商家特征...\")\n",
    "        merchant_features = user_log.groupby('merchant_id').agg(\n",
    "            merchant_total_actions=('action_type', 'count'),\n",
    "            merchant_unique_users=('user_id', 'nunique'),\n",
    "            merchant_unique_items=('item_id', 'nunique'),\n",
    "            merchant_unique_categories=('cat_id', 'nunique'),\n",
    "            merchant_purchase_count=('action_type', lambda x: (x == 1).sum()),\n",
    "            merchant_click_count=('action_type', lambda x: (x == 0).sum())\n",
    "        ).reset_index()\n",
    "        merchant_features['merchant_purchase_rate'] = merchant_features['merchant_purchase_count'] / (merchant_features['merchant_total_actions'] + 1e-5)\n",
    "        \n",
    "        # User-merchant interaction counts\n",
    "        logger.info(\"计算用户-商家交互特征...\")\n",
    "        user_merchant_features = user_log.groupby(['user_id', 'merchant_id']).agg(\n",
    "            um_click_count=('action_type', lambda x: (x == 0).sum()),\n",
    "            um_purchase_count=('action_type', lambda x: (x == 1).sum()),\n",
    "            um_cart_count=('action_type', lambda x: (x == 3).sum()),\n",
    "            um_collect_count=('action_type', lambda x: (x == 2).sum()),\n",
    "            um_unique_items=('item_id', 'nunique'),\n",
    "            um_unique_categories=('cat_id', 'nunique')\n",
    "        ).reset_index()\n",
    "        \n",
    "        # Merge every feature table onto df\n",
    "        logger.info(\"合并特征...\")\n",
    "        df = df.merge(behavior_features, on=['user_id', 'merchant_id'], how='left')\n",
    "        df = df.merge(user_features, on='user_id', how='left')\n",
    "        df = df.merge(merchant_features, on='merchant_id', how='left')\n",
    "        df = df.merge(user_merchant_features, on=['user_id', 'merchant_id'], how='left')\n",
    "        \n",
    "        # Rows with no log history get zero-filled features\n",
    "        fill_values = {\n",
    "            'total_actions': 0,\n",
    "            'clicks': 0,\n",
    "            'buys': 0,\n",
    "            'collects': 0,\n",
    "            'carts': 0,\n",
    "            'action_span': 0,\n",
    "            'buy_ratio': 0,\n",
    "            'user_total_actions': 0,\n",
    "            'user_unique_merchants': 0,\n",
    "            'user_unique_items': 0,\n",
    "            'user_unique_categories': 0,\n",
    "            'user_avg_actions': 0,\n",
    "            'user_first_purchase_time': 0,\n",
    "            'user_last_purchase_time': 0,\n",
    "            'user_purchase_duration': 0,\n",
    "            'merchant_total_actions': 0,\n",
    "            'merchant_unique_users': 0,\n",
    "            'merchant_unique_items': 0,\n",
    "            'merchant_unique_categories': 0,\n",
    "            'merchant_purchase_count': 0,\n",
    "            'merchant_click_count': 0,\n",
    "            'merchant_purchase_rate': 0,\n",
    "            'um_click_count': 0,\n",
    "            'um_purchase_count': 0,\n",
    "            'um_cart_count': 0,\n",
    "            'um_collect_count': 0,\n",
    "            'um_unique_items': 0,\n",
    "            'um_unique_categories': 0\n",
    "        }\n",
    "        df.fillna(fill_values, inplace=True)\n",
    "        \n",
    "        # Composite features\n",
    "        df['user_merchant_interaction'] = df['total_actions'] * df['merchant_total_actions']\n",
    "        df['buy_intensity'] = df['buys'] / (df['user_total_actions'] + 1e-5)\n",
    "        df['user_activity_level'] = np.log1p(df['user_total_actions'])\n",
    "        df['merchant_popularity'] = np.log1p(df['merchant_unique_users'])\n",
    "        df['purchase_conversion'] = df['um_purchase_count'] / (df['um_click_count'] + 1e-5)\n",
    "        \n",
    "        return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "daa1f0fa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Feature engineering\n",
    "class FeatureEngineer:\n",
    "    \"\"\"Selects the most informative columns using LightGBM importances.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # Populated by select_features(); list of chosen column names\n",
    "        self.selected_features = None\n",
    "    \n",
    "    def select_features(self, train_data, target='label', num_features=30):\n",
    "        \"\"\"Rank columns with a quick LightGBM fit and keep the top `num_features`.\"\"\"\n",
    "        logger.info(\"执行特征选择...\")\n",
    "        \n",
    "        # Split predictors from the label; identifier columns are dropped if present\n",
    "        feature_matrix = train_data.drop(columns=[target, 'user_id', 'merchant_id'], errors='ignore')\n",
    "        labels = train_data[target]\n",
    "        \n",
    "        # Fit a small class-balanced model purely to obtain feature importances\n",
    "        ranker = lgb.LGBMClassifier(\n",
    "            n_estimators=100,\n",
    "            random_state=42,\n",
    "            verbose=-1,\n",
    "            class_weight='balanced'\n",
    "        )\n",
    "        ranker.fit(feature_matrix, labels)\n",
    "        \n",
    "        # Pair every column with its importance score\n",
    "        importance = pd.DataFrame({\n",
    "            'feature': feature_matrix.columns,\n",
    "            'importance': ranker.feature_importances_\n",
    "        })\n",
    "        \n",
    "        # Keep the names of the highest-scoring columns\n",
    "        top = importance.sort_values('importance', ascending=False).head(num_features)\n",
    "        self.selected_features = top['feature'].tolist()\n",
    "        logger.info(f\"选择了 {len(self.selected_features)} 个特征:\\n{self.selected_features}\")\n",
    "        return self.selected_features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "0f4d4f3d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Model training\n",
    "class ModelTrainer:\n",
    "    \"\"\"Trains a LightGBM binary classifier with SMOTE oversampling.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        self.model = None        # trained lgb.Booster after train()\n",
    "        self.best_params = None  # reserved for future hyperparameter tuning\n",
    "        self.auc_score = 0.0     # validation AUC (stays 0.0 without a validation set)\n",
    "    \n",
    "    def train(self, X_train, y_train, X_val=None, y_val=None):\n",
    "        \"\"\"Train LightGBM; uses early stopping when a validation set is supplied.\n",
    "\n",
    "        Bug fix: valid_data was created only when BOTH X_val and y_val were\n",
    "        given, but later code checked only X_val, so passing X_val without\n",
    "        y_val raised NameError.  A single has_val flag now guards every\n",
    "        validation-dependent step consistently.\n",
    "        \"\"\"\n",
    "        logger.info(\"开始模型训练...\")\n",
    "        \n",
    "        has_val = X_val is not None and y_val is not None\n",
    "        \n",
    "        # Rebalance the rare positive class before fitting\n",
    "        smote = SMOTE(sampling_strategy=0.3, random_state=42)\n",
    "        X_res, y_res = smote.fit_resample(X_train, y_train)\n",
    "        \n",
    "        # Build LightGBM datasets (validation set only when fully provided)\n",
    "        train_data = lgb.Dataset(X_res, label=y_res)\n",
    "        valid_data = None\n",
    "        if has_val:\n",
    "            valid_data = lgb.Dataset(X_val, label=y_val, reference=train_data)\n",
    "        \n",
    "        # Model parameters\n",
    "        params = {\n",
    "            'objective': 'binary',\n",
    "            'metric': 'auc',\n",
    "            'boosting_type': 'gbdt',\n",
    "            'learning_rate': 0.05,\n",
    "            'num_leaves': 31,\n",
    "            'max_depth': 7,\n",
    "            'min_child_samples': 20,\n",
    "            'feature_fraction': 0.8,\n",
    "            'bagging_fraction': 0.8,\n",
    "            'bagging_freq': 5,\n",
    "            'reg_alpha': 0.1,\n",
    "            'reg_lambda': 0.1,\n",
    "            'random_state': 42,\n",
    "            'n_jobs': -1,\n",
    "            'verbose': -1\n",
    "        }\n",
    "        \n",
    "        # Callbacks: early stopping needs a validation set; always log progress\n",
    "        callbacks = []\n",
    "        if has_val:\n",
    "            callbacks.append(lgb.early_stopping(50))\n",
    "        callbacks.append(lgb.log_evaluation(50))\n",
    "        \n",
    "        # Train the booster\n",
    "        self.model = lgb.train(\n",
    "            params,\n",
    "            train_data,\n",
    "            num_boost_round=1000,\n",
    "            valid_sets=[train_data, valid_data] if has_val else [train_data],\n",
    "            valid_names=['train', 'valid'] if has_val else ['train'],\n",
    "            callbacks=callbacks\n",
    "        )\n",
    "        \n",
    "        # Evaluate on the held-out data when available\n",
    "        if has_val:\n",
    "            val_preds = self.model.predict(X_val, num_iteration=self.model.best_iteration)\n",
    "            self.auc_score = roc_auc_score(y_val, val_preds)\n",
    "            logger.info(f\"验证集AUC: {self.auc_score:.4f}\")\n",
    "        \n",
    "        return self.model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "ec51845e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prediction and result saving\n",
    "class Predictor:\n",
    "    \"\"\"Stateless helpers for scoring the test set and persisting results.\"\"\"\n",
    "\n",
    "    @staticmethod\n",
    "    def predict(model, test_data, features):\n",
    "        \"\"\"Return a frame with user_id, merchant_id, prob and 0/1 label.\n",
    "\n",
    "        Bug fix: missing feature columns were previously inserted directly\n",
    "        into the caller's test_data (a hidden mutating side effect).  Scoring\n",
    "        now works on a local copy, leaving the input frame untouched.\n",
    "        \"\"\"\n",
    "        logger.info(\"生成预测结果...\")\n",
    "        predictions = test_data[['user_id', 'merchant_id']].copy()\n",
    "        \n",
    "        # Build the feature matrix on a copy so the caller's frame is not mutated\n",
    "        feature_frame = test_data.copy()\n",
    "        for feature in features:\n",
    "            if feature not in feature_frame.columns:\n",
    "                feature_frame[feature] = 0\n",
    "                logger.warning(f\"特征 '{feature}' 在测试集中不存在，已填充0\")\n",
    "        \n",
    "        # Score with the booster's best iteration and threshold at 0.5\n",
    "        predictions['prob'] = model.predict(feature_frame[features], num_iteration=model.best_iteration)\n",
    "        predictions['label'] = (predictions['prob'] > 0.5).astype(int)\n",
    "        return predictions\n",
    "    \n",
    "    @staticmethod\n",
    "    def save_results(predictions, file_path):\n",
    "        \"\"\"Write predictions to CSV (no index column) and return the path.\"\"\"\n",
    "        logger.info(f\"保存结果到: {file_path}\")\n",
    "        predictions.to_csv(file_path, index=False)\n",
    "        return file_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "5cb7da9e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Main program\n",
    "def main():\n",
    "    \"\"\"End-to-end pipeline: validate files, build features, train, predict, save.\"\"\"\n",
    "    try:\n",
    "        # Paths: BASE_DIR may be overridden via the environment for portability;\n",
    "        # the default preserves the original local layout.\n",
    "        BASE_DIR = os.environ.get('BASE_DIR', 'F:/H/jqxx-master/hjh')\n",
    "        DATA_DIR = os.path.join(BASE_DIR, 'data')\n",
    "        OUTPUT_DIR = os.path.join(BASE_DIR, 'results')\n",
    "        os.makedirs(OUTPUT_DIR, exist_ok=True)\n",
    "        \n",
    "        # Wire up the pipeline components\n",
    "        data_loader = DataLoader(DATA_DIR)\n",
    "        feature_engineer = FeatureEngineer()\n",
    "        model_trainer = ModelTrainer()\n",
    "        \n",
    "        # Verify inputs before doing any expensive work\n",
    "        logger.info(\"验证数据文件...\")\n",
    "        data_loader.validate_files()\n",
    "        \n",
    "        # Uncomment to force feature regeneration\n",
    "        # data_loader.clear_cache()\n",
    "        \n",
    "        # Load and preprocess (uses the feature cache automatically)\n",
    "        logger.info(\"加载并预处理数据...\")\n",
    "        train_data, test_data = data_loader.preprocess()\n",
    "        \n",
    "        # Feature selection\n",
    "        features = feature_engineer.select_features(train_data, num_features=25)\n",
    "        \n",
    "        # Prepare training matrices\n",
    "        X = train_data[features]\n",
    "        y = train_data['label']\n",
    "        \n",
    "        # Stratified hold-out split for early stopping and evaluation\n",
    "        X_train, X_val, y_train, y_val = train_test_split(\n",
    "            X, y, test_size=0.2, random_state=42, stratify=y\n",
    "        )\n",
    "        \n",
    "        # Train the model\n",
    "        model = model_trainer.train(X_train, y_train, X_val, y_val)\n",
    "        \n",
    "        # Score the test set\n",
    "        predictions = Predictor.predict(model, test_data, features)\n",
    "        \n",
    "        # Save timestamped results\n",
    "        timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "        result_file = os.path.join(OUTPUT_DIR, f'predictions_{timestamp}.csv')\n",
    "        Predictor.save_results(predictions, result_file)\n",
    "        \n",
    "        # Show a sample of the output\n",
    "        logger.info(\"预测结果样例:\")\n",
    "        print(predictions.head(10))\n",
    "        \n",
    "        logger.info(f\"任务完成! AUC: {model_trainer.auc_score:.4f}, 结果保存至: {result_file}\")\n",
    "    \n",
    "    except Exception as e:\n",
    "        logger.exception(\"程序执行出错:\")\n",
    "        raise"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "27b1e1d8",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-06-06 14:51:11,497 - INFO - 验证数据文件...\n",
      "2025-06-06 14:51:11,498 - INFO - 加载并预处理数据...\n",
      "2025-06-06 14:51:11,499 - INFO - 加载文件: F:/H/jqxx-master/hjh\\data\\train.csv\n",
      "2025-06-06 14:51:11,548 - INFO - 加载文件: F:/H/jqxx-master/hjh\\data\\test_without_label.csv\n",
      "2025-06-06 14:51:11,556 - INFO - 加载文件: F:/H/jqxx-master/hjh\\data\\user_info_format1.csv\n",
      "2025-06-06 14:51:11,713 - INFO - 处理用户日志数据...\n",
      "2025-06-06 14:51:11,714 - INFO - 分块读取日志文件: F:/H/jqxx-master/hjh\\data\\user_log_format2.csv\n",
      "2025-06-06 14:51:11,980 - INFO - 已处理 500000 行\n",
      "2025-06-06 14:51:12,236 - INFO - 已处理 1000000 行\n",
      "2025-06-06 14:51:12,502 - INFO - 已处理 1500000 行\n",
      "2025-06-06 14:51:12,757 - INFO - 已处理 2000000 行\n",
      "2025-06-06 14:51:13,011 - INFO - 已处理 2500000 行\n",
      "2025-06-06 14:51:13,265 - INFO - 已处理 3000000 行\n",
      "2025-06-06 14:51:13,519 - INFO - 已处理 3500000 行\n",
      "2025-06-06 14:51:13,772 - INFO - 已处理 4000000 行\n",
      "2025-06-06 14:51:13,819 - INFO - 已处理 4070859 行\n",
      "2025-06-06 14:51:13,861 - INFO - 生成用户-商家特征...\n",
      "2025-06-06 14:51:13,862 - INFO - 计算行为特征...\n",
      "2025-06-06 14:56:30,425 - INFO - 计算用户特征...\n",
      "2025-06-06 14:57:20,786 - INFO - 计算商家特征...\n",
      "2025-06-06 14:57:25,572 - INFO - 计算用户-商家交互特征...\n",
      "2025-06-06 15:01:12,680 - INFO - 合并特征...\n",
      "2025-06-06 15:01:13,613 - INFO - 计算行为特征...\n",
      "2025-06-06 15:06:50,383 - INFO - 计算用户特征...\n",
      "2025-06-06 15:07:43,249 - INFO - 计算商家特征...\n",
      "2025-06-06 15:07:48,294 - INFO - 计算用户-商家交互特征...\n",
      "2025-06-06 15:11:45,134 - INFO - 合并特征...\n",
      "2025-06-06 15:11:45,672 - INFO - 优化内存使用...\n",
      "2025-06-06 15:11:45,675 - INFO - 优化前内存使用: 66.93 MB\n",
      "2025-06-06 15:11:45,716 - INFO - 优化后内存使用: 18.71 MB (72.0% 减少)\n",
      "2025-06-06 15:11:45,717 - INFO - 优化前内存使用: 8.47 MB\n",
      "2025-06-06 15:11:45,727 - INFO - 优化后内存使用: 2.40 MB (71.6% 减少)\n",
      "2025-06-06 15:11:45,727 - INFO - 保存特征到缓存...\n",
      "2025-06-06 15:11:46,346 - INFO - 特征已保存到缓存: F:/H/jqxx-master/hjh\\feature_cache\n",
      "2025-06-06 15:11:46,347 - INFO - 数据预处理完成! 耗时: 1234.85秒\n",
      "2025-06-06 15:11:46,357 - INFO - 执行特征选择...\n",
      "2025-06-06 15:11:48,555 - INFO - 选择了 25 个特征:\n",
      "['merchant_unique_items', 'merchant_unique_users', 'merchant_purchase_rate', 'merchant_unique_categories', 'merchant_total_actions', 'user_avg_actions', 'user_merchant_interaction', 'merchant_click_count', 'merchant_purchase_count', 'first_action', 'user_total_actions', 'age_range', 'user_unique_items', 'um_unique_items', 'user_unique_categories', 'collects', 'user_unique_merchants', 'um_unique_categories', 'gender', 'total_actions', 'clicks', 'carts', 'merchant_popularity', 'buy_intensity', 'purchase_conversion']\n",
      "2025-06-06 15:11:48,640 - INFO - 开始模型训练...\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training until validation scores don't improve for 50 rounds\n",
      "[50]\ttrain's auc: 0.818084\tvalid's auc: 0.577839\n",
      "[100]\ttrain's auc: 0.849556\tvalid's auc: 0.595175\n",
      "[150]\ttrain's auc: 0.864966\tvalid's auc: 0.599869\n",
      "[200]\ttrain's auc: 0.874383\tvalid's auc: 0.601966\n",
      "[250]\ttrain's auc: 0.881623\tvalid's auc: 0.604948\n",
      "[300]\ttrain's auc: 0.887672\tvalid's auc: 0.606624\n",
      "[350]\ttrain's auc: 0.893342\tvalid's auc: 0.609646\n",
      "[400]\ttrain's auc: 0.898427\tvalid's auc: 0.612806\n",
      "[450]\ttrain's auc: 0.902288\tvalid's auc: 0.614485\n",
      "[500]\ttrain's auc: 0.906056\tvalid's auc: 0.615012\n",
      "[550]\ttrain's auc: 0.909395\tvalid's auc: 0.616294\n",
      "[600]\ttrain's auc: 0.912798\tvalid's auc: 0.617231\n",
      "[650]\ttrain's auc: 0.915692\tvalid's auc: 0.617584\n",
      "[700]\ttrain's auc: 0.918622\tvalid's auc: 0.61881\n",
      "[750]\ttrain's auc: 0.92135\tvalid's auc: 0.619523\n",
      "[800]\ttrain's auc: 0.923862\tvalid's auc: 0.619648\n",
      "Early stopping, best iteration is:\n",
      "[783]\ttrain's auc: 0.92317\tvalid's auc: 0.620047\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-06-06 15:12:05,572 - INFO - 验证集AUC: 0.6200\n",
      "2025-06-06 15:12:05,575 - INFO - 生成预测结果...\n",
      "2025-06-06 15:12:05,738 - INFO - 保存结果到: F:/H/jqxx-master/hjh\\results\\predictions_20250606_151205.csv\n",
      "2025-06-06 15:12:05,812 - INFO - 预测结果样例:\n",
      "2025-06-06 15:12:05,816 - INFO - 任务完成! AUC: 0.6200, 结果保存至: F:/H/jqxx-master/hjh\\results\\predictions_20250606_151205.csv\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "   user_id  merchant_id      prob  label\n",
      "0    34176         3906  0.282917      0\n",
      "1    34176          121  0.086985      0\n",
      "2    34176         4356  0.102266      0\n",
      "3    34176         2217  0.087600      0\n",
      "4   230784         4818  0.187028      0\n",
      "5   362112         2618  0.073987      0\n",
      "6    34944         2051  0.183937      0\n",
      "7   231552         3828  0.295563      0\n",
      "8   231552         2124  0.021818      0\n",
      "9   232320         1168  0.064569      0\n"
     ]
    }
   ],
   "source": [
    "# In a notebook kernel __name__ is \"__main__\", so executing this cell runs the pipeline\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
