{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 64,
   "id": "2f7c6768",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import os\n",
    "import warnings\n",
    "import gc\n",
    "from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from sklearn.feature_selection import SelectFromModel\n",
    "from imblearn.over_sampling import SMOTE\n",
    "from imblearn.pipeline import Pipeline as ImbPipeline\n",
    "import lightgbm as lgb\n",
    "import xgboost as xgb\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "# NOTE(review): SelectFromModel and LabelEncoder appear unused in the cells below -- confirm before removing.\n",
    "# Suppress library warnings notebook-wide.\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "id": "965413c6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Install xgboost from the Tsinghua PyPI mirror (uncomment to run):\n",
    "# pip install -i https://pypi.tuna.tsinghua.edu.cn/simple xgboost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "id": "0e14ceab",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Working directory for data and intermediate files.\n",
    "# NOTE(review): hardcoded absolute Windows path -- adjust per machine.\n",
    "cwd = 'F:/H/jqxx-master/hjh'\n",
    "os.makedirs(f'{cwd}/tmp', exist_ok=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "id": "d0b1cb36",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Data preprocessing (chunked)\n",
    "def enhanced_data_process(samples_file, logs_file, user_info_file=None, need_label=True, out_file=None):\n",
    "    \"\"\"Build aggregate features from the behavior log and merge them onto samples.\n",
    "\n",
    "    Reads ``logs_file`` in chunks to bound memory, accumulating user-merchant\n",
    "    pair, per-user and per-merchant aggregates, then left-joins everything\n",
    "    (plus optional demographics) onto ``samples_file``.\n",
    "\n",
    "    Parameters:\n",
    "        samples_file: CSV with user_id / merchant_id (and label) rows.\n",
    "        logs_file: behavior-log CSV with user_id, merchant_id, brand_id,\n",
    "            action_type and time_stamp columns.\n",
    "        user_info_file: optional CSV with age_range / gender per user.\n",
    "        need_label: copy the 'label' column from the samples file.\n",
    "        out_file: cache path; an existing file is reused as-is.\n",
    "\n",
    "    Returns:\n",
    "        pd.DataFrame with one row per sample and all engineered columns.\n",
    "    \"\"\"\n",
    "    # Reuse the cached result when available (makes re-runs cheap).\n",
    "    if out_file and os.path.exists(out_file):\n",
    "        return pd.read_csv(out_file)\n",
    "    \n",
    "    samples = pd.read_csv(samples_file)\n",
    "    \n",
    "    print(f\"正在读取日志文件: {logs_file}\")\n",
    "    # Read the log chunk by chunk and aggregate immediately to limit memory use.\n",
    "    user_merchant_features = []\n",
    "    user_features_dict = {}\n",
    "    merchant_features_dict = {}\n",
    "    \n",
    "    chunk_size = 50000\n",
    "    for i, chunk in enumerate(pd.read_csv(logs_file, chunksize=chunk_size)):\n",
    "        print(f\"处理第 {i+1} 块数据，大小: {len(chunk)}\")\n",
    "        \n",
    "        # User-merchant pair features. action_type codes assumed from the\n",
    "        # column names below: 0=click, 1=buy, 2=collect, 3=cart -- TODO confirm.\n",
    "        um_chunk = chunk.groupby(['user_id', 'merchant_id']).agg({\n",
    "            'action_type': ['count', lambda x: (x == 0).sum(), lambda x: (x == 1).sum(), \n",
    "                           lambda x: (x == 2).sum(), lambda x: (x == 3).sum()],\n",
    "            'brand_id': 'nunique',\n",
    "            'time_stamp': ['min', 'max']\n",
    "        }).reset_index()\n",
    "        \n",
    "        # Flatten the MultiIndex columns produced by .agg.\n",
    "        um_chunk.columns = ['user_id', 'merchant_id', 'total_actions', 'clicks', 'buys', \n",
    "                           'collects', 'carts', 'unique_brands', 'first_action_time', 'last_action_time']\n",
    "        \n",
    "        user_merchant_features.append(um_chunk)\n",
    "        \n",
    "        # Per-user aggregates for this chunk.\n",
    "        user_chunk = chunk.groupby('user_id').agg({\n",
    "            'action_type': ['count', lambda x: (x == 1).sum(), 'std'],\n",
    "            'merchant_id': 'nunique',\n",
    "            'brand_id': 'nunique',\n",
    "            'time_stamp': ['min', 'max']\n",
    "        }).reset_index()\n",
    "        \n",
    "        user_chunk.columns = ['user_id', 'total_actions', 'buys', 'action_std', \n",
    "                             'unique_merchants', 'unique_brands', 'first_action', 'last_action']\n",
    "        \n",
    "        # Precompute id -> set-of-counterpart-ids maps once per chunk instead of\n",
    "        # re-filtering the whole chunk inside the row loops (was O(rows^2)).\n",
    "        merchants_per_user = chunk.groupby('user_id')['merchant_id'].apply(set)\n",
    "        users_per_merchant = chunk.groupby('merchant_id')['user_id'].apply(set)\n",
    "        \n",
    "        # Accumulate user features across chunks.\n",
    "        for _, row in user_chunk.iterrows():\n",
    "            uid = row['user_id']\n",
    "            if uid in user_features_dict:\n",
    "                acc = user_features_dict[uid]\n",
    "                acc['total_actions'] += row['total_actions']\n",
    "                acc['buys'] += row['buys']\n",
    "                acc['merchants'].update(merchants_per_user[uid])\n",
    "                acc['first_action'] = min(acc['first_action'], row['first_action'])\n",
    "                acc['last_action'] = max(acc['last_action'], row['last_action'])\n",
    "            else:\n",
    "                user_features_dict[uid] = {\n",
    "                    'total_actions': row['total_actions'],\n",
    "                    'buys': row['buys'],\n",
    "                    'merchants': set(merchants_per_user[uid]),\n",
    "                    'first_action': row['first_action'],\n",
    "                    'last_action': row['last_action']\n",
    "                }\n",
    "        \n",
    "        # Per-merchant aggregates for this chunk.\n",
    "        merchant_chunk = chunk.groupby('merchant_id').agg({\n",
    "            'action_type': ['count', lambda x: (x == 1).sum(), 'std'],\n",
    "            'user_id': 'nunique',\n",
    "            'brand_id': 'nunique'\n",
    "        }).reset_index()\n",
    "        \n",
    "        merchant_chunk.columns = ['merchant_id', 'total_actions', 'buys', 'action_std',\n",
    "                                 'unique_users', 'unique_brands']\n",
    "        \n",
    "        # Accumulate merchant features across chunks.\n",
    "        for _, row in merchant_chunk.iterrows():\n",
    "            mid = row['merchant_id']\n",
    "            if mid in merchant_features_dict:\n",
    "                acc = merchant_features_dict[mid]\n",
    "                acc['total_actions'] += row['total_actions']\n",
    "                acc['buys'] += row['buys']\n",
    "                acc['users'].update(users_per_merchant[mid])\n",
    "            else:\n",
    "                merchant_features_dict[mid] = {\n",
    "                    'total_actions': row['total_actions'],\n",
    "                    'buys': row['buys'],\n",
    "                    'users': set(users_per_merchant[mid]),\n",
    "                    'unique_brands': row['unique_brands']\n",
    "                }\n",
    "        \n",
    "        del chunk, um_chunk, user_chunk, merchant_chunk, merchants_per_user, users_per_merchant\n",
    "        gc.collect()\n",
    "    \n",
    "    # Merge the per-chunk user-merchant frames.\n",
    "    # BUGFIX: the old code applied .sum() to every column, which also summed the\n",
    "    # per-chunk min/max timestamps; counts are summed but the timestamps must be\n",
    "    # re-min/max-ed across chunks, otherwise action_time_span is wrong for any\n",
    "    # pair whose rows span more than one chunk.\n",
    "    user_merchant_features = pd.concat(user_merchant_features, axis=0)\n",
    "    user_merchant_features = user_merchant_features.groupby(['user_id', 'merchant_id'], as_index=False).agg({\n",
    "        'total_actions': 'sum', 'clicks': 'sum', 'buys': 'sum',\n",
    "        'collects': 'sum', 'carts': 'sum',\n",
    "        'unique_brands': 'sum',  # approximate: a brand seen in two chunks counts twice\n",
    "        'first_action_time': 'min', 'last_action_time': 'max'\n",
    "    })\n",
    "    \n",
    "    # Ratio and time-span features (1e-5 guards against division by zero).\n",
    "    user_merchant_features['buy_ratio'] = user_merchant_features['buys'] / (user_merchant_features['total_actions'] + 1e-5)\n",
    "    user_merchant_features['click_ratio'] = user_merchant_features['clicks'] / (user_merchant_features['total_actions'] + 1e-5)\n",
    "    user_merchant_features['collect_ratio'] = user_merchant_features['collects'] / (user_merchant_features['total_actions'] + 1e-5)\n",
    "    user_merchant_features['cart_ratio'] = user_merchant_features['carts'] / (user_merchant_features['total_actions'] + 1e-5)\n",
    "    user_merchant_features['action_time_span'] = (user_merchant_features['last_action_time'] - user_merchant_features['first_action_time']) / (24*3600)\n",
    "    \n",
    "    # Per-user feature table. The 24*3600 divisor assumes epoch-second\n",
    "    # timestamps -- TODO confirm against the log format.\n",
    "    user_features = pd.DataFrame([\n",
    "        {'user_id': uid, 'user_total_actions': data['total_actions'], \n",
    "         'user_buys': data['buys'], 'user_unique_merchants': len(data['merchants']),\n",
    "         'user_active_days': (data['last_action'] - data['first_action']) / (24*3600) + 1}\n",
    "        for uid, data in user_features_dict.items()\n",
    "    ])\n",
    "    user_features['user_buy_ratio'] = user_features['user_buys'] / (user_features['user_total_actions'] + 1e-5)\n",
    "    user_features['user_daily_actions'] = user_features['user_total_actions'] / (user_features['user_active_days'] + 1e-5)\n",
    "    \n",
    "    # Per-merchant feature table.\n",
    "    merchant_features = pd.DataFrame([\n",
    "        {'merchant_id': mid, 'merchant_total_actions': data['total_actions'],\n",
    "         'merchant_buys': data['buys'], 'merchant_unique_users': len(data['users']),\n",
    "         'merchant_unique_brands': data['unique_brands']}\n",
    "        for mid, data in merchant_features_dict.items()\n",
    "    ])\n",
    "    merchant_features['merchant_buy_ratio'] = merchant_features['merchant_buys'] / (merchant_features['merchant_total_actions'] + 1e-5)\n",
    "    merchant_features['merchant_user_diversity'] = merchant_features['merchant_unique_users'] / (merchant_features['merchant_total_actions'] + 1e-5)\n",
    "    \n",
    "    # Optional demographics; NaN filled with sentinel codes (gender=2, age_range=0).\n",
    "    if user_info_file and os.path.exists(user_info_file):\n",
    "        user_info = pd.read_csv(user_info_file)\n",
    "        user_info['gender'] = user_info['gender'].fillna(2)\n",
    "        user_info['age_range'] = user_info['age_range'].fillna(0)\n",
    "    \n",
    "    # Left-join all feature tables onto the sample rows.\n",
    "    features = samples.merge(user_merchant_features, on=['user_id', 'merchant_id'], how='left')\n",
    "    features = features.merge(user_features, on='user_id', how='left')\n",
    "    features = features.merge(merchant_features, on='merchant_id', how='left')\n",
    "    \n",
    "    if user_info_file and os.path.exists(user_info_file):\n",
    "        features = features.merge(user_info, on='user_id', how='left')\n",
    "    \n",
    "    features.fillna(0, inplace=True)\n",
    "    \n",
    "    if need_label:\n",
    "        features['label'] = samples['label']\n",
    "    \n",
    "    if out_file:\n",
    "        features.to_csv(out_file, index=False)\n",
    "        print(f\"已保存处理后的数据到: {out_file}\")\n",
    "    \n",
    "    return features\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "id": "89282d59",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 2. Feature engineering\n",
    "def enhanced_feature_engineering(data):\n",
    "    \"\"\"Add derived interaction, ratio and binned features to ``data`` in place.\n",
    "\n",
    "    Every feature group is guarded by column-existence checks so the same\n",
    "    function runs on both the train and test frames. Returns ``data``.\n",
    "    \"\"\"\n",
    "    # Base engagement features\n",
    "    if all(col in data.columns for col in ['user_total_actions', 'user_unique_merchants']):\n",
    "        data['user_engagement'] = np.log1p(data['user_total_actions']) * np.log1p(data['user_unique_merchants'])\n",
    "        data['user_merchant_diversity'] = data['user_unique_merchants'] / (data['user_total_actions'] + 1e-5)\n",
    "    \n",
    "    if all(col in data.columns for col in ['carts', 'collects', 'buys']):\n",
    "        data['pre_purchase_actions'] = data['carts'] + data['collects']\n",
    "        data['purchase_conversion'] = data['buys'] / (data['pre_purchase_actions'] + 1e-5)\n",
    "        \n",
    "    # User-depth features\n",
    "    if all(col in data.columns for col in ['user_total_actions', 'user_buys', 'user_active_days']):\n",
    "        data['user_purchase_intensity'] = data['user_buys'] / (data['user_active_days'] + 1e-5)\n",
    "        data['user_activity_level'] = np.log1p(data['user_total_actions']) / np.log1p(data['user_active_days'] + 1)\n",
    "        data['user_purchase_rate'] = data['user_buys'] / (data['user_total_actions'] + 1e-5)\n",
    "        data['user_loyalty_score'] = data['user_active_days'] * data['user_purchase_intensity']\n",
    "    \n",
    "    # Merchant-depth features\n",
    "    if all(col in data.columns for col in ['merchant_total_actions', 'merchant_unique_users', 'merchant_buys']):\n",
    "        data['merchant_popularity_score'] = np.log1p(data['merchant_total_actions']) * np.log1p(data['merchant_unique_users'])\n",
    "        data['merchant_conversion_rate'] = data['merchant_buys'] / (data['merchant_total_actions'] + 1e-5)\n",
    "        data['merchant_user_engagement'] = data['merchant_total_actions'] / (data['merchant_unique_users'] + 1e-5)\n",
    "    \n",
    "    # Pairwise user x merchant interaction-strength features\n",
    "    if all(col in data.columns for col in ['total_actions', 'user_total_actions', 'merchant_total_actions']):\n",
    "        data['user_merchant_interaction_ratio'] = data['total_actions'] / (data['user_total_actions'] + 1e-5)\n",
    "        data['merchant_user_interaction_ratio'] = data['total_actions'] / (data['merchant_total_actions'] + 1e-5)\n",
    "        data['interaction_strength'] = data['total_actions'] / (np.sqrt(data['user_total_actions'] * data['merchant_total_actions']) + 1e-5)\n",
    "    \n",
    "    # Time-dimension features. BUGFIX: also guard 'total_actions', which is\n",
    "    # used below (previously a missing column would raise KeyError).\n",
    "    if 'action_time_span' in data.columns and 'total_actions' in data.columns:\n",
    "        data['is_repeat_customer'] = (data['action_time_span'] > 1).astype(int)\n",
    "        data['interaction_frequency'] = data['total_actions'] / (data['action_time_span'] + 1e-5)\n",
    "        data['time_concentration'] = 1 / (data['action_time_span'] + 1)\n",
    "        data['loyalty_indicator'] = data['action_time_span'] * data['total_actions']\n",
    "    \n",
    "    # Behaviour-pattern match between user, pair and merchant buy ratios\n",
    "    if all(col in data.columns for col in ['user_buy_ratio', 'buy_ratio', 'merchant_buy_ratio']):\n",
    "        data['buy_behavior_match'] = 1 / (1 + np.abs(data['user_buy_ratio'] - data['buy_ratio']))\n",
    "        data['user_merchant_buy_compatibility'] = 1 / (1 + np.abs(data['user_buy_ratio'] - data['merchant_buy_ratio']))\n",
    "        data['behavior_alignment_score'] = (data['buy_behavior_match'] + data['user_merchant_buy_compatibility']) / 2\n",
    "    \n",
    "    # Action-mix statistics\n",
    "    if all(col in data.columns for col in ['clicks', 'buys', 'collects', 'carts']):\n",
    "        data['action_diversity'] = (data[['clicks', 'buys', 'collects', 'carts']] > 0).sum(axis=1)\n",
    "        data['click_to_buy_ratio'] = data['buys'] / (data['clicks'] + 1e-5)\n",
    "        data['collect_to_buy_ratio'] = data['buys'] / (data['collects'] + 1e-5)\n",
    "        data['cart_to_buy_ratio'] = data['buys'] / (data['carts'] + 1e-5)\n",
    "        data['browse_to_action_ratio'] = (data['collects'] + data['carts']) / (data['clicks'] + 1e-5)\n",
    "        \n",
    "        # Funnel / efficiency features\n",
    "        data['action_efficiency'] = data['buys'] / (data['clicks'] + data['collects'] + data['carts'] + 1e-5)\n",
    "        data['purchase_funnel_completion'] = data['buys'] * 4 / (data['clicks'] + data['collects'] + data['carts'] + data['buys'] + 1e-5)\n",
    "    \n",
    "    # Relative match features\n",
    "    if all(col in data.columns for col in ['user_daily_actions', 'merchant_user_diversity']):\n",
    "        data['user_merchant_match_score'] = data['user_daily_actions'] * data['merchant_user_diversity']\n",
    "    \n",
    "    if all(col in data.columns for col in ['unique_brands', 'merchant_unique_brands']):\n",
    "        data['brand_coverage_ratio'] = data['unique_brands'] / (data['merchant_unique_brands'] + 1e-5)\n",
    "        data['brand_exploration_score'] = np.log1p(data['unique_brands']) / np.log1p(data['merchant_unique_brands'] + 1)\n",
    "    \n",
    "    # User / merchant value tiers.\n",
    "    # BUGFIX: include_lowest=True puts an exact 0 count (common after the\n",
    "    # upstream fillna(0)) into the first bin; previously 0 fell outside\n",
    "    # (0, 10] -> NaN -> .astype(int) raised ValueError.\n",
    "    if 'user_total_actions' in data.columns:\n",
    "        data['user_activity_level_cat'] = pd.cut(data['user_total_actions'], \n",
    "                                                 bins=[0, 10, 50, 200, np.inf], \n",
    "                                                 labels=[0, 1, 2, 3],\n",
    "                                                 include_lowest=True).astype(int)\n",
    "    \n",
    "    if 'merchant_total_actions' in data.columns:\n",
    "        data['merchant_popularity_level'] = pd.cut(data['merchant_total_actions'], \n",
    "                                                  bins=[0, 100, 1000, 5000, np.inf], \n",
    "                                                  labels=[0, 1, 2, 3],\n",
    "                                                  include_lowest=True).astype(int)\n",
    "    \n",
    "    # Non-linear transforms of the ratio features\n",
    "    ratio_cols = ['buy_ratio', 'click_ratio', 'collect_ratio', 'cart_ratio']\n",
    "    for col in ratio_cols:\n",
    "        if col in data.columns:\n",
    "            data[f'{col}_log'] = np.log1p(data[col])\n",
    "            data[f'{col}_sqrt'] = np.sqrt(data[col])\n",
    "            data[f'{col}_squared'] = data[col] ** 2\n",
    "    \n",
    "    # Cross terms of engagement and popularity\n",
    "    if all(col in data.columns for col in ['user_engagement', 'merchant_popularity_score']):\n",
    "        data['engagement_popularity_product'] = data['user_engagement'] * data['merchant_popularity_score']\n",
    "        data['engagement_popularity_ratio'] = data['user_engagement'] / (data['merchant_popularity_score'] + 1e-5)\n",
    "    \n",
    "    # Categorical demographics\n",
    "    if 'gender' in data.columns:\n",
    "        data['gender'] = data['gender'].astype(int)\n",
    "    if 'age_range' in data.columns:\n",
    "        data['age_range'] = data['age_range'].astype(int)\n",
    "        # Age x purchase-behaviour interaction\n",
    "        if 'user_buy_ratio' in data.columns:\n",
    "            data['age_purchase_interaction'] = data['age_range'] * data['user_buy_ratio']\n",
    "    \n",
    "    return data\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "id": "bd0751b3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 3. Feature selection\n",
    "def feature_selection(data, target='label', num_features=30):\n",
    "    \"\"\"Select the top features by a blended LightGBM/XGBoost importance score.\n",
    "\n",
    "    Trains one classifier of each kind on all numeric columns (minus the id\n",
    "    and target columns) and ranks features by 0.6 * LightGBM importance\n",
    "    + 0.4 * XGBoost importance, returning the ``num_features`` best names.\n",
    "    \"\"\"\n",
    "    drop_cols = ['user_id', 'merchant_id', target]\n",
    "    X = data.select_dtypes(include=np.number).drop(columns=drop_cols, errors='ignore')\n",
    "    y = data[target]\n",
    "    \n",
    "    # Ranker 1: LightGBM feature importance.\n",
    "    lgb_ranker = lgb.LGBMClassifier(\n",
    "        n_estimators=200,\n",
    "        learning_rate=0.1,\n",
    "        num_leaves=50,\n",
    "        random_state=42,\n",
    "        verbose=-1\n",
    "    )\n",
    "    lgb_ranker.fit(X, y)\n",
    "    \n",
    "    # Ranker 2: XGBoost feature importance.\n",
    "    xgb_ranker = xgb.XGBClassifier(\n",
    "        n_estimators=200,\n",
    "        learning_rate=0.1,\n",
    "        random_state=42,\n",
    "        verbosity=0\n",
    "    )\n",
    "    xgb_ranker.fit(X, y)\n",
    "    \n",
    "    # Blend both scores into a single ranking (60% LightGBM, 40% XGBoost).\n",
    "    feature_importance = pd.DataFrame({\n",
    "        'feature': X.columns,\n",
    "        'importance_lgb': lgb_ranker.feature_importances_,\n",
    "        'importance_xgb': xgb_ranker.feature_importances_\n",
    "    })\n",
    "    feature_importance['combined_importance'] = (\n",
    "        0.6 * feature_importance['importance_lgb']\n",
    "        + 0.4 * feature_importance['importance_xgb']\n",
    "    )\n",
    "    feature_importance = feature_importance.sort_values('combined_importance', ascending=False)\n",
    "    \n",
    "    print(f\"Top 15 重要特征:\")\n",
    "    print(feature_importance.head(15)[['feature', 'combined_importance']])\n",
    "    \n",
    "    # Keep only the names of the strongest features.\n",
    "    return feature_importance.head(num_features)['feature'].tolist()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "id": "6e060cad",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 4. Model training with grid search\n",
    "def advanced_model_training(train_data, features, target='label'):\n",
    "    \"\"\"Tune a SMOTE + LightGBM pipeline with stratified grid search.\n",
    "\n",
    "    Returns (best_estimator, best cross-validated AUC).\n",
    "    \"\"\"\n",
    "    X = train_data[features]\n",
    "    y = train_data[target]\n",
    "    \n",
    "    # Conservative minority-class oversampling, applied inside the pipeline.\n",
    "    pipeline = ImbPipeline([\n",
    "        ('smote', SMOTE(sampling_strategy=0.3, random_state=42, k_neighbors=3)),\n",
    "        ('model', lgb.LGBMClassifier(random_state=42, verbose=-1))\n",
    "    ])\n",
    "    \n",
    "    # Search grid for the LightGBM step ('model__' prefixes target that step).\n",
    "    param_grid = {\n",
    "        'model__n_estimators': [200, 300, 500],\n",
    "        'model__learning_rate': [0.05, 0.08, 0.1],\n",
    "        'model__num_leaves': [31, 50, 70],\n",
    "        'model__max_depth': [6, 8, 10],\n",
    "        'model__feature_fraction': [0.8, 0.9],\n",
    "        'model__bagging_fraction': [0.8, 0.9],\n",
    "        'model__reg_alpha': [0, 0.1],\n",
    "        'model__reg_lambda': [0, 0.1]\n",
    "    }\n",
    "    \n",
    "    # Stratified 5-fold CV keeps the class ratio stable in every fold.\n",
    "    grid_search = GridSearchCV(\n",
    "        estimator=pipeline,\n",
    "        param_grid=param_grid,\n",
    "        cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=42),\n",
    "        scoring='roc_auc',\n",
    "        n_jobs=1,\n",
    "        verbose=1\n",
    "    )\n",
    "    grid_search.fit(X, y)\n",
    "    \n",
    "    print(f\"最佳参数: {grid_search.best_params_}\")\n",
    "    print(f\"最佳AUC: {grid_search.best_score_:.4f}\")\n",
    "    \n",
    "    return grid_search.best_estimator_, grid_search.best_score_"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "id": "d7066212",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 5. Ensemble model training\n",
    "def ensemble_model_training(train_data, features, target='label'):\n",
    "    \"\"\"Train LightGBM, XGBoost and RandomForest on SMOTE-balanced data and\n",
    "    blend them into a weighted soft-voting ensemble.\n",
    "\n",
    "    Returns (ensemble, ensemble validation AUC). Individual models are\n",
    "    scored on a stratified 20% hold-out; the best one gets the largest\n",
    "    ensemble weight.\n",
    "    \"\"\"\n",
    "    X = train_data[features]\n",
    "    y = train_data[target]\n",
    "    \n",
    "    # Stratified 80/20 split; the hold-out is used to weight the models.\n",
    "    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)\n",
    "    \n",
    "    # More aggressive oversampling here than in grid search (minority -> 40%).\n",
    "    smote = SMOTE(sampling_strategy=0.4, random_state=42, k_neighbors=5)\n",
    "    X_train_balanced, y_train_balanced = smote.fit_resample(X_train, y_train)\n",
    "    \n",
    "    print(f\"重采样后的数据分布: {np.bincount(y_train_balanced)}\")\n",
    "    \n",
    "    # Individual models keyed by short name.\n",
    "    models = {}\n",
    "    \n",
    "    # LightGBM (class_weight='balanced' on top of the SMOTE resampling).\n",
    "    lgb_model = lgb.LGBMClassifier(\n",
    "        n_estimators=500,\n",
    "        learning_rate=0.05,\n",
    "        num_leaves=64,\n",
    "        max_depth=10,\n",
    "        feature_fraction=0.8,\n",
    "        bagging_fraction=0.8,\n",
    "        bagging_freq=5,\n",
    "        reg_alpha=0.1,\n",
    "        reg_lambda=0.1,\n",
    "        min_child_samples=20,\n",
    "        class_weight='balanced',\n",
    "        random_state=42,\n",
    "        verbose=-1\n",
    "    )\n",
    "    lgb_model.fit(X_train_balanced, y_train_balanced)\n",
    "    models['lgb'] = lgb_model\n",
    "    \n",
    "    # XGBoost; scale_pos_weight is computed from the already-resampled labels.\n",
    "    xgb_model = xgb.XGBClassifier(\n",
    "        n_estimators=500,\n",
    "        learning_rate=0.05,\n",
    "        max_depth=10,\n",
    "        subsample=0.8,\n",
    "        colsample_bytree=0.8,\n",
    "        reg_alpha=0.1,\n",
    "        reg_lambda=0.1,\n",
    "        min_child_weight=5,\n",
    "        scale_pos_weight=len(y_train_balanced[y_train_balanced==0])/len(y_train_balanced[y_train_balanced==1]),\n",
    "        random_state=42,\n",
    "        verbosity=0\n",
    "    )\n",
    "    xgb_model.fit(X_train_balanced, y_train_balanced)\n",
    "    models['xgb'] = xgb_model\n",
    "    \n",
    "    # Random forest\n",
    "    rf_model = RandomForestClassifier(\n",
    "        n_estimators=300,\n",
    "        max_depth=20,\n",
    "        min_samples_split=5,\n",
    "        min_samples_leaf=2,\n",
    "        max_features='sqrt',\n",
    "        class_weight='balanced',\n",
    "        random_state=42,\n",
    "        n_jobs=1\n",
    "    )\n",
    "    rf_model.fit(X_train_balanced, y_train_balanced)\n",
    "    models['rf'] = rf_model\n",
    "    \n",
    "    # Validation AUC for each single model.\n",
    "    model_scores = {}\n",
    "    for name, model in models.items():\n",
    "        val_pred = model.predict_proba(X_val)[:, 1]\n",
    "        score = roc_auc_score(y_val, val_pred)\n",
    "        model_scores[name] = score\n",
    "        print(f\"{name.upper()} 验证AUC: {score:.4f}\")\n",
    "        \n",
    "        # Report how many positives a lowered 0.3 threshold would produce.\n",
    "        val_pred_binary = (val_pred > 0.3).astype(int)  # lowered threshold\n",
    "        print(f\"{name.upper()} 预测正样本数: {val_pred_binary.sum()}\")\n",
    "    \n",
    "    # Weighted soft-voting wrapper exposing an sklearn-like predict_proba.\n",
    "    class EnsembleModel:\n",
    "        def __init__(self, models, weights):\n",
    "            self.models = models\n",
    "            self.weights = weights\n",
    "            \n",
    "        def predict_proba(self, X):\n",
    "            # Weighted sum of each model's positive-class probability.\n",
    "            predictions = []\n",
    "            for name, model in self.models.items():\n",
    "                pred = model.predict_proba(X)[:, 1]\n",
    "                predictions.append(pred * self.weights[name])\n",
    "            \n",
    "            ensemble_pred = np.sum(predictions, axis=0)\n",
    "            return np.vstack([1 - ensemble_pred, ensemble_pred]).T\n",
    "    \n",
    "    # Weighting: the best model gets 0.5; the others share the remaining half\n",
    "    # in proportion to their score, then all weights are normalised to sum to 1.\n",
    "    max_score = max(model_scores.values())\n",
    "    weights = {}\n",
    "    for name, score in model_scores.items():\n",
    "        if score == max_score:\n",
    "            weights[name] = 0.5  # best model: 50% before normalisation\n",
    "        else:\n",
    "            weights[name] = 0.5 * (score / max_score) / (len(model_scores) - 1)\n",
    "    \n",
    "    # Normalise weights to sum to 1.\n",
    "    total_weight = sum(weights.values())\n",
    "    weights = {name: w / total_weight for name, w in weights.items()}\n",
    "    \n",
    "    ensemble = EnsembleModel(models, weights)\n",
    "    \n",
    "    # Ensemble validation AUC.\n",
    "    ensemble_pred = ensemble.predict_proba(X_val)[:, 1]\n",
    "    ensemble_score = roc_auc_score(y_val, ensemble_pred)\n",
    "    print(f\"\\n集成模型验证AUC: {ensemble_score:.4f}\")\n",
    "    print(f\"优化后的模型权重: {weights}\")\n",
    "    \n",
    "    # Positive count under the lowered 0.3 threshold, for reference.\n",
    "    ensemble_pred_binary = (ensemble_pred > 0.3).astype(int)\n",
    "    print(f\"集成模型预测正样本数: {ensemble_pred_binary.sum()}\")\n",
    "    \n",
    "    return ensemble, ensemble_score\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "id": "1d3352ff",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 6. Prediction with adaptive threshold\n",
    "def make_predictions(model, data, features, out_file=None):\n",
    "    \"\"\"Score ``data`` with ``model`` and binarise with an adaptive threshold.\n",
    "\n",
    "    The threshold is picked from the predicted-probability distribution and\n",
    "    capped so that at least ~5% of rows are labelled positive. Writes\n",
    "    user_id, merchant_id, prob and label to ``out_file`` when given.\n",
    "    \"\"\"\n",
    "    predictions = data[['user_id', 'merchant_id']].copy()\n",
    "    \n",
    "    # Fill feature columns missing from this frame with 0.\n",
    "    # NOTE(review): this mutates the caller's DataFrame in place -- confirm acceptable.\n",
    "    for feature in features:\n",
    "        if feature not in data.columns:\n",
    "            data[feature] = 0\n",
    "    \n",
    "    predictions['prob'] = model.predict_proba(data[features])[:, 1]\n",
    "    \n",
    "    # Adaptive threshold derived from the probability distribution.\n",
    "    prob_median = predictions['prob'].median()\n",
    "    prob_75 = predictions['prob'].quantile(0.75)\n",
    "    prob_90 = predictions['prob'].quantile(0.90)\n",
    "    \n",
    "    print(f\"概率分布统计:\")\n",
    "    print(f\"中位数: {prob_median:.4f}\")\n",
    "    print(f\"75分位数: {prob_75:.4f}\")\n",
    "    print(f\"90分位数: {prob_90:.4f}\")\n",
    "    \n",
    "    # Prefer the 75th percentile when the upper tail is heavy.\n",
    "    if prob_90 > 0.4:\n",
    "        threshold = prob_75  # high 90th percentile: use the 75th percentile\n",
    "    else:\n",
    "        threshold = prob_median  # otherwise fall back to the median\n",
    "    \n",
    "    # Guarantee at least ~5% positives: cap the threshold at the probability\n",
    "    # ranked 5% from the top (min() lowers the threshold, so >= 5% pass it).\n",
    "    min_positive_ratio = 0.05  # at least 5% positive predictions\n",
    "    sorted_probs = predictions['prob'].sort_values(ascending=False)\n",
    "    min_threshold = sorted_probs.iloc[int(len(sorted_probs) * min_positive_ratio)]\n",
    "    \n",
    "    final_threshold = min(threshold, min_threshold)\n",
    "    print(f\"最终使用的阈值: {final_threshold:.4f}\")\n",
    "    \n",
    "    predictions['label'] = (predictions['prob'] > final_threshold).astype(int)\n",
    "    \n",
    "    print(f\"预测正样本数: {predictions['label'].sum()}\")\n",
    "    print(f\"正样本比例: {predictions['label'].mean():.4f}\")\n",
    "    \n",
    "    if out_file:\n",
    "        predictions.to_csv(out_file, index=False)\n",
    "        print(f\"预测结果已保存到: {out_file}\")\n",
    "    \n",
    "    return predictions\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "id": "9a0fbb4f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 7. Main pipeline\n",
    "def main():\n",
    "    \"\"\"End-to-end run: preprocess -> feature engineering -> selection -> ensemble -> predict.\"\"\"\n",
    "    data_dir = f'{cwd}/data/'\n",
    "    tmp_dir = f'{cwd}/tmp/'\n",
    "    \n",
    "    train_file = os.path.join(data_dir, 'train.csv')\n",
    "    test_file = os.path.join(data_dir, 'test_without_label.csv')\n",
    "    logs_file = os.path.join(data_dir, 'user_log_format2.csv')\n",
    "    user_info_file = os.path.join(data_dir, 'user_info_format1.csv')\n",
    "    \n",
    "    # Cached intermediates and final output locations.\n",
    "    train_out_file = os.path.join(tmp_dir, 'enhanced_train_v2.csv')\n",
    "    test_out_file = os.path.join(tmp_dir, 'enhanced_test_v2.csv')\n",
    "    result_file = os.path.join(tmp_dir, 'final_predictions_v2.csv')\n",
    "    \n",
    "    try:\n",
    "        print(\"=\"*50)\n",
    "        print(\"开始优化数据预处理...\")\n",
    "        train_data = enhanced_data_process(\n",
    "            train_file, logs_file, user_info_file, \n",
    "            need_label=True, out_file=train_out_file\n",
    "        )\n",
    "        test_data = enhanced_data_process(\n",
    "            test_file, logs_file, user_info_file, \n",
    "            need_label=False, out_file=test_out_file\n",
    "        )\n",
    "        \n",
    "        print(\"\\n进行增强特征工程...\")\n",
    "        train_data = enhanced_feature_engineering(train_data)\n",
    "        test_data = enhanced_feature_engineering(test_data)\n",
    "        \n",
    "        feature_columns = [col for col in train_data.columns \n",
    "                          if col not in ['user_id', 'merchant_id', 'label']]\n",
    "        \n",
    "        print(f\"\\n构建特征数量: {len(feature_columns)}\")\n",
    "        print(\"构建特征名称:\")\n",
    "        print(\",\".join(feature_columns))\n",
    "        \n",
    "        print(\"\\n进行优化特征选择...\")\n",
    "        selected_features = feature_selection(train_data, num_features=30)\n",
    "        print(f\"选择的{len(selected_features)}个特征: {selected_features}\")\n",
    "        \n",
    "        print(\"\\n训练集成模型...\")\n",
    "        model, auc_score = ensemble_model_training(train_data, selected_features)\n",
    "        print(f\"最终集成模型AUC: {auc_score:.4f}\")\n",
    "        \n",
    "        print(\"\\n进行预测...\")\n",
    "        result = make_predictions(model, test_data, selected_features, out_file=result_file)\n",
    "        \n",
    "        print(\"\\n预测结果统计:\")\n",
    "        print(result['label'].value_counts())\n",
    "        print(f\"预测概率分布:\")\n",
    "        print(f\"最小值: {result['prob'].min():.4f}\")\n",
    "        print(f\"最大值: {result['prob'].max():.4f}\")\n",
    "        print(f\"平均值: {result['prob'].mean():.4f}\")\n",
    "        print(f\"中位数: {result['prob'].median():.4f}\")\n",
    "        \n",
    "        print(\"\\n预测结果样例:\")\n",
    "        print(result.head(10))\n",
    "        print(f\"\\n预测完成! 结果已保存到 {result_file}\")\n",
    "        \n",
    "        # Model summary.\n",
    "        # BUGFIX: was print(\"\\n=\" * 50), which repeated the two-char string\n",
    "        # \"\\n=\" 50 times (50 lines of '='), unlike the \"=\"*50 separator above.\n",
    "        print(\"\\n\" + \"=\" * 50)\n",
    "        print(\"模型性能分析:\")\n",
    "        print(f\"- 使用了{len(selected_features)}个最重要的特征\")\n",
    "        print(f\"- 集成了LightGBM、XGBoost和随机森林三个模型\")\n",
    "        print(f\"- 使用5折交叉验证进行模型评估\")\n",
    "        print(f\"- 通过SMOTE技术处理类别不平衡问题\")\n",
    "        print(f\"- 最终AUC得分: {auc_score:.4f}\")\n",
    "        \n",
    "    except Exception as e:\n",
    "        print(f\"程序执行出错: {str(e)}\")\n",
    "        import traceback\n",
    "        traceback.print_exc()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "id": "914f2f80",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "==================================================\n",
      "开始优化数据预处理...\n",
      "\n",
      "进行增强特征工程...\n",
      "\n",
      "构建特征数量: 75\n",
      "构建特征名称:\n",
      "total_actions,clicks,buys,collects,carts,unique_brands,first_action_time,last_action_time,buy_ratio,click_ratio,collect_ratio,cart_ratio,action_time_span,user_total_actions,user_buys,user_unique_merchants,user_active_days,user_buy_ratio,user_daily_actions,merchant_total_actions,merchant_buys,merchant_unique_users,merchant_unique_brands,merchant_buy_ratio,merchant_user_diversity,age_range,gender,user_engagement,user_merchant_diversity,pre_purchase_actions,purchase_conversion,user_purchase_intensity,user_activity_level,user_purchase_rate,user_loyalty_score,merchant_popularity_score,merchant_conversion_rate,merchant_user_engagement,user_merchant_interaction_ratio,merchant_user_interaction_ratio,interaction_strength,is_repeat_customer,interaction_frequency,time_concentration,loyalty_indicator,buy_behavior_match,user_merchant_buy_compatibility,behavior_alignment_score,action_diversity,click_to_buy_ratio,collect_to_buy_ratio,cart_to_buy_ratio,browse_to_action_ratio,action_efficiency,purchase_funnel_completion,user_merchant_match_score,brand_coverage_ratio,brand_exploration_score,user_activity_level_cat,merchant_popularity_level,buy_ratio_log,buy_ratio_sqrt,buy_ratio_squared,click_ratio_log,click_ratio_sqrt,click_ratio_squared,collect_ratio_log,collect_ratio_sqrt,collect_ratio_squared,cart_ratio_log,cart_ratio_sqrt,cart_ratio_squared,engagement_popularity_product,engagement_popularity_ratio,age_purchase_interaction\n",
      "\n",
      "进行优化特征选择...\n",
      "Top 15 重要特征:\n",
      "                            feature  combined_importance\n",
      "42            interaction_frequency           279.005244\n",
      "23               merchant_buy_ratio           273.008710\n",
      "24          merchant_user_diversity           259.209705\n",
      "21            merchant_unique_users           252.010979\n",
      "40             interaction_strength           246.605910\n",
      "55        user_merchant_match_score           246.605189\n",
      "72    engagement_popularity_product           241.805923\n",
      "73      engagement_popularity_ratio           241.205073\n",
      "39  merchant_user_interaction_ratio           235.206039\n",
      "28          user_merchant_diversity           214.205428\n",
      "16                 user_active_days           202.205635\n",
      "25                        age_range           198.005411\n",
      "38  user_merchant_interaction_ratio           194.405582\n",
      "20                    merchant_buys           193.210824\n",
      "19           merchant_total_actions           186.009413\n",
      "选择的30个特征: ['interaction_frequency', 'merchant_buy_ratio', 'merchant_user_diversity', 'merchant_unique_users', 'interaction_strength', 'user_merchant_match_score', 'engagement_popularity_product', 'engagement_popularity_ratio', 'merchant_user_interaction_ratio', 'user_merchant_diversity', 'user_active_days', 'age_range', 'user_merchant_interaction_ratio', 'merchant_buys', 'merchant_total_actions', 'user_merchant_buy_compatibility', 'collect_ratio', 'browse_to_action_ratio', 'loyalty_indicator', 'click_ratio', 'merchant_user_engagement', 'user_daily_actions', 'user_engagement', 'merchant_popularity_score', 'user_total_actions', 'cart_ratio', 'first_action_time', 'gender', 'collects', 'total_actions']\n",
      "\n",
      "训练集成模型...\n",
      "重采样后的数据分布: [173394  69357]\n",
      "LGB 验证AUC: 0.6140\n",
      "LGB 预测正样本数: 7797\n",
      "XGB 验证AUC: 0.5993\n",
      "XGB 预测正样本数: 6312\n",
      "RF 验证AUC: 0.5744\n",
      "RF 预测正样本数: 12699\n",
      "\n",
      "集成模型验证AUC: 0.6081\n",
      "优化后的模型权重: {'lgb': 0.5113053713939978, 'xgb': 0.24953972079519823, 'rf': 0.23915490781080395}\n",
      "集成模型预测正样本数: 7991\n",
      "最终集成模型AUC: 0.6081\n",
      "\n",
      "进行预测...\n",
      "概率分布统计:\n",
      "中位数: 0.1690\n",
      "75分位数: 0.2589\n",
      "90分位数: 0.3658\n",
      "最终使用的阈值: 0.1690\n",
      "预测正样本数: 15000\n",
      "正样本比例: 0.5000\n",
      "预测结果已保存到: F:/H/jqxx-master/hjh/tmp/final_predictions_v2.csv\n",
      "\n",
      "预测结果统计:\n",
      "0    15000\n",
      "1    15000\n",
      "Name: label, dtype: int64\n",
      "预测概率分布:\n",
      "最小值: 0.0181\n",
      "最大值: 0.8615\n",
      "平均值: 0.2007\n",
      "中位数: 0.1690\n",
      "\n",
      "预测结果样例:\n",
      "   user_id  merchant_id      prob  label\n",
      "0    34176         3906  0.152284      0\n",
      "1    34176          121  0.075624      0\n",
      "2    34176         4356  0.147236      0\n",
      "3    34176         2217  0.139550      0\n",
      "4   230784         4818  0.288273      1\n",
      "5   362112         2618  0.098280      0\n",
      "6    34944         2051  0.436804      1\n",
      "7   231552         3828  0.492496      1\n",
      "8   231552         2124  0.052780      0\n",
      "9   232320         1168  0.094248      0\n",
      "\n",
      "预测完成! 结果已保存到 F:/H/jqxx-master/hjh/tmp/final_predictions_v2.csv\n",
      "\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "=\n",
      "模型性能分析:\n",
      "- 使用了30个最重要的特征\n",
      "- 集成了LightGBM、XGBoost和随机森林三个模型\n",
      "- 使用5折交叉验证进行模型评估\n",
      "- 通过SMOTE技术处理类别不平衡问题\n",
      "- 最终AUC得分: 0.6081\n"
     ]
    }
   ],
   "source": [
    "# 执行主程序\n",
    "if __name__ == '__main__':\n",
    "    main()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "79129e2b",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
