{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import argparse\n",
    "import gc\n",
    "import os\n",
    "import random\n",
    "import warnings\n",
    "\n",
    "import joblib\n",
    "import lightgbm as lgb\n",
    "import pandas as pd\n",
    "from sklearn.model_selection import GroupKFold\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Silence library warnings and fix the RNG seed for reproducibility.\n",
    "# NOTE(review): only `random` is seeded; LightGBM uses random_state=seed\n",
    "# separately — confirm no other RNG (e.g. numpy) is relied on upstream.\n",
    "warnings.filterwarnings('ignore')\n",
    "\n",
    "seed = 2020\n",
    "random.seed(seed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_model(df_feature, df_query):\n",
    "    \"\"\"Train GroupKFold LightGBM classifiers and save one model per fold.\n",
    "\n",
    "    Rows of ``df_feature`` with a non-null 'label' form the train set;\n",
    "    null-label rows are the test set to score. Models are dumped to\n",
    "    ./user_data/model/lgb{fold}.pkl.\n",
    "\n",
    "    Returns (oof, prediction, df_importance_list) so downstream cells can\n",
    "    use them (previously these were locals and a later cell raised\n",
    "    NameError trying to read them).\n",
    "    \"\"\"\n",
    "    df_train = df_feature[df_feature['label'].notnull()]\n",
    "    df_test = df_feature[df_feature['label'].isnull()]\n",
    "\n",
    "    # Drop the combined frame; only the two views are needed from here on.\n",
    "    del df_feature\n",
    "    gc.collect()\n",
    "\n",
    "    ycol = 'label'\n",
    "    # Every column except the label and the raw datetime columns is a feature.\n",
    "    feature_names = list(\n",
    "        filter(\n",
    "            lambda x: x not in [ycol, 'created_at_datetime', 'click_datetime'],\n",
    "            df_train.columns))\n",
    "    feature_names.sort()\n",
    "\n",
    "    model = lgb.LGBMClassifier(num_leaves=64,\n",
    "                               max_depth=10,\n",
    "                               learning_rate=0.05,\n",
    "                               n_estimators=10000,\n",
    "                               subsample=0.8,\n",
    "                               feature_fraction=0.8,\n",
    "                               reg_alpha=0.5,\n",
    "                               reg_lambda=0.5,\n",
    "                               random_state=seed,\n",
    "                               importance_type='gain',\n",
    "                               metric=None)\n",
    "\n",
    "    oof = []\n",
    "    # .copy() so adding 'pred' does not write into a view of df_test\n",
    "    # (avoids SettingWithCopyWarning / silently lost assignments).\n",
    "    prediction = df_test[['user_id', 'article_id']].copy()\n",
    "    prediction['pred'] = 0\n",
    "    df_importance_list = []\n",
    "\n",
    "    # Train one model per fold; grouping by user keeps each user's rows\n",
    "    # out of the validation split of the fold they were trained on.\n",
    "    n_splits = 5\n",
    "    kfold = GroupKFold(n_splits=n_splits)\n",
    "    for fold_id, (trn_idx, val_idx) in enumerate(\n",
    "            kfold.split(df_train[feature_names], df_train[ycol],\n",
    "                        df_train['user_id'])):\n",
    "        X_train = df_train.iloc[trn_idx][feature_names]\n",
    "        Y_train = df_train.iloc[trn_idx][ycol]\n",
    "\n",
    "        X_val = df_train.iloc[val_idx][feature_names]\n",
    "        Y_val = df_train.iloc[val_idx][ycol]\n",
    "\n",
    "        print(f'\\nFold_{fold_id + 1} Training ================================\\n')\n",
    "\n",
    "        lgb_model = model.fit(X_train,\n",
    "                              Y_train,\n",
    "                              eval_names=['train', 'valid'],\n",
    "                              eval_set=[(X_train, Y_train), (X_val, Y_val)],\n",
    "                              verbose=100,\n",
    "                              eval_metric='auc',\n",
    "                              early_stopping_rounds=100)\n",
    "\n",
    "        # Out-of-fold predictions at the early-stopped best iteration.\n",
    "        pred_val = lgb_model.predict_proba(\n",
    "            X_val, num_iteration=lgb_model.best_iteration_)[:, 1]\n",
    "        df_oof = df_train.iloc[val_idx][['user_id', 'article_id', ycol]].copy()\n",
    "        df_oof['pred'] = pred_val\n",
    "        oof.append(df_oof)\n",
    "\n",
    "        # Average the test-set predictions over all folds.\n",
    "        pred_test = lgb_model.predict_proba(\n",
    "            df_test[feature_names],\n",
    "            num_iteration=lgb_model.best_iteration_)[:, 1]\n",
    "        prediction['pred'] += pred_test / n_splits\n",
    "\n",
    "        df_importance = pd.DataFrame({\n",
    "            'feature_name': feature_names,\n",
    "            'importance': lgb_model.feature_importances_,\n",
    "        })\n",
    "        df_importance_list.append(df_importance)\n",
    "\n",
    "        # Persist the fitted fold model; ensure the directory exists first.\n",
    "        # (Was dumping `model` — same fitted object, but `lgb_model` makes\n",
    "        # the intent explicit.)\n",
    "        os.makedirs('./user_data/model', exist_ok=True)\n",
    "        joblib.dump(lgb_model, f'./user_data/model/lgb{fold_id}.pkl')\n",
    "\n",
    "    return oof, prediction, df_importance_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def online_predict(df_test):\n",
    "    \"\"\"Score df_test with the saved fold models and write result.csv.\n",
    "\n",
    "    Averages predict_proba over the fold models saved by train_model and\n",
    "    writes the submission via gen_sub (defined elsewhere in the project).\n",
    "    \"\"\"\n",
    "    ycol = 'label'\n",
    "    feature_names = list(\n",
    "        filter(\n",
    "            lambda x: x not in [ycol, 'created_at_datetime', 'click_datetime'],\n",
    "            df_test.columns))\n",
    "    feature_names.sort()\n",
    "\n",
    "    # .copy() so adding 'pred' does not write into a view of df_test.\n",
    "    prediction = df_test[['user_id', 'article_id']].copy()\n",
    "    prediction['pred'] = 0\n",
    "\n",
    "    # Load from the same directory train_model dumps into\n",
    "    # (was '../user_data/model/', which did not match the dump path).\n",
    "    n_folds = 5\n",
    "    for fold_id in tqdm(range(n_folds)):\n",
    "        model = joblib.load(f'./user_data/model/lgb{fold_id}.pkl')\n",
    "        pred_test = model.predict_proba(df_test[feature_names])[:, 1]\n",
    "        prediction['pred'] += pred_test / n_folds\n",
    "\n",
    "    # Build and save the submission file.\n",
    "    df_sub = gen_sub(prediction)\n",
    "    df_sub.sort_values(['user_id'], inplace=True)\n",
    "    os.makedirs('./prediction_result', exist_ok=True)\n",
    "    df_sub.to_csv('./prediction_result/result.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Offline feature / query frames produced by the feature-engineering step.\n",
    "df_feature = pd.read_pickle('./user_data/data/offline/feature.pkl')\n",
    "df_query = pd.read_pickle('./user_data/data/offline/query.pkl')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Label-encode every object-dtype column so LightGBM receives numeric\n",
    "# input; values are stringified first so NaN/mixed types encode cleanly.\n",
    "for col in df_feature.select_dtypes('object').columns:\n",
    "    encoder = LabelEncoder()\n",
    "    df_feature[col] = encoder.fit_transform(df_feature[col].astype(str))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Fold_1 Training ================================\n",
      "\n",
      "[LightGBM] [Warning] feature_fraction is set=0.8, colsample_bytree=1.0 will be ignored. Current value: feature_fraction=0.8\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\ttrain's auc: 0.950347\tvalid's auc: 0.941918\n",
      "[200]\ttrain's auc: 0.956802\tvalid's auc: 0.943906\n",
      "[300]\ttrain's auc: 0.96152\tvalid's auc: 0.944691\n",
      "[400]\ttrain's auc: 0.965505\tvalid's auc: 0.945129\n",
      "[500]\ttrain's auc: 0.968714\tvalid's auc: 0.945435\n",
      "[600]\ttrain's auc: 0.971498\tvalid's auc: 0.945603\n",
      "[700]\ttrain's auc: 0.973979\tvalid's auc: 0.945834\n",
      "[800]\ttrain's auc: 0.976296\tvalid's auc: 0.945943\n",
      "[900]\ttrain's auc: 0.978208\tvalid's auc: 0.94604\n",
      "[1000]\ttrain's auc: 0.980099\tvalid's auc: 0.946113\n",
      "Early stopping, best iteration is:\n",
      "[998]\ttrain's auc: 0.980065\tvalid's auc: 0.946122\n",
      "\n",
      "Fold_2 Training ================================\n",
      "\n",
      "[LightGBM] [Warning] feature_fraction is set=0.8, colsample_bytree=1.0 will be ignored. Current value: feature_fraction=0.8\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\ttrain's auc: 0.950479\tvalid's auc: 0.942019\n",
      "[200]\ttrain's auc: 0.956723\tvalid's auc: 0.943871\n",
      "[300]\ttrain's auc: 0.961649\tvalid's auc: 0.944523\n",
      "[400]\ttrain's auc: 0.965772\tvalid's auc: 0.94496\n",
      "[500]\ttrain's auc: 0.969146\tvalid's auc: 0.945166\n",
      "[600]\ttrain's auc: 0.972056\tvalid's auc: 0.945215\n",
      "[700]\ttrain's auc: 0.974756\tvalid's auc: 0.945319\n",
      "[800]\ttrain's auc: 0.976908\tvalid's auc: 0.945379\n",
      "[900]\ttrain's auc: 0.978758\tvalid's auc: 0.945437\n",
      "[1000]\ttrain's auc: 0.980325\tvalid's auc: 0.945435\n",
      "Early stopping, best iteration is:\n",
      "[964]\ttrain's auc: 0.979784\tvalid's auc: 0.945457\n",
      "\n",
      "Fold_3 Training ================================\n",
      "\n",
      "[LightGBM] [Warning] feature_fraction is set=0.8, colsample_bytree=1.0 will be ignored. Current value: feature_fraction=0.8\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\ttrain's auc: 0.949977\tvalid's auc: 0.944479\n",
      "[200]\ttrain's auc: 0.956309\tvalid's auc: 0.946361\n",
      "[300]\ttrain's auc: 0.961185\tvalid's auc: 0.947037\n",
      "[400]\ttrain's auc: 0.965261\tvalid's auc: 0.947352\n",
      "[500]\ttrain's auc: 0.968449\tvalid's auc: 0.947644\n",
      "[600]\ttrain's auc: 0.971362\tvalid's auc: 0.947851\n",
      "[700]\ttrain's auc: 0.973842\tvalid's auc: 0.947913\n",
      "[800]\ttrain's auc: 0.976044\tvalid's auc: 0.947996\n",
      "[900]\ttrain's auc: 0.977998\tvalid's auc: 0.94805\n",
      "Early stopping, best iteration is:\n",
      "[895]\ttrain's auc: 0.977883\tvalid's auc: 0.948053\n",
      "\n",
      "Fold_4 Training ================================\n",
      "\n",
      "[LightGBM] [Warning] feature_fraction is set=0.8, colsample_bytree=1.0 will be ignored. Current value: feature_fraction=0.8\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\ttrain's auc: 0.950123\tvalid's auc: 0.944169\n",
      "[200]\ttrain's auc: 0.956643\tvalid's auc: 0.945955\n",
      "[300]\ttrain's auc: 0.961434\tvalid's auc: 0.946508\n",
      "[400]\ttrain's auc: 0.96558\tvalid's auc: 0.946873\n",
      "[500]\ttrain's auc: 0.969003\tvalid's auc: 0.947022\n",
      "[600]\ttrain's auc: 0.971749\tvalid's auc: 0.947148\n",
      "[700]\ttrain's auc: 0.974214\tvalid's auc: 0.947201\n",
      "Early stopping, best iteration is:\n",
      "[677]\ttrain's auc: 0.973706\tvalid's auc: 0.947232\n",
      "\n",
      "Fold_5 Training ================================\n",
      "\n",
      "[LightGBM] [Warning] feature_fraction is set=0.8, colsample_bytree=1.0 will be ignored. Current value: feature_fraction=0.8\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\ttrain's auc: 0.950556\tvalid's auc: 0.941608\n",
      "[200]\ttrain's auc: 0.956854\tvalid's auc: 0.94351\n",
      "[300]\ttrain's auc: 0.961812\tvalid's auc: 0.944247\n",
      "[400]\ttrain's auc: 0.966068\tvalid's auc: 0.944554\n",
      "[500]\ttrain's auc: 0.969297\tvalid's auc: 0.944658\n",
      "[600]\ttrain's auc: 0.972171\tvalid's auc: 0.944839\n",
      "[700]\ttrain's auc: 0.974684\tvalid's auc: 0.944869\n",
      "[800]\ttrain's auc: 0.976858\tvalid's auc: 0.944931\n",
      "[900]\ttrain's auc: 0.978864\tvalid's auc: 0.944937\n",
      "[1000]\ttrain's auc: 0.980627\tvalid's auc: 0.944911\n",
      "Early stopping, best iteration is:\n",
      "[937]\ttrain's auc: 0.979541\tvalid's auc: 0.944981\n"
     ]
    }
   ],
   "source": [
    "# Fit the fold models; writes ./user_data/model/lgb{fold}.pkl per fold.\n",
    "train_model(df_feature, df_query)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'df_importance_list' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_784/2233943040.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[1;31m# 特征重要性\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mdf_importance\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconcat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdf_importance_list\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      3\u001b[0m df_importance = df_importance.groupby([\n\u001b[0;32m      4\u001b[0m         \u001b[1;34m'feature_name'\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m     ])['importance'].agg('mean').sort_values(ascending=False).reset_index()\n",
      "\u001b[1;31mNameError\u001b[0m: name 'df_importance_list' is not defined"
     ]
    }
   ],
   "source": [
    "# Feature importance, averaged over the fold models.\n",
    "# NOTE(review): df_importance_list and oof are locals of train_model, so\n",
    "# this cell raised NameError when run as-is; train_model must return them\n",
    "# (and the calling cell capture them) for this cell to work.\n",
    "df_importance = pd.concat(df_importance_list)\n",
    "df_importance = df_importance.groupby(\n",
    "    ['feature_name'])['importance'].agg('mean').sort_values(\n",
    "        ascending=False).reset_index()\n",
    "\n",
    "# Out-of-fold predictions, sorted per user by descending score.\n",
    "df_oof = pd.concat(oof)\n",
    "df_oof.sort_values(['user_id', 'pred'],\n",
    "                   inplace=True,\n",
    "                   ascending=[True, False])\n",
    "\n",
    "# Offline metrics (hitrate / MRR at several cutoffs); needs evaluate().\n",
    "# total = df_query[df_query['click_article_id'] != -1].user_id.nunique()\n",
    "# hitrate_5, mrr_5, hitrate_10, mrr_10, hitrate_20, mrr_20, hitrate_40, mrr_40, hitrate_50, mrr_50 = evaluate(\n",
    "#     df_oof, total)\n",
    "# log.debug(\n",
    "#     f'{hitrate_5}, {mrr_5}, {hitrate_10}, {mrr_10}, {hitrate_20}, {mrr_20}, {hitrate_40}, {mrr_40}, {hitrate_50}, {mrr_50}'\n",
    "# )\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build and save the submission file.\n",
    "# NOTE(review): `prediction` is a local of train_model — this cell only\n",
    "# works if that value is exposed to the notebook namespace.\n",
    "df_sub = gen_sub(prediction)\n",
    "df_sub.sort_values(['user_id'], inplace=True)\n",
    "# Create the directory that to_csv actually writes into\n",
    "# (was '../prediction_result', which did not match the output path below).\n",
    "os.makedirs('./prediction_result', exist_ok=True)\n",
    "df_sub.to_csv('./prediction_result/result.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "ename": "FileNotFoundError",
     "evalue": "[Errno 2] No such file or directory: './user_data/data/online/feature.pkl'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mFileNotFoundError\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_784/2064628570.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mdf_feature\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread_pickle\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'./user_data/data/online/feature.pkl'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;32mD:\\evo\\anaconda\\envs\\tf1.14\\lib\\site-packages\\pandas\\io\\pickle.py\u001b[0m in \u001b[0;36mread_pickle\u001b[1;34m(path, compression)\u001b[0m\n\u001b[0;32m    143\u001b[0m     \"\"\"\n\u001b[0;32m    144\u001b[0m     \u001b[0mpath\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_stringify_path\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 145\u001b[1;33m     \u001b[0mf\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfh\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_get_handle\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"rb\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcompression\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcompression\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mis_text\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    146\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    147\u001b[0m     \u001b[1;31m# 1) try standard libary Pickle\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\evo\\anaconda\\envs\\tf1.14\\lib\\site-packages\\pandas\\io\\common.py\u001b[0m in \u001b[0;36m_get_handle\u001b[1;34m(path_or_buf, mode, encoding, compression, memory_map, is_text)\u001b[0m\n\u001b[0;32m    403\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    404\u001b[0m             \u001b[1;31m# Binary mode\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 405\u001b[1;33m             \u001b[0mf\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpath_or_buf\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    406\u001b[0m         \u001b[0mhandles\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mf\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    407\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: './user_data/data/online/feature.pkl'"
     ]
    }
   ],
   "source": [
    "# Online (full) feature set — a FileNotFoundError here means the online\n",
    "# feature-engineering step has not been run yet.\n",
    "df_feature = pd.read_pickle('./user_data/data/online/feature.pkl')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Score the online test set and write ./prediction_result/result.csv.\n",
    "online_predict(df_feature)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:tf1.14]",
   "language": "python",
   "name": "conda-env-tf1.14-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
