{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-03-12T05:12:39.605784Z",
     "start_time": "2019-03-12T05:12:30.355777Z"
    }
   },
   "outputs": [],
   "source": [
     "\"\"\"\n",
     "GBDT-LR model\n",
     " 1. Prepare the label-encoded input for the GBDT\n",
     " 2. Prepare the one-hot encoded input for the GBDT\n",
     " 3. Build the GBDT-LR model, train it and tune the parameters\n",
     " 4. Predict the results\n",
     "\"\"\"\n",
     "\n",
     "##==================== Import packages ====================##\n",
     "import pandas as pd\n",
     "import numpy as np\n",
     "import random\n",
     "from sklearn.model_selection import train_test_split\n",
     "from sklearn.ensemble import GradientBoostingClassifier\n",
     "from sklearn.linear_model import SGDClassifier  # using SGDClassifier for training incrementally\n",
     "from sklearn.preprocessing import LabelEncoder\n",
     "from dummyPy import OneHotEncoder  # for one-hot encoding on a large scale of chunks\n",
     "from sklearn.metrics import log_loss\n",
     "import matplotlib.pyplot as plt\n",
     "import pickle\n",
     "import gc\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-03-12T05:12:39.630720Z",
     "start_time": "2019-03-12T05:12:39.608774Z"
    }
   },
   "outputs": [],
   "source": [
     "##==================== File paths ====================##\n",
     "\n",
     "fp_train_f = \"feature_engineering/train_f.csv\"\n",
     "fp_test_f  = \"feature_engineering/test_f.csv\"\n",
     "## sub-sampled training set (optional, for quick experiments)\n",
     "#fp_sub_train_f = \"feature_engineering/sub_train_f.csv\"\n",
     "#fp_sub_test_f = \"feature_engineering/sub_test_f.csv\"\n",
     "\n",
     "## label encoder for gbdt input\n",
     "fp_lb_enc = \"feature_engineering/lb_enc\"\n",
     "\n",
     "## one-hot encoder for gbdt output\n",
     "fp_oh_enc_gbdt = \"gbdt/oh_enc_gbdt\"\n",
     "\n",
     "## storage of the pre-trained models\n",
     "fp_lr_model = \"lr/lr_model\"\n",
     "fp_gbdt_model = \"gbdt/gbdt_model\"\n",
     "\n",
     "## submission file\n",
     "\n",
     "fp_sub_gbdt_lr = \"gbdt-lr/GBDT-LR_submission.csv\"\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-03-12T05:12:39.662628Z",
     "start_time": "2019-03-12T05:12:39.640690Z"
    }
   },
   "outputs": [],
   "source": [
     "##==================== GBDT-LR model ====================##\n",
     "## feature names used as model input\n",
     "cols = ['C1',\n",
     "        'banner_pos', \n",
     "        'site_domain', \n",
     "        'site_id',\n",
     "        'site_category',\n",
     "        'app_id',\n",
     "        'app_category', \n",
     "        'device_type', \n",
     "        'device_conn_type',\n",
     "        'C14', \n",
     "        #'C15',\n",
     "        #'C16',\n",
     "        'date',\n",
     "        'time_period',\n",
     "        'weekday',\n",
     "        'C15_C16'  ]\n",
     "\n",
     "## column lists for the train / test CSVs ('click' is the label)\n",
     "cols_train = ['id', 'click']\n",
     "cols_test  = ['id']\n",
     "cols_train.extend(cols)\n",
     "cols_test.extend(cols)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-03-12T05:13:23.000956Z",
     "start_time": "2019-03-12T05:12:49.275255Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "14"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#----- 准备数据 -----#\n",
    "df_train = pd.read_csv(fp_train_f,dtype={'id':str})  # data load\n",
    "\n",
    "\n",
    "## label 编码的转换\n",
    "label_enc = pickle.load(open(fp_lb_enc, 'rb'))\n",
    "for col in cols:\n",
    "    df_train[col] = label_enc[col].fit_transform(df_train[col].values)\n",
    "\n",
    "## 为GBDT 和LR 模型训练分别分割数据\n",
    "## 这为了防止过拟合\n",
    "X_train_org = df_train[cols].get_values()\n",
    "y_train_org = df_train['click'].get_values()\n",
    "\n",
    "# 30% 做验证集，70%做训练集\n",
    "X_train_lr, X_valid, y_train_lr, y_valid = train_test_split(X_train_org, y_train_org, test_size = 0.3, random_state = 0)\n",
    "\n",
    "del df_train\n",
    "\n",
    "gc.collect()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-03-12T12:12:57.100605Z",
     "start_time": "2019-03-12T12:12:56.259827Z"
    }
   },
   "outputs": [],
   "source": [
     "# Load the pre-trained GBDT model from disk\n",
     "gbdt_model = pickle.load(open(fp_gbdt_model, 'rb'))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-03-12T12:12:59.291747Z",
     "start_time": "2019-03-12T12:12:59.276751Z"
    }
   },
   "outputs": [],
   "source": [
    "#----- 为 LR 准备数据  -----#\n",
    "id_cols = []\n",
    "for i in range(1, gbdt_model.get_params()['n_estimators']+1):\n",
    "    id_cols.append('tree'+str(i))\n",
    "oh_enc = OneHotEncoder(id_cols)\n",
    "gc.collect()\n",
    "\n",
    "def chunker(seq, size):\n",
    "    return (seq[pos: pos + size] for pos in range(0, len(seq), size))\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-03-12T12:14:12.654283Z",
     "start_time": "2019-03-12T12:13:01.726163Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "## Fit oh_enc on the training set.\n",
     "## gbdt_model.apply() returns leaf indices of shape\n",
     "## (n_samples, n_estimators, n_classes); [:, :, 0] keeps the single-output\n",
     "## slice used for binary classification.\n",
     "## NOTE(review): int8 assumes leaf indices stay < 128 (true for shallow\n",
     "## trees, e.g. the sklearn default max_depth=3) -- confirm for tuned models.\n",
     "df_train_id = pd.DataFrame(gbdt_model.apply(X_train_org)[:, :, 0], columns=id_cols, dtype=np.int8)\n",
     "\n",
     "## dummyPy's OneHotEncoder accumulates categories incrementally, chunk by chunk\n",
     "for chunk in chunker(df_train_id, 50000):\n",
     "    oh_enc.fit(chunk)\n",
     "    \n",
     "del df_train_id\n",
     "\n",
     "## free memory -- the raw training arrays are no longer needed\n",
     "del X_train_org\n",
     "del y_train_org\n",
     "gc.collect()\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-03-12T12:15:02.525209Z",
     "start_time": "2019-03-12T12:14:12.661265Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "238"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "## oh_enc 在测试集上训练\n",
    "df_test_f = pd.read_csv(fp_test_f, \n",
    "                        index_col=None,  dtype={'id':str}, \n",
    "                        chunksize=50000, iterator=True)\n",
    "\n",
    "for chunk in df_test_f:\n",
    "    ## label transform for training set\n",
    "    for col in cols:\n",
    "        chunk[col] = label_enc[col].fit_transform(chunk[col].values)       \n",
    "    X_test = chunk[cols].get_values()\n",
    "    \n",
    "    #----- GBDT-LR 模型 -----#\n",
    "    df_X_test_id = pd.DataFrame(gbdt_model.apply(X_test)[:, :, 0], columns=id_cols, dtype=np.int8)  # gbdt\n",
    "    oh_enc.fit(df_X_test_id)\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-03-12T12:22:45.986902Z",
     "start_time": "2019-03-12T12:22:45.958977Z"
    }
   },
   "outputs": [],
   "source": [
     "## Persist the fitted one-hot encoder\n",
     "pickle.dump(oh_enc, open(fp_oh_enc_gbdt, 'wb'))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
     "## Reload the one-hot encoder (round-trip check / restart point)\n",
     "oh_enc = pickle.load(open(fp_oh_enc_gbdt, 'rb'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
     "#---- LR model -----#\n",
     "## SGDClassifier with log loss == logistic regression trained incrementally.\n",
     "## NOTE(review): sklearn >= 1.1 spells this loss 'log_loss'.\n",
     "lr_model = SGDClassifier(loss='log')  # using log-loss for LogisticRegression\n",
     "\n",
     "## input data for the LR: GBDT leaf indices (one column per tree)\n",
     "df_X_train_lr_id = pd.DataFrame(gbdt_model.apply(X_train_lr)[:, :, 0], columns=id_cols, dtype=np.int8)\n",
     "df_X_train_lr_id['click'] = y_train_lr\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\hplxg\\AppData\\Local\\conda\\conda\\envs\\python3\\lib\\site-packages\\sklearn\\linear_model\\stochastic_gradient.py:128: FutureWarning: max_iter and tol parameters have been added in <class 'sklearn.linear_model.stochastic_gradient.SGDClassifier'> in 0.19. If both are left unset, they default to max_iter=5 and tol=None. If tol is not None, max_iter defaults to max_iter=1000. From 0.21, default max_iter will be 1000, and default tol will be 1e-3.\n",
      "  \"and default tol will be 1e-3.\" % type(self), FutureWarning)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "14"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "## Incremental fitting: one-hot encode each chunk of leaf indices and\n",
     "## update the SGD model with partial_fit (classes must be passed so the\n",
     "## first call knows the full label set).\n",
     "gc.collect()\n",
     "for chunk in chunker(df_X_train_lr_id,100000):\n",
     "    X_train = oh_enc.transform(chunk[id_cols])\n",
     "    y_train = chunk['click'].astype('int')\n",
     "    lr_model.partial_fit(X_train, y_train, classes = [0,1]) \n",
     "\n",
     "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "log loss of LR on train set: 0.41438\n",
      "log loss of LR on train set: 0.41263\n",
      "log loss of LR on train set: 0.41158\n",
      "log loss of LR on train set: 0.41212\n",
      "log loss of LR on train set: 0.40941\n",
      "log loss of LR on train set: 0.41180\n",
      "log loss of LR on train set: 0.41377\n",
      "log loss of LR on train set: 0.41251\n",
      "log loss of LR on train set: 0.41517\n",
      "log loss of LR on train set: 0.41411\n",
      "log loss of LR on train set: 0.40870\n",
      "log loss of LR on train set: 0.41433\n",
      "log loss of LR on train set: 0.41612\n",
      "log loss of LR on train set: 0.41638\n",
      "log loss of LR on train set: 0.41516\n",
      "log loss of LR on train set: 0.41155\n",
      "log loss of LR on train set: 0.41762\n",
      "log loss of LR on train set: 0.41451\n",
      "log loss of LR on train set: 0.41226\n",
      "log loss of LR on train set: 0.41179\n",
      "log loss of LR on train set: 0.41431\n",
      "log loss of LR on train set: 0.41295\n",
      "log loss of LR on train set: 0.41129\n",
      "log loss of LR on train set: 0.41331\n",
      "log loss of LR on train set: 0.41323\n",
      "log loss of LR on train set: 0.41564\n",
      "log loss of LR on train set: 0.41365\n",
      "log loss of LR on train set: 0.41472\n",
      "log loss of LR on train set: 0.41454\n",
      "log loss of LR on train set: 0.41376\n",
      "log loss of LR on train set: 0.41610\n",
      "log loss of LR on train set: 0.41307\n",
      "log loss of LR on train set: 0.41175\n",
      "log loss of LR on train set: 0.41194\n",
      "log loss of LR on train set: 0.41024\n"
     ]
    }
   ],
   "source": [
    "## log-loss of training\n",
    "import six # 同时循环\n",
    "\n",
    "log_loss_lr = []\n",
    "for chunk1,chunk2 in six.moves.zip(chunker(df_X_train_lr_id,100000),chunker(y_train_lr,100000)): \n",
    "    #for chunk2 in chunker(y_train_lr,100000):\n",
    "        X_train_id = oh_enc.transform(chunk1[id_cols])\n",
    "        y_pred_lr = lr_model.predict_proba(X_train_id)[:, 1]\n",
    "        log_loss_lr_tmp = log_loss(chunk2, y_pred_lr)\n",
    "        print('log loss of LR on train set: %.5f' % log_loss_lr_tmp)\n",
    "        log_loss_lr.append(log_loss_lr_tmp)\n",
    "        gc.collect()\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "## free the training one-hot frame -- no longer needed\n",
     "del df_X_train_lr_id\n",
     "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "log loss of LR on valid set: 0.41568\n",
      "log loss of LR on valid set: 0.41317\n",
      "log loss of LR on valid set: 0.41459\n",
      "log loss of LR on valid set: 0.41156\n",
      "log loss of LR on valid set: 0.41543\n",
      "log loss of LR on valid set: 0.41592\n",
      "log loss of LR on valid set: 0.41312\n",
      "log loss of LR on valid set: 0.41412\n",
      "log loss of LR on valid set: 0.41281\n",
      "log loss of LR on valid set: 0.41217\n",
      "log loss of LR on valid set: 0.41059\n",
      "log loss of LR on valid set: 0.41285\n",
      "log loss of LR on valid set: 0.41624\n",
      "log loss of LR on valid set: 0.41305\n",
      "log loss of LR on valid set: 0.41314\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "## log-loss of valid\n",
    "log_loss_lr = []\n",
    "df_X_valid_id = pd.DataFrame(gbdt_model.apply(X_valid)[:, :, 0], columns=id_cols, dtype=np.int8)\n",
    "df_X_valid_id['click'] = y_valid\n",
    "\n",
    "for chunk1,chunk2 in six.moves.zip(chunker(df_X_valid_id,100000),chunker(y_valid,100000)):\n",
    "    X_valid_id = oh_enc.transform(chunk1[id_cols])\n",
    "    y_pred_lr = lr_model.predict_proba(X_valid_id)[:, 1]\n",
    "    log_loss_lr_tmp = log_loss(chunk2, y_pred_lr)\n",
    "    print('log loss of LR on valid set: %.5f' % log_loss_lr_tmp)\n",
    "    log_loss_lr.append(log_loss_lr_tmp)\n",
    "    gc.collect()\n",
    "\n",
    "   \n",
    "\n",
    "#X2_valid = oh_enc.transform(pd.DataFrame(gbdt_model.apply(X_valid)[:, :, 0], columns=id_cols, dtype=np.int8))\n",
    "#y_pred_lr = lr_model.predict_proba(X2_valid)[:, 1]\n",
    "#log_loss_lr = log_loss(y_valid, y_pred_lr)\n",
    "#print('log loss of LR on valid set: %.5f' % log_loss_lr)\n",
    "\n",
    "gc.collect() "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
     "## Save the pre-trained LR model, then reload it (round-trip check)\n",
     "pickle.dump(lr_model, open(fp_lr_model, 'wb'))\n",
     "\n",
     "lr_model = pickle.load(open(fp_lr_model, 'rb'))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "##==================== 预测 ====================##\n",
    "df_test_f = pd.read_csv(fp_test_f, \n",
    "                        index_col=None,  dtype={'id':str}, \n",
    "                        chunksize=100000, iterator=True)        \n",
    "\n",
    "hd = True\n",
    "for chunk in df_test_f:\n",
    "    ## label transform for training set\n",
    "    for col in cols:\n",
    "        chunk[col] = label_enc[col].fit_transform(chunk[col].values)       \n",
    "    X_test = chunk[cols].get_values()\n",
    "    \n",
    "    #----- GBDT-LR -----#\n",
    "    y_pred_gbdt = gbdt_model.predict_proba(X_test)[:, 1]\n",
    "    X_test_gbdt = pd.DataFrame(gbdt_model.apply(X_test)[:, :, 0], columns=id_cols, dtype=np.int8)\n",
    "    X2_test = oh_enc.transform(X_test_gbdt)  # one-hot\n",
    "    y_pred_lr = lr_model.predict_proba(X2_test)[:, 1]  # lr   \n",
    "    \n",
    "    #----- 生成submission -----#\n",
    "    \n",
    "    chunk['click'] = y_pred_lr    \n",
    "    with open(fp_sub_gbdt_lr, 'a',newline='') as f:  # 注意解决空行的问题\n",
    "        chunk.to_csv(f, columns=['id', 'click'], header=hd, index=False)\n",
    "    hd = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
