{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from joblib import dump, load, Parallel, delayed\n",
    "from sklearn import preprocessing\n",
    "import xlearn\n",
    "import time\n",
    "# Timer for the whole notebook run.\n",
    "# time.clock() was deprecated in Python 3.3 and removed in 3.8;\n",
    "# time.perf_counter() is the portable replacement.\n",
    "start = time.perf_counter()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the raw click-log training data.\n",
    "# NOTE(review): 'train' is a relative, extension-less path — confirm the\n",
    "# file is present in the working directory before a full re-run.\n",
    "train_org = pd.read_csv(\"train\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Feature engineering on the raw data.\n",
    "# Step 1: drop the 'id' row-identifier column.\n",
    "train_org.drop('id', axis = 1, inplace=True )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Working alias. NOTE(review): this is a reference, not a copy — any\n",
    "# in-place mutation of `train` before it is reassigned below also\n",
    "# mutates `train_org`.\n",
    "train = train_org"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reduce the data volume to make later computation tractable:\n",
    "# keep a random 20% (test_size=0.8 discards the other 80%;\n",
    "# random_state fixes the sample).\n",
    "from sklearn.model_selection import train_test_split\n",
    "X_choose, X_dump, Y_choose, Y_dump=train_test_split(train.drop('click',axis=1),train.loc[:,'click'],random_state=42, test_size=0.8)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(8085793, 22) (8085793,)\n"
     ]
    }
   ],
   "source": [
    "# Sanity check: ~8.09M sampled rows, 22 feature columns.\n",
    "print(X_choose.shape, Y_choose.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rebuild `train` from the 20% sample with a clean 0..n-1 index,\n",
    "# then re-attach the label column.\n",
    "train = X_choose.reset_index(drop=True)\n",
    "# Assign the labels as a 1-D array; the previous .reshape(-1,1) made a\n",
    "# 2-D (n,1) array, which newer pandas rejects for column assignment.\n",
    "train['click'] = Y_choose.values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Step 2: derive time features from the `hour` column.\n",
    "# `hour % 10000 // 100` extracts the day-of-month digits and `% 100`\n",
    "# the hour-of-day; `(day - 21)` implies the data starts on day 21.\n",
    "# day % 7 gives a 7-value pseudo day-of-week cycle.\n",
    "day = train.hour % 10000 // 100\n",
    "train['day'] = day % 7\n",
    "\n",
    "# Cumulative hour since day 21 — used only to split train/validation\n",
    "# chronologically; the column is dropped after the split.\n",
    "hr = (day - 21) * 24 + train.hour % 100\n",
    "train.hour = hr\n",
    "\n",
    "# Hour-of-day bucket (0-23).\n",
    "train['hour_range'] = hr % 24"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Step 3: process the site/app identity features.\n",
    "# New variable `site_app` flags the traffic source: rows whose site_id\n",
    "# equals 'ecad2386' — presumably the app-traffic placeholder site id;\n",
    "# TODO confirm against the dataset documentation.\n",
    "train['site_app'] = 0\n",
    "train.loc[train['site_id'] == 'ecad2386', 'site_app'] = 1\n",
    "\n",
    "# Concatenate the six site/app id strings into a single `user_id` key.\n",
    "user_id_list = ['site_id', 'site_domain', 'site_category',\\\n",
    "                'app_id', 'app_domain', 'app_category',]\n",
    "train['user_id'] = ''\n",
    "for each_id in user_id_list:\n",
    "    train['user_id'] = train['user_id']+ train[each_id]                        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Concatenate the three device id strings into one `device_new_id` key.\n",
    "device_id_list = ['device_id','device_ip','device_model']\n",
    "train['device_new_id'] = ''\n",
    "for each_id in device_id_list:\n",
    "    train['device_new_id'] = train['device_new_id']+ train[each_id] "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Drop the raw columns now folded into user_id / device_new_id.\n",
    "train = train.drop(['site_id', 'site_domain', 'site_category',\\\n",
    "                              'app_id', 'app_domain', 'app_category',\\\n",
    "                             'device_id','device_ip','device_model'], axis = 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "user_id device_new_id "
     ]
    }
   ],
   "source": [
    "# Label-encode 'user_id' and 'device_new_id' (high-cardinality strings\n",
    "# -> integer codes). NOTE(review): the encoder is fit on the full frame,\n",
    "# including rows later used as validation — acceptable for a pure id\n",
    "# encoding, but worth confirming no target-derived feature is added here.\n",
    "feature_list_B = ['user_id', 'device_new_id']\n",
    "# feature_list_B = ['site_id']\n",
    "for each_feature in feature_list_B:\n",
    "    print(each_feature+\" \", end='')\n",
    "    le = preprocessing.LabelEncoder()    \n",
    "    le_data = le.fit_transform(train[each_feature].values.ravel())    \n",
    "    train[each_feature] = le_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Chronological split: the first 8 days (cumulative hours 0-191 = 8*24)\n",
    "# become training data, days 9-10 (hour >= 192) become validation data.\n",
    "# The helper `hour` column is dropped from both parts.\n",
    "period = 192\n",
    "train_data = train.loc[(train.hour>=0) & (train.hour<=period-1)]\n",
    "train_data = train_data.drop('hour',axis = 1)\n",
    "valid_data = train.loc[(train.hour>=period)]\n",
    "valid_data = valid_data.drop('hour',axis = 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    0\n",
       "1    1\n",
       "2    0\n",
       "4    0\n",
       "5    0\n",
       "Name: click, dtype: int64"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Peek at the labels after the split (index gaps come from the\n",
    "# time-based filter above).\n",
    "train_data.loc[:,'click'].head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(3238568, 17) (3238569, 17)\n"
     ]
    }
   ],
   "source": [
    "# Randomly split the training rows into two disjoint halves: one half\n",
    "# trains xgboost, the other trains the LR / FFM model.\n",
    "from sklearn.model_selection import train_test_split\n",
    "X_xgb, X_ffm, Y_xgb, Y_ffm=train_test_split(train_data.drop('click',axis=1),train_data.loc[:,'click'],random_state=42, test_size=0.5)\n",
    "print(X_xgb.shape, X_ffm.shape)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Separate validation features from the 'click' labels.\n",
    "X_valid = valid_data.drop('click',axis = 1)\n",
    "Y_valid = valid_data.loc[:,'click'].values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([1, 0, 1, ..., 0, 0, 0])"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Quick look at the FFM-half labels as a numpy array.\n",
    "Y_ffm.values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0]\ttrain-logloss:0.443089\tvalid-logloss:0.435354\n",
      "[1]\ttrain-logloss:0.429439\tvalid-logloss:0.423302\n",
      "[2]\ttrain-logloss:0.425692\tvalid-logloss:0.420492\n",
      "[3]\ttrain-logloss:0.424117\tvalid-logloss:0.420875\n",
      "[4]\ttrain-logloss:0.422837\tvalid-logloss:0.42066\n",
      "[5]\ttrain-logloss:0.419664\tvalid-logloss:0.412488\n",
      "[6]\ttrain-logloss:0.418982\tvalid-logloss:0.414006\n",
      "[7]\ttrain-logloss:0.417691\tvalid-logloss:0.413067\n",
      "[8]\ttrain-logloss:0.417206\tvalid-logloss:0.413188\n",
      "[9]\ttrain-logloss:0.416905\tvalid-logloss:0.412825\n"
     ]
    }
   ],
   "source": [
    "# Train a shallow 10-round xgboost model whose leaf indices will be\n",
    "# used as categorical features for the FFM stage (GBDT+FFM stacking).\n",
    "import xgboost as xgb\n",
    "valid_dm = xgb.DMatrix(X_valid.values, label = Y_valid.reshape(-1,1))\n",
    "xgb_dm = xgb.DMatrix(X_xgb.values, label = Y_xgb.values.reshape(-1,1))\n",
    "param = {'max_depth':4, 'eta':1, 'objective':'binary:logistic', 'verbose':0,\n",
    "            'subsample':1, 'min_child_weight':50, 'gamma':0,\n",
    "            'colsample_bytree':1, 'seed': 999}\n",
    "plst = list(param.items()) + [('eval_metric', 'logloss')]\n",
    "watchlist = [(xgb_dm, 'train'),(valid_dm,'valid')]\n",
    "xgb_model = None\n",
    "xgb1 = xgb.train(plst,xgb_dm, 10, watchlist, xgb_model = xgb_model)\n",
    "#     Y_pred = xgb1.predict(test_X_dm)\n",
    "# xgb1.save_model('xgb.model')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "All finished\n"
     ]
    }
   ],
   "source": [
    "# Predict leaf indices (pred_leaf=True -> one column per tree) for the\n",
    "# FFM half and the validation set; these become new categorical features.\n",
    "ffm_dm = xgb.DMatrix(X_ffm.values, label=Y_ffm.values.reshape(-1,1))\n",
    "ffm_new_feature = xgb1.predict(ffm_dm,pred_leaf=True)\n",
    "valid_new_feature = xgb1.predict(valid_dm,pred_leaf=True)\n",
    "# test_new_feature = xgb1.predict(test_X_dm,pred_leaf=True)\n",
    "# valid_X_dm = xgb.DMatrix(X_valid.values, label = Y_valid.values.reshape(-1,1))\n",
    "# train_new_feature = np.concatenate((train_new_feature, xgb1.predict(valid_X_dm,pred_leaf=True)), axis = 0)\n",
    "print(\"All finished\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ffm:  (3238569, 10) valid:  (1608656, 10)\n"
     ]
    }
   ],
   "source": [
    "# 10 boosting rounds -> 10 leaf-index columns per row.\n",
    "print('ffm: ', ffm_new_feature.shape,'valid: ',valid_new_feature.shape)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Name the new leaf-index features nf_0 .. nf_9 and wrap them as frames.\n",
    "\n",
    "column_list = []\n",
    "for i in range(ffm_new_feature.shape[1]):\n",
    "    column_list.append('nf'+'_'+str(i))\n",
    "ffm_mx = pd.DataFrame(ffm_new_feature, columns=column_list)\n",
    "valid_mx = pd.DataFrame(valid_new_feature, columns=column_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Align indices (so concat matches rows positionally) and join the\n",
    "# original features with the leaf features, then re-attach the labels.\n",
    "X_ffm.reset_index(drop=True, inplace=True)\n",
    "ffm_mx.reset_index(drop=True, inplace=True)\n",
    "X_valid.reset_index(drop=True,inplace=True)\n",
    "valid_mx.reset_index(drop=True, inplace=True)\n",
    "ffm_data = pd.concat([X_ffm,ffm_mx],axis=1 )\n",
    "valid_ffm_data = pd.concat([X_valid,valid_mx],axis=1)\n",
    "ffm_data['click'] = Y_ffm.values\n",
    "valid_ffm_data['click'] = Y_valid"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(3238569, 28) (1608656, 28)\n"
     ]
    }
   ],
   "source": [
    "# 17 original + 10 leaf features + click = 28 columns in each frame.\n",
    "print(ffm_data.shape, valid_ffm_data.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 C1\n",
      "1 banner_pos\n",
      "2 device_type\n",
      "3 device_conn_type\n",
      "4 C14\n",
      "5 C15\n",
      "6 C16\n",
      "7 C17\n",
      "8 C18\n",
      "9 C19\n",
      "10 C20\n",
      "11 C21\n",
      "12 day\n",
      "13 hour_range\n",
      "14 site_app\n",
      "15 user_id\n",
      "16 device_new_id\n",
      "17 nf_0\n",
      "18 nf_1\n",
      "19 nf_2\n",
      "20 nf_3\n",
      "21 nf_4\n",
      "22 nf_5\n",
      "23 nf_6\n",
      "24 nf_7\n",
      "25 nf_8\n",
      "26 nf_9\n",
      "27 click\n"
     ]
    }
   ],
   "source": [
    "# Column index -> name map (handy for checking the libffm field ids).\n",
    "for i,n in enumerate(ffm_data.columns):\n",
    "    print(i,n)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Based on Kaggle kernel by Scirpus\n",
    "def convert_to_ffm(df, type, numerics, categories, features, catcodes=None):\n",
    "    \"\"\"Write `df` to '<type>_ffm.txt' in libffm format: label f:code:val.\n",
    "\n",
    "    df         : frame holding the feature columns plus a 'click' label.\n",
    "    type       : output file-name prefix, e.g. 'train' or 'valid'\n",
    "                 (shadows the builtin `type`; name kept for compatibility).\n",
    "    numerics   : columns written with their raw numeric value.\n",
    "    categories : columns encoded with running integer feature codes.\n",
    "    features   : used only for its length (column count).\n",
    "    catcodes   : optional shared {column: {value: code}} mapping. Pass the\n",
    "                 SAME dict to the train and valid calls so both files use\n",
    "                 consistent feature codes; when omitted (the old default)\n",
    "                 each call builds its own independent encoding.\n",
    "    \"\"\"\n",
    "    if catcodes is None:\n",
    "        catcodes = {}\n",
    "    # Resume the running code counter after any codes already assigned\n",
    "    # in a shared `catcodes` dict (len(numerics) is the original base).\n",
    "    currentcode = len(numerics) + sum(len(m) for m in catcodes.values())\n",
    "    # Flag each field: 0 = numeric, 1 = categorical.\n",
    "    catdict = {}\n",
    "    for x in numerics:\n",
    "        catdict[x] = 0\n",
    "    for x in categories:\n",
    "        catdict[x] = 1\n",
    "\n",
    "    nrows = df.shape[0]\n",
    "    ncolumns = len(features)\n",
    "    with open(str(type) + \"_ffm.txt\", \"w\") as text_file:\n",
    "        # Convert each row to 'label field:feature:value' libffm lines.\n",
    "        for r in range(nrows):\n",
    "            if r % 100000 == 0:\n",
    "                print(r, end=\" \")  # coarse progress indicator\n",
    "            datarow = df.iloc[r].to_dict()\n",
    "            datastring = str(int(datarow['click']))\n",
    "            for i, x in enumerate(catdict.keys()):\n",
    "                if catdict[x] == 0:\n",
    "                    # Numeric field: dummy feature index, raw value.\n",
    "                    datastring = datastring + \" \" + str(i) + \":\" + str(i) + \":\" + str(datarow[x])\n",
    "                else:\n",
    "                    # Categorical field: assign a fresh code the first\n",
    "                    # time a (column, value) pair is seen, then reuse it.\n",
    "                    if x not in catcodes:\n",
    "                        catcodes[x] = {}\n",
    "                    if datarow[x] not in catcodes[x]:\n",
    "                        currentcode += 1\n",
    "                        catcodes[x][datarow[x]] = currentcode  # encode the feature\n",
    "                    code = catcodes[x][datarow[x]]\n",
    "                    datastring = datastring + \" \" + str(i) + \":\" + str(int(code)) + \":1\"\n",
    "\n",
    "            datastring += '\\n'\n",
    "            text_file.write(datastring)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Index(['C1', 'banner_pos', 'device_type', 'device_conn_type', 'C14', 'C15',\n",
      "       'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'day', 'hour_range',\n",
      "       'site_app', 'nf_0', 'nf_1', 'nf_2', 'nf_3', 'nf_4', 'nf_5', 'nf_6',\n",
      "       'nf_7', 'nf_8', 'nf_9'],\n",
      "      dtype='object')\n",
      "0 100000 200000 300000 400000 500000 600000 700000 800000 900000 1000000 1100000 1200000 1300000 1400000 1500000 1600000 1700000 1800000 1900000 2000000 2100000 2200000 2300000 2400000 2500000 2600000 2700000 2800000 2900000 3000000 3100000 3200000 0 100000 200000 300000 400000 500000 600000 700000 800000 900000 1000000 1100000 1200000 1300000 1400000 1500000 1600000 "
     ]
    }
   ],
   "source": [
    "# Drop 'user_id' and 'device_new_id': too many distinct categories.\n",
    "feature_list_B = ['user_id', 'device_new_id']\n",
    "ffm_data = ffm_data.drop(feature_list_B, axis = 1)\n",
    "valid_ffm_data = valid_ffm_data.drop(feature_list_B, axis = 1)\n",
    "\n",
    "# Convert both frames to libffm format; '' stands in for an empty\n",
    "# numerics list (iterating an empty string yields nothing).\n",
    "# NOTE(review): the two calls each build an independent category\n",
    "# encoding, so the same category value can map to different feature\n",
    "# codes in train_ffm.txt vs valid_ffm.txt — verify, and share one\n",
    "# encoding dict between the calls if the FFM model relies on it.\n",
    "categories = ffm_data.drop('click', axis = 1).columns\n",
    "print(categories)\n",
    "convert_to_ffm(ffm_data,'train','',categories,categories)\n",
    "convert_to_ffm(valid_ffm_data,'valid','',categories,categories)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import xlearn as xl\n",
    "\n",
    "# Training task\n",
    "ffm_model = xl.create_ffm() # Use field-aware factorization machine\n",
    "ffm_model.setTrain(\"./train_ffm.txt\")  # Training data\n",
    "ffm_model.setValidate(\"./valid_ffm.txt\")  # Validation data\n",
    "# param: binary classification, learning rate 50, L2 lambda 0.001,\n",
    "# 500 epochs, latent-factor dimension k=4.\n",
    "# NOTE(review): lr=50 is far above xlearn's documented default (0.2) —\n",
    "# confirm this is intentional and not a leftover experiment.\n",
    "param = {'task':'binary', 'lr':50, \n",
    "         'lambda':0.001, 'epoch':500,'k':4}\n",
    "ffm_model.disableEarlyStop();\n",
    "# Start to train\n",
    "# The trained model will be stored in model.out\n",
    "ffm_model.fit(param, './model.out')\n",
    "\n",
    "# # Prediction task\n",
    "# ffm_model.setTest(\"./small_test.txt\")  # Test data\n",
    "# ffm_model.setSigmoid()  # Convert output to 0-1\n",
    "\n",
    "# # Start to predict\n",
    "# # The output result will be stored in output.txt\n",
    "# ffm_model.predict(\"./model.out\", \"./output.txt\")"
   ]
  },
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
