{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# w5_冯炳驹_124298228"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-01-24T13:44:40.430000Z",
     "start_time": "2018-01-24T13:44:06.913000Z"
    },
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# 将所有特征串联起来，构成RS_Train.csv\n",
    "#RS_Test.csv\n",
    "#为最后推荐系统做准备\n",
    "from __future__ import division\n",
    "\n",
    "import cPickle\n",
    "\n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "import scipy.sparse as ss\n",
    "from numpy.random import random  \n",
    "from collections import defaultdict\n",
    "\n",
     "class RecommonderSystem:\n",
     "  # Hybrid event recommender: loads precomputed feature matrices and\n",
     "  # trains a model-based CF (SVD) model on construction.\n",
     "  def __init__(self):\n",
     "    # Load precomputed data and initialize all feature matrices.\n",
     "    \n",
     "    # re-mapped indices for users and events\n",
     "    self.userIndex = cPickle.load(open(\"PE_userIndex.pkl\", 'rb'))\n",
     "    self.eventIndex = cPickle.load(open(\"PE_eventIndex.pkl\", 'rb'))\n",
     "    self.n_users = len(self.userIndex)\n",
     "    self.n_items = len(self.eventIndex)\n",
     "    \n",
     "    # user-event rating matrix R\n",
     "    # train_SVD re-reads it from file; the two need different formats\n",
     "    self.userEventScores = sio.mmread(\"PE_userEventScores\").todense()\n",
     "    \n",
     "    # inverted indices\n",
     "    ## events each user attended\n",
     "    self.itemsForUser = cPickle.load(open(\"PE_eventsForUser.pkl\", 'rb'))\n",
     "    ## users attending each event\n",
     "    self.usersForItem = cPickle.load(open(\"PE_usersForEvent.pkl\", 'rb'))\n",
     "    \n",
     "    # initialize and train the model-based CF (SVD) parameters\n",
     "    self.init_SVD()\n",
     "    self.train_SVD(trainfile = \"train.csv\")\n",
     "    \n",
     "    # user-user similarity computed from user attributes\n",
     "    self.userSimMatrix = sio.mmread(\"US_userSimMatrix\").todense()\n",
     "    \n",
     "    # event-event similarities computed from event attributes/content\n",
     "    self.eventPropSim = sio.mmread(\"EV_eventPropSim\").todense()\n",
     "    self.eventContSim = sio.mmread(\"EV_eventContSim\").todense()\n",
     "    \n",
     "    # number of friends of each user\n",
     "    self.numFriends = sio.mmread(\"UF_numFriends\")\n",
     "    # influence of each friend's event scores on the user\n",
     "    self.userFriends = sio.mmread(\"UF_userFriends\").todense()\n",
     "    \n",
     "    # intrinsic popularity of each event\n",
     "    self.eventPopularity = sio.mmread(\"EA_eventPopularity\").todense()\n",
     "    \n",
     "    \n",
     "    # item-CF similarity cache; -1 marks \"not yet computed\"\n",
     "    self.item_cf_similarity = np.zeros((self.n_items, self.n_items), dtype=np.float)\n",
     "    self.item_cf_similarity[:,:] = -1 \n",
     "\n",
     "    # user-CF similarity cache; -1 marks \"not yet computed\"\n",
     "    self.user_fc_similarity = np.zeros((self.n_users, self.n_users), dtype=np.float)\n",
     "    self.user_fc_similarity[:,:] = -1 \n",
     "    \n",
     "    # debug flag\n",
     "    self.debug = 1\n",
     "\n",
     "  def init_SVD(self, K=20):\n",
     "    # Initialize model parameters (for model-based CF, SVD_CF).\n",
     "    # K: number of latent factors.\n",
     "    self.K = K  \n",
     "    \n",
     "    #init parameters\n",
     "    #bias terms per item / per user\n",
     "    self.bi = np.zeros(self.n_items)  \n",
     "    self.bu = np.zeros(self.n_users)    \n",
     "    \n",
     "    #the small (latent-factor) matrices\n",
     "    self.p = random((self.n_users,self.K))/10*(np.sqrt(self.K))\n",
     "    self.q = random((self.n_items,self.K))/10*(np.sqrt(self.K))  \n",
     "    #item preference factor (implicit feedback, SVD++ style; only used by commented-out code)\n",
     "    self.y = random((self.n_items, self.K))/10*(np.sqrt(self.K))  \n",
     "    \n",
     "   \n",
     "    #per-user mean rating (filled lazily by sim_cal_UserCF)\n",
     "    self.user_r_mean = np.zeros(self.n_users)\n",
     "    self.user_r_mean[:] = 0   \n",
     "\n",
    "          \n",
    "  def train_SVD(self,trainfile = 'train.csv', epochs=100,gamma=0.04,Lambda=0.15):\n",
    "    #训练SVD模型（for 基于模型的协同过滤SVD_CF）\n",
    "    #gamma：为学习率\n",
    "    #Lambda：正则参数\n",
    "    \n",
    "    #偷懒了，为了和原来的代码的输入接口一样，直接从训练文件中去读取数据\n",
    "    print \"SVD Train...\"\n",
    "    ftrain = open(trainfile, 'r')\n",
    "    ftrain.readline()\n",
    "    self.mu = 0.0\n",
    "    n_records = 0\n",
    "    uids = []  #每条记录的用户索引\n",
    "    i_ids = [] #每条记录的item索引\n",
    "    #用户-Item关系矩阵R（内容同userEventScores相同），临时变量，训练完了R不再需要\n",
    "    R = np.zeros((self.n_users, self.n_items))\n",
    "    \n",
    "    for line in ftrain:\n",
    "        cols = line.strip().split(\",\")\n",
    "        u = self.userIndex[cols[0]]  #用户\n",
    "        i = self.eventIndex[cols[1]] #活动\n",
    "        \n",
    "        uids.append(u)\n",
    "        i_ids.append(i)\n",
    "        \n",
    "        R[u,i] = int(cols[4])  #interested\n",
    "        self.mu += R[u,i]\n",
    "        n_records += 1\n",
    "    \n",
    "    ftrain.close()\n",
    "    self.mu /= n_records\n",
    "    \n",
    "    # 请补充完整SVD模型训练过程\n",
    "    for epoch in range(epochs):\n",
    "        print(\" processing epoch {}\".format(epoch))\n",
    "        \n",
    "        kk = np.random.permutation(uids)  \n",
    "        for j in uids:\n",
    "            u = kk[j]\n",
    "            #用户u点评的item集\n",
    "            user_items = self.itemsForUser[u]\n",
    "            user_items_sizes = len(user_items)\n",
    "            sqrt_user_items_sizes = np.sqrt(user_items_sizes)\n",
    "\n",
    "            #基于用户u点评的item集推测u的implicit偏好\n",
    "            u_impl_prf = 0\n",
    "#             y_u = np.sum(self.y[user_items], axis=0)\n",
    "#             y_u = 0\n",
    "#             for item in user_items:\n",
    "#                 y_u += np.sum(self.y[item], axis=0)\n",
    "\n",
    "#             u_impl_prf = y_u / sqrt_user_items_sizes\n",
    "\n",
    "            for i in user_items:\n",
    "                #预测值\n",
    "                rp = self.mu + self.bu[u] + self.bi[i] + np.dot(self.q[i], self.p[u] + u_impl_prf)\n",
    "                \n",
    "                r = R[u,i]\n",
    "                #误差\n",
    "                e_ui = r - rp\n",
    "\n",
    "                #sgd\n",
    "                self.bu[u] += gamma * (e_ui - Lambda * self.bu[u])\n",
    "                self.bi[i] += gamma * (e_ui - Lambda * self.bi[i])\n",
    "                self.p[u] += gamma * (e_ui * self.q[i] - Lambda * self.p[u])\n",
    "                self.q[i] += gamma * (e_ui * (self.p[u] + u_impl_prf) - Lambda * self.q[i])\n",
    "                \n",
    "                #self.y[i] += gamma * (e_ui * self.q[i] / sqrt_user_items_sizes - Lambda * self.y[i])\n",
    "                \n",
    "        gamma *= 0.93\n",
    "    \n",
    "    print \"SVD++ trained\"\n",
    "    \n",
     "  def pred_SVD(self, uid, i_id):\n",
     "    # Predict user uid's score for item i_id from the current SVD parameters.\n",
     "    \n",
     "    y_u = 0  # implicit-feedback term (disabled: plain SVD, not SVD++)\n",
     "#     user_items = self.itemsForUser[uid]\n",
     "#     user_items_sizes = len(user_items)\n",
     "#     for item in user_items:\n",
     "#         y_u += np.sum(self.y[item], axis=0)\n",
     "      \n",
     "#     y_u = y_u / np.sqrt(user_items_sizes)\n",
     "        \n",
     "    ans=self.mu + self.bi[i_id] + self.bu[uid] + np.dot(self.q[i_id],self.p[uid] + y_u) \n",
     "    \n",
     "   # clamp the prediction into [0, 1]\n",
     "    if ans>1:  \n",
     "        return 1  \n",
     "    elif ans<0:  \n",
     "        return 0\n",
     "    \n",
     "    return ans  \n",
     "\n",
     "  def sim_cal_UserCF(self, uid1, uid2):\n",
     "    # Pearson similarity between users uid1 and uid2, based on the items\n",
     "    # both have rated; results are cached in self.user_fc_similarity.\n",
     "    if self.user_fc_similarity[uid1][uid2]!=-1:  # already computed\n",
     "        return self.user_fc_similarity[uid1][uid2]  \n",
     "    \n",
     "    if (uid1 == uid2):\n",
     "        self.user_fc_similarity[uid1][uid2]=1  \n",
     "        self.user_fc_similarity[uid2][uid1]=1  \n",
     "        return 1\n",
     "\n",
     "    similarity = 0.0\n",
     "    \n",
     "    # items both uid1 and uid2 have rated\n",
     "    # (a 0/0 score pair may just mean \"never rated\"; possible future optimization)\n",
     "    user1_items = self.itemsForUser[uid1]\n",
     "    user2_items = self.itemsForUser[uid2]\n",
     "    \n",
     "    common_items = [i for i in user1_items if i in user2_items]\n",
     "    \n",
     "    if len(common_items) == 0:\n",
     "        self.user_fc_similarity[uid1][uid2]=similarity  \n",
     "        self.user_fc_similarity[uid2][uid1]=similarity  \n",
     "        return similarity\n",
     "    \n",
     "    # mean rating of each user over the common items; userEventScores[user_id, item_id]\n",
     "    user1_item_sum = 0\n",
     "    user2_item_sum = 0\n",
     "    for item in common_items:\n",
     "        user1_item_sum += self.userEventScores[uid1, item]\n",
     "        user2_item_sum += self.userEventScores[uid2, item]\n",
     "        \n",
     "    user1_item_r_mean = user1_item_sum/len(common_items)\n",
     "    user2_item_r_mean = user2_item_sum/len(common_items)\n",
     "    \n",
     "    # Pearson correlation terms\n",
     "    E_common = 0\n",
     "    E_user1 = 0\n",
     "    E_user2 = 0\n",
     "    for item in common_items:\n",
     "        user1_item_r = self.userEventScores[uid1, item]\n",
     "        user2_item_r = self.userEventScores[uid2, item]\n",
     "        E_common += (user1_item_r - user1_item_r_mean) * (user2_item_r - user2_item_r_mean)\n",
     "        E_user1 += np.square(user1_item_r - user1_item_r_mean)\n",
     "        E_user2 += np.square(user2_item_r - user2_item_r_mean)\n",
     "        \n",
     "    if (E_user1 != 0 and E_user2 != 0):\n",
     "        similarity = E_common / (np.sqrt(E_user1 * E_user2))\n",
     "    \n",
     "    self.user_fc_similarity[uid1][uid2]=similarity  \n",
     "    self.user_fc_similarity[uid2][uid1]=similarity\n",
     "    \n",
     "    # remember each user's mean over the common items (used by the\n",
     "    # commented-out userCFPred draft below)\n",
     "    self.user_r_mean[uid1] = user1_item_r_mean\n",
     "    self.user_r_mean[uid2] = user2_item_r_mean\n",
     "    \n",
     "    return similarity\n",
     "\n",
     "#   def userCFPred(self, userId, eventId):\n",
     "#      # (unused draft: mean-centered user-CF prediction)\n",
     "#     ans = 0.0\n",
     "#     similarity = 0.0\n",
     "    \n",
     "#     for user in self.usersForItem[eventId]:  # all users who rated eventId\n",
     "#         sim = self.sim_cal_UserCF(self, userId, user)    # similarity between user and userId\n",
     "#         if (sim <= 0):continue\n",
     "    \n",
     "#         ans += sim * (self.userEventScores[user][eventId] - self.user_r_mean[user])\n",
     "#         similarity += sim\n",
     "        \n",
     "#     ans = self.user_r_mean[userId] + ans/similarity\n",
     "        \n",
     "#     return ans   \n",
     "\n",
     "  def userCFReco(self, userId, eventId):\n",
     "    \"\"\"\n",
     "    User-based CF recommendation score for (userId, eventId).\n",
     "    Pseudocode:\n",
     "    for item i\n",
     "      for every other user v that has a preference for i\n",
     "        compute similarity s between u and v\n",
     "        incorporate v's preference for i weighted by s into running average\n",
     "    return top items ranked by weighted average\n",
     "    \"\"\"\n",
     "    ans = 0.0\n",
     "    similarity = 0.0\n",
     "    \n",
     "    user_id = self.userIndex[userId]\n",
     "    event_id = self.eventIndex[eventId]\n",
     "    \n",
     "    for user in self.usersForItem[event_id]:  # all users who rated event_id\n",
     "        sim = self.sim_cal_UserCF(user_id, user)    # similarity between user and user_id\n",
     "        if (sim <= 0):continue\n",
     "    \n",
     "        ans += sim * self.userEventScores[user,event_id]\n",
     "        similarity += sim\n",
     "        \n",
     "    #print rat_acc,sim_accumulate  \n",
     "    if similarity == 0: # no similar user rated; fall back to the global mean\n",
     "        return  self.mu \n",
     "\n",
     "    return ans / similarity  \n",
    "\n",
    "\n",
    "  def sim_cal_ItemCF(self, i_id1, i_id2):\n",
    "    #计算Item i_id1和i_id2之间的相似性\n",
    "    #请补充完整代码\n",
    "    if self.item_cf_similarity[i_id1][i_id2]!=-1:  #如果已经计算好\n",
    "        return self.item_cf_similarity[i_id1][i_id2]\n",
    "    \n",
    "    if (i_id1 == i_id2):\n",
    "        self.item_cf_similarity[i_id1][i_id2]=1  \n",
    "        self.item_cf_similarity[i_id2][i_id1]=1  \n",
    "        return 1\n",
    "        \n",
    "    similarity = 0.0\n",
    "    \n",
    "    #找用户uid1和uid2都有过打分的事件的集合\n",
    "    #interested, and not_interested 如果是0 和 0 表示用户没有打过分，这里可以做一个优化点，暂时不做\n",
    "    item1_user = self.usersForItem[i_id1]\n",
    "    item2_user = self.usersForItem[i_id2]\n",
    "    \n",
    "    common_users = [i for i in item1_user if i in item2_user]\n",
    "    \n",
    "    if len(common_users) == 0:\n",
    "        self.item_fc_similarity[i_id1][i_id2]=similarity  \n",
    "        self.item_fc_similarity[i_id2][i_id1]=similarity  \n",
    "        return similarity\n",
    "    \n",
    "    #userEventScores[user_id, item_id]\\\n",
    "    item1_user_sum = 0\n",
    "    item2_user_sum = 0\n",
    "    for user_id in common_users:\n",
    "        item1_user_sum += self.userEventScores[user_id, i_id1]\n",
    "        item2_user_sum += self.userEventScores[user_id, i_id2]\n",
    "        \n",
    "    item1_user_r_mean = item1_user_sum/len(common_users)\n",
    "    item2_user_r_mean = item2_user_sum/len(common_users)\n",
    "    \n",
    "    E_common = 0\n",
    "    E_item1 = 0\n",
    "    E_item2 = 0\n",
    "    for item in common_users:\n",
    "        item1_user_r = self.userEventScores[user_id, i_id1]\n",
    "        item2_user_r = self.userEventScores[user_id, i_id2]\n",
    "        E_common += (item1_user_r - item1_user_r_mean) * (item2_user_r - item2_user_r_mean)\n",
    "        E_item1 += np.square(item1_user_r - item1_user_r_mean)\n",
    "        E_item2 += np.square(item2_user_r - item2_user_r_mean)\n",
    "        \n",
    "    if (E_item1 != 0 and E_item2 != 0):\n",
    "        similarity = E_common /(np.sqrt(E_item1 * E_item2))\n",
    "    \n",
    "    self.item_cf_similarity[i_id1][i_id2]=similarity  \n",
    "    self.item_cf_similarity[i_id2][i_id1]=similarity  \n",
    "    return similarity  \n",
    "  \n",
    "            \n",
     "  def eventCFReco(self, userId, eventId):    \n",
     "    \"\"\"\n",
     "    Item-based CF recommendation score for (userId, eventId).\n",
     "    Pseudocode:\n",
     "    for item i \n",
     "        for every item j that u has a preference for\n",
     "            compute similarity s between i and j\n",
     "            add u's preference for j weighted by s to a running average\n",
     "    return top items, ranked by weighted average\n",
     "    \"\"\"\n",
     "    ans = 0.0\n",
     "    similarity = 0.0\n",
     "    \n",
     "    user_id = self.userIndex[userId]\n",
     "    event_id = self.eventIndex[eventId]\n",
     "    \n",
     "    for event in self.itemsForUser[user_id]:  # all events user_id has rated\n",
     "        sim = self.sim_cal_ItemCF(event_id, event)    # similarity between event_id and event\n",
     "        if (sim <= 0):continue\n",
     "    \n",
     "        ans += sim * self.userEventScores[user_id,event]\n",
     "        similarity += sim\n",
     "        \n",
     "    #print rat_acc,sim_accumulate  \n",
     "    if similarity == 0: # no similar event rated; fall back to the global mean\n",
     "        return  self.mu \n",
     "    else:\n",
     "        return ans / similarity\n",
     "    \n",
     "  def svdCFReco(self, userId, eventId):\n",
     "    # Model-based CF (SVD/LFM) prediction for (userId, eventId).\n",
     "    u = self.userIndex[userId]\n",
     "    i = self.eventIndex[eventId]\n",
     "\n",
     "    return self.pred_SVD(u,i)\n",
     "\n",
     "  def userReco(self, userId, eventId):\n",
     "    \"\"\"\n",
     "    Like user-based CF, but the user-user similarity comes from the users'\n",
     "    own attributes (userSimMatrix) rather than from co-rated items.\n",
     "    Pseudocode:\n",
     "    for item i\n",
     "      for every other user v that has a preference for i\n",
     "        compute similarity s between u and v\n",
     "        incorporate v's preference for i weighted by s into running average\n",
     "    return top items ranked by weighted average\n",
     "    \"\"\"\n",
     "    i = self.userIndex[userId]\n",
     "    j = self.eventIndex[eventId]\n",
     "\n",
     "    vs = self.userEventScores[:, j]  # every user's score for event j\n",
     "    sims = self.userSimMatrix[i, :]  # attribute similarity of user i to every user\n",
     "\n",
     "    prod = sims * vs  # similarity-weighted sum of scores\n",
     "\n",
     "    try:\n",
     "      # subtract i's own score so the user does not count itself\n",
     "      return prod[0, 0] - self.userEventScores[i, j]\n",
     "    except IndexError:\n",
     "      return 0\n",
     "\n",
     "  def eventReco(self, userId, eventId):\n",
     "    \"\"\"\n",
     "    Like item-based CF, but the item-item similarity comes from the events'\n",
     "    own attributes; returns a (property-based, content-based) score pair.\n",
     "    Pseudocode:\n",
     "    for item i \n",
     "      for every item j that u has a preference for\n",
     "        compute similarity s between i and j\n",
     "        add u's preference for j weighted by s to a running average\n",
     "    return top items, ranked by weighted average\n",
     "    \"\"\"\n",
     "    i = self.userIndex[userId]\n",
     "    j = self.eventIndex[eventId]\n",
     "    js = self.userEventScores[i, :]  # user i's scores for every event\n",
     "    psim = self.eventPropSim[:, j]  # property-based similarity to event j\n",
     "    csim = self.eventContSim[:, j]  # content-based similarity to event j\n",
     "    pprod = js * psim\n",
     "    cprod = js * csim\n",
     "    \n",
     "    pscore = 0\n",
     "    cscore = 0\n",
     "    try:\n",
     "      # subtract the user's own score for j so it does not count itself\n",
     "      pscore = pprod[0, 0] - self.userEventScores[i, j]\n",
     "    except IndexError:\n",
     "      pass\n",
     "    try:\n",
     "      cscore = cprod[0, 0] - self.userEventScores[i, j]\n",
     "    except IndexError:\n",
     "      pass\n",
     "    return pscore, cscore\n",
     "\n",
     "  def userPop(self, userId):\n",
     "    \"\"\"\n",
     "    Estimate the user's sociability from the number of friends they have;\n",
     "    users with many friends may be more inclined to attend social events.\n",
     "    \"\"\"\n",
     "    if self.userIndex.has_key(userId):\n",
     "      i = self.userIndex[userId]\n",
     "      try:\n",
     "        return self.numFriends[0, i]\n",
     "      except IndexError:\n",
     "        return 0\n",
     "    else:\n",
     "      return 0\n",
     "\n",
     "  def friendInfluence(self, userId):\n",
     "    \"\"\"\n",
     "    Influence of friends on the user: roughly, how many of the user's\n",
     "    friends are keen event-goers. A circle of friends that actively\n",
     "    attends events may influence the user to attend as well.\n",
     "    \"\"\"\n",
     "    nusers = np.shape(self.userFriends)[1]\n",
     "    i = self.userIndex[userId]\n",
     "    return (self.userFriends[i, :].sum(axis=0) / nusers)[0,0]\n",
     "\n",
     "  def eventPop(self, eventId):\n",
     "    \"\"\"\n",
     "    Intrinsic popularity of the event, measured by how many people attend.\n",
     "    \"\"\"\n",
     "    i = self.eventIndex[eventId]\n",
     "    return self.eventPopularity[i, 0]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-01-24T13:44:40.491000Z",
     "start_time": "2018-01-24T13:44:40.435000Z"
    },
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def generateRSData(RS, train=True, header=True):\n",
    "    \"\"\"\n",
    "    把前面user-based协同过滤 和 item-based协同过滤，以及各种热度和影响度作为特征组合在一起\n",
    "    生成新的训练数据，用于分类器分类使用\n",
    "    \"\"\"\n",
    "    fn = \"train.csv\" if train else \"test.csv\"\n",
    "    fin = open(fn, 'rb')\n",
    "    fout = open(\"RS_\" + fn, 'wb')\n",
    "    \n",
    "    #忽略第一行（列名字）\n",
    "    fin.readline().strip().split(\",\")\n",
    "    \n",
    "    # write output header\n",
    "    if header:\n",
    "      ocolnames = [\"invited\", \"userCF_reco\", \"evtCF_reco\",\"svdCF_reco\",\"user_reco\", \"evt_p_reco\",\n",
    "        \"evt_c_reco\", \"user_pop\", \"frnd_infl\", \"evt_pop\"]\n",
    "      if train:\n",
    "        ocolnames.append(\"interested\")\n",
    "        ocolnames.append(\"not_interested\")\n",
    "      fout.write(\",\".join(ocolnames) + \"\\n\")\n",
    "    \n",
    "    ln = 0\n",
    "    for line in fin:\n",
    "      ln += 1\n",
    "      if ln%500 == 0:\n",
    "          print \"%s:%d (userId, eventId)=(%s, %s)\" % (fn, ln, userId, eventId)\n",
    "          #break;\n",
    "      \n",
    "      cols = line.strip().split(\",\")\n",
    "      userId = cols[0]\n",
    "      eventId = cols[1]\n",
    "      invited = cols[2]\n",
    "      \n",
    "      userCF_reco = RS.userCFReco(userId, eventId)\n",
    "      itemCF_reco = RS.eventCFReco(userId, eventId)\n",
    "      svdCF_reco = RS.svdCFReco(userId, eventId)\n",
    "        \n",
    "      user_reco = RS.userReco(userId, eventId)\n",
    "      evt_p_reco, evt_c_reco = RS.eventReco(userId, eventId)\n",
    "      user_pop = RS.userPop(userId)\n",
    "     \n",
    "      frnd_infl = RS.friendInfluence(userId)\n",
    "      evt_pop = RS.eventPop(eventId)\n",
    "      ocols = [invited, userCF_reco, itemCF_reco, svdCF_reco,user_reco, evt_p_reco,\n",
    "        evt_c_reco, user_pop, frnd_infl, evt_pop]\n",
    "      \n",
    "      if train:\n",
    "        ocols.append(cols[4]) # interested\n",
    "        ocols.append(cols[5]) # not_interested\n",
    "      fout.write(\",\".join(map(lambda x: str(x), ocols)) + \"\\n\")\n",
    "    \n",
    "    fin.close()\n",
    "    fout.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-01-24T13:48:37.122000Z",
     "start_time": "2018-01-24T13:44:40.495000Z"
    },
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SVD Train...\n",
      " processing epoch 0\n",
      " processing epoch 1\n",
      " processing epoch 2\n",
      " processing epoch 3\n",
      " processing epoch 4\n",
      " processing epoch 5\n",
      " processing epoch 6\n",
      " processing epoch 7\n",
      " processing epoch 8\n",
      " processing epoch 9\n",
      " processing epoch 10\n",
      " processing epoch 11\n",
      " processing epoch 12\n",
      " processing epoch 13\n",
      " processing epoch 14\n",
      " processing epoch 15\n",
      " processing epoch 16\n",
      " processing epoch 17\n",
      " processing epoch 18\n",
      " processing epoch 19\n",
      " processing epoch 20\n",
      " processing epoch 21\n",
      " processing epoch 22\n",
      " processing epoch 23\n",
      " processing epoch 24\n",
      " processing epoch 25\n",
      " processing epoch 26\n",
      " processing epoch 27\n",
      " processing epoch 28\n",
      " processing epoch 29\n",
      " processing epoch 30\n",
      " processing epoch 31\n",
      " processing epoch 32\n",
      " processing epoch 33\n",
      " processing epoch 34\n",
      " processing epoch 35\n",
      " processing epoch 36\n",
      " processing epoch 37\n",
      " processing epoch 38\n",
      " processing epoch 39\n",
      " processing epoch 40\n",
      " processing epoch 41\n",
      " processing epoch 42\n",
      " processing epoch 43\n",
      " processing epoch 44\n",
      " processing epoch 45\n",
      " processing epoch 46\n",
      " processing epoch 47\n",
      " processing epoch 48\n",
      " processing epoch 49\n",
      " processing epoch 50\n",
      " processing epoch 51\n",
      " processing epoch 52\n",
      " processing epoch 53\n",
      " processing epoch 54\n",
      " processing epoch 55\n",
      " processing epoch 56\n",
      " processing epoch 57\n",
      " processing epoch 58\n",
      " processing epoch 59\n",
      " processing epoch 60\n",
      " processing epoch 61\n",
      " processing epoch 62\n",
      " processing epoch 63\n",
      " processing epoch 64\n",
      " processing epoch 65\n",
      " processing epoch 66\n",
      " processing epoch 67\n",
      " processing epoch 68\n",
      " processing epoch 69\n",
      " processing epoch 70\n",
      " processing epoch 71\n",
      " processing epoch 72\n",
      " processing epoch 73\n",
      " processing epoch 74\n",
      " processing epoch 75\n",
      " processing epoch 76\n",
      " processing epoch 77\n",
      " processing epoch 78\n",
      " processing epoch 79\n",
      " processing epoch 80\n",
      " processing epoch 81\n",
      " processing epoch 82\n",
      " processing epoch 83\n",
      " processing epoch 84\n",
      " processing epoch 85\n",
      " processing epoch 86\n",
      " processing epoch 87\n",
      " processing epoch 88\n",
      " processing epoch 89\n",
      " processing epoch 90\n",
      " processing epoch 91\n",
      " processing epoch 92\n",
      " processing epoch 93\n",
      " processing epoch 94\n",
      " processing epoch 95\n",
      " processing epoch 96\n",
      " processing epoch 97\n",
      " processing epoch 98\n",
      " processing epoch 99\n",
      "SVD++ trained\n",
      "生成训练数据...\n",
      "\n",
      "train.csv:500 (userId, eventId)=(123290209, 1887085024)\n",
      "train.csv:1000 (userId, eventId)=(272886293, 199858305)\n",
      "train.csv:1500 (userId, eventId)=(395305791, 1582270949)\n",
      "train.csv:2000 (userId, eventId)=(527523423, 3272728211)\n",
      "train.csv:2500 (userId, eventId)=(651258472, 792632006)\n",
      "train.csv:3000 (userId, eventId)=(811791433, 524756826)\n",
      "train.csv:3500 (userId, eventId)=(985547042, 1269035551)\n",
      "train.csv:4000 (userId, eventId)=(1107615001, 173949238)\n",
      "train.csv:4500 (userId, eventId)=(1236336671, 3849306291)\n",
      "train.csv:5000 (userId, eventId)=(1414301782, 2652356640)\n",
      "train.csv:5500 (userId, eventId)=(1595465532, 955398943)\n",
      "train.csv:6000 (userId, eventId)=(1747091728, 2131379889)\n",
      "train.csv:6500 (userId, eventId)=(1914182220, 955398943)\n",
      "train.csv:7000 (userId, eventId)=(2071842684, 1076364848)\n",
      "train.csv:7500 (userId, eventId)=(2217853337, 3051438735)\n",
      "train.csv:8000 (userId, eventId)=(2338481531, 2525447278)\n",
      "train.csv:8500 (userId, eventId)=(2489551967, 520657921)\n",
      "train.csv:9000 (userId, eventId)=(2650493630, 87962584)\n",
      "train.csv:9500 (userId, eventId)=(2791418962, 4223848259)\n",
      "train.csv:10000 (userId, eventId)=(2903662804, 2791462807)\n",
      "train.csv:10500 (userId, eventId)=(3036141956, 3929507420)\n",
      "train.csv:11000 (userId, eventId)=(3176074542, 3459485614)\n",
      "train.csv:11500 (userId, eventId)=(3285425249, 2271782630)\n",
      "train.csv:12000 (userId, eventId)=(3410667855, 1063772489)\n",
      "train.csv:12500 (userId, eventId)=(3531604778, 2584839423)\n",
      "train.csv:13000 (userId, eventId)=(3686871863, 53495098)\n",
      "train.csv:13500 (userId, eventId)=(3833637800, 2415873572)\n",
      "train.csv:14000 (userId, eventId)=(3944021305, 2096772901)\n",
      "train.csv:14500 (userId, eventId)=(4075466480, 3567240505)\n",
      "train.csv:15000 (userId, eventId)=(4197193550, 1628057176)\n",
      "生成预测数据...\n",
      "\n",
      "test.csv:500 (userId, eventId)=(182290053, 2529072432)\n",
      "test.csv:1000 (userId, eventId)=(433510318, 4244463632)\n",
      "test.csv:1500 (userId, eventId)=(632808865, 2845303452)\n",
      "test.csv:2000 (userId, eventId)=(813611885, 2036538169)\n",
      "test.csv:2500 (userId, eventId)=(1010701404, 303459881)\n",
      "test.csv:3000 (userId, eventId)=(1210932037, 2529072432)\n",
      "test.csv:3500 (userId, eventId)=(1452921099, 2705317682)\n",
      "test.csv:4000 (userId, eventId)=(1623287180, 1626678328)\n",
      "test.csv:4500 (userId, eventId)=(1855201342, 2603032829)\n",
      "test.csv:5000 (userId, eventId)=(2083900381, 2529072432)\n",
      "test.csv:5500 (userId, eventId)=(2318415276, 2509151803)\n",
      "test.csv:6000 (userId, eventId)=(2528161539, 4025975316)\n",
      "test.csv:6500 (userId, eventId)=(2749110768, 4244406355)\n",
      "test.csv:7000 (userId, eventId)=(2927772127, 1532377761)\n",
      "test.csv:7500 (userId, eventId)=(3199685636, 1776393554)\n",
      "test.csv:8000 (userId, eventId)=(3393388475, 680270887)\n",
      "test.csv:8500 (userId, eventId)=(3601169721, 154434302)\n",
      "test.csv:9000 (userId, eventId)=(3828963415, 3067222491)\n",
      "test.csv:9500 (userId, eventId)=(4018723397, 2522610844)\n",
      "test.csv:10000 (userId, eventId)=(4180064266, 2658555390)\n"
     ]
    }
   ],
   "source": [
     "# Build the recommender (loads precomputed features, trains the SVD\n",
     "# model), then generate the combined feature files RS_train.csv and\n",
     "# RS_test.csv for the downstream classifier.\n",
     "RS = RecommonderSystem()\n",
     "print \"生成训练数据...\\n\"\n",
     "generateRSData(RS,train=True,  header=True)\n",
     "\n",
     "print \"生成预测数据...\\n\"\n",
     "generateRSData(RS, train=False, header=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "时间、地点等特征都没有处理了，可以考虑用户看到event的时间与event开始时间的差、用户地点和event地点的差异。。。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.14"
  },
  "toc": {
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": "block",
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
