{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Directory holding the challenge's raw and precomputed data files\n",
    "path='../../event_recommendation_engine_challenge_data/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 197,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将所有特征串联起来，构成RS_Train.csv\n",
    "#RS_Test.csv\n",
    "#为最后推荐系统做准备\n",
    "from __future__ import division\n",
    "\n",
    "import pickle\n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "import scipy.sparse as ss\n",
    "from numpy.random import random  \n",
    "from collections import defaultdict\n",
    "\n",
    "class RecommonderSystem:\n",
    "    def __init__(self, X):\n",
    "        \"\"\"Load all precomputed feature matrices and train the SVD model.\n",
    "\n",
    "        X: training records; column 0 = user id, column 1 = event id,\n",
    "           column 4 = 'interested' label (consumed by train_SVD).\n",
    "        \"\"\"\n",
    "        self.X = X\n",
    "\n",
    "        # Compact integer indices for users and events.\n",
    "        # Use context managers so the file handles are closed deterministically.\n",
    "        with open(path + \"PE_userIndex.pkl\", 'rb') as f:\n",
    "            self.userIndex = pickle.load(f)\n",
    "        with open(path + \"PE_eventIndex.pkl\", 'rb') as f:\n",
    "            self.eventIndex = pickle.load(f)\n",
    "        self.n_users = len(self.userIndex)\n",
    "        self.n_items = len(self.eventIndex)\n",
    "\n",
    "        # User-event rating matrix R. train_SVD re-reads the raw records\n",
    "        # itself because the two on-disk formats differ.\n",
    "        self.userEventScores = sio.mmread(path + \"PE_userEventScores\").todense()\n",
    "\n",
    "        # Inverted indexes: events rated by each user / users per event.\n",
    "        with open(path + \"PE_eventsForUser.pkl\", 'rb') as f:\n",
    "            self.itemsForUser = pickle.load(f)\n",
    "        with open(path + \"PE_usersForEvent.pkl\", 'rb') as f:\n",
    "            self.usersForItem = pickle.load(f)\n",
    "\n",
    "        # Model-based collaborative filtering: initialize, then train.\n",
    "        self.init_SVD()\n",
    "        self.train_SVD(self.X)\n",
    "\n",
    "        # User-user similarity derived from user profile attributes.\n",
    "        self.userSimMatrix = sio.mmread(path + \"US_userSimMatrix\").todense()\n",
    "\n",
    "        # Event-event similarities from event properties / content.\n",
    "        self.eventPropSim = sio.mmread(path + \"EV_eventPropSim\").todense()\n",
    "        self.eventContSim = sio.mmread(path + \"EV_eventContSim\").todense()\n",
    "\n",
    "        # Friend count per user, and the aggregated influence of each user's\n",
    "        # friends' event scores on that user.\n",
    "        self.numFriends = sio.mmread(path + \"UF_numFriends\")\n",
    "        self.userFriends = sio.mmread(path + \"UF_userFriends\").todense()\n",
    "\n",
    "        # Event popularity (attendance-based).\n",
    "        self.eventPopularity = sio.mmread(path + \"EA_eventPopularity\").todense()\n",
    "\n",
    "        # Lazy similarity cache; -1 marks 'not computed yet'. Index 0 is\n",
    "        # wasted because ids start at 1.\n",
    "        # NOTE(review): this single cache is shared by both sim_cal_UserCF and\n",
    "        # sim_cal_ItemCF, so user ids and item ids can collide in it --\n",
    "        # confirm whether two separate caches are intended.\n",
    "        self.n_Items = len(self.usersForItem) + 1\n",
    "        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;\n",
    "        # the builtin float is the documented replacement.\n",
    "        self.similarity = np.zeros((self.n_Items, self.n_Items), dtype=float)\n",
    "        self.similarity[:, :] = -1\n",
    "\n",
    "\n",
    "    def init_SVD(self, K=20):\n",
    "        \"\"\"Initialize the SVD latent-factor model parameters.\n",
    "\n",
    "        K: number of latent factors.\n",
    "        \"\"\"\n",
    "        self.K = K\n",
    "\n",
    "        # Global mean rating plus the per-user and per-item bias terms.\n",
    "        self.mu = 0.0\n",
    "        self.bu = {}\n",
    "        self.bi = {}\n",
    "\n",
    "        # Latent factor vectors, created lazily during training:\n",
    "        # P maps user -> (K, 1) vector, Q maps item -> (K, 1) vector.\n",
    "        self.P = {}\n",
    "        self.Q = {}\n",
    "    def train_SVD(self,X, steps=100,gamma=0.04,Lambda=0.15):\n",
    "        \"\"\"Train the SVD model with stochastic gradient descent.\n",
    "\n",
    "        X: training records (the body reads self.X, which the constructor\n",
    "           set from the same array; column 4 is the 'interested' label).\n",
    "        steps: number of SGD passes over the data.\n",
    "        gamma: learning rate, decayed by factor 0.93 after each pass.\n",
    "        Lambda: L2 regularization strength.\n",
    "        \"\"\"\n",
    "        print (\"SVD Train...\")\n",
    "        # Global mean of the 'interested' column.\n",
    "        self.mu = np.mean(self.X[:,4])\n",
    "        for step in range(steps):\n",
    "            print ('the ',step,'-th  step is running')\n",
    "\n",
    "            # Visit the samples in a fresh random order each pass.\n",
    "            # (The original per-pass dense R matrix and the uids/i_ids lists\n",
    "            # were never read and have been removed.)\n",
    "            kk = np.random.permutation(self.X.shape[0])\n",
    "            for j in range(self.X.shape[0]):\n",
    "                # One training sample: map raw ids to compact indices.\n",
    "                k = kk[j]\n",
    "                u = self.userIndex[bytes(str(self.X[k][0]),'utf-8')]  # user\n",
    "                i = self.eventIndex[bytes(str(self.X[k][1]),'utf-8')] # event\n",
    "\n",
    "                self.bi.setdefault(i,0)\n",
    "                self.bu.setdefault(u,0)\n",
    "\n",
    "                # Lazily create the factor vectors, then compute the residual\n",
    "                # between the observed label and the current prediction.\n",
    "                self.Q.setdefault(i,random((self.K,1))/10*(np.sqrt(self.K)))\n",
    "                self.P.setdefault(u,random((self.K,1))/10*(np.sqrt(self.K)))\n",
    "                r_ui = int(self.X[k][4])  # observed 'interested' label\n",
    "                eui = r_ui - self.pred_SVD(u,i)\n",
    "\n",
    "                # SGD updates for the biases and the factor vectors.\n",
    "                self.bu[u] += gamma*(eui-Lambda*self.bu[u])\n",
    "                self.bi[i] += gamma*(eui-Lambda*self.bi[i])\n",
    "\n",
    "                # BUG FIX: temp must be a copy -- '+=' mutates the ndarray in\n",
    "                # place, so without .copy() the P[u] update would see the\n",
    "                # already-updated Q[i] instead of the pre-update value.\n",
    "                temp = self.Q[i].copy()\n",
    "                self.Q[i] += gamma*(eui*self.P[u]-Lambda*self.Q[i])\n",
    "                self.P[u] += gamma*(eui*temp-Lambda*self.P[u])\n",
    "            # Decay the learning rate after each full pass.\n",
    "            gamma = gamma*0.93\n",
    "        print (\"SVD trained\")\n",
    "    \n",
    "    def pred_SVD(self, uid, i_id):\n",
    "        \"\"\"Predict user uid's score for item i_id from the current SVD\n",
    "        parameters.\n",
    "\n",
    "        Unseen users/items fall back to zero biases and zero factor vectors,\n",
    "        so the prediction degrades gracefully toward the global mean mu.\n",
    "        The returned score is clamped to [0, 1].\n",
    "        \"\"\"\n",
    "        self.bi.setdefault(i_id, 0)\n",
    "        self.bu.setdefault(uid, 0)\n",
    "        self.P.setdefault(uid, np.zeros((self.K, 1)))\n",
    "        self.Q.setdefault(i_id, np.zeros((self.K, 1)))\n",
    "\n",
    "        # The original 'self.Q[i_id].all() == None' guards were dead code\n",
    "        # (always False after the setdefault calls above) and were removed.\n",
    "        ans = self.mu + self.bi[i_id] + self.bu[uid] + np.sum(self.Q[i_id] * self.P[uid])\n",
    "\n",
    "        # Clamp the score into [0, 1].\n",
    "        if ans > 1:\n",
    "            return 1\n",
    "        elif ans < 0:\n",
    "            return 0\n",
    "        return ans\n",
    "\n",
    "    def sim_cal_UserCF(self, uid1, uid2):\n",
    "        \"\"\"Pearson correlation between two users, based on their item ratings.\n",
    "\n",
    "        Results are cached symmetrically in self.similarity (-1 means 'not\n",
    "        computed yet'). Returns 0 when the users share no rated item or when\n",
    "        either side has zero variance.\n",
    "        \"\"\"\n",
    "        if self.similarity[uid1][uid2] != -1:  # already cached\n",
    "            return self.similarity[uid1][uid2]\n",
    "\n",
    "        # Items rated by both users.\n",
    "        si = {}\n",
    "        for item in self.itemsForUser[uid1]:\n",
    "            if item in self.itemsForUser[uid2]:\n",
    "                si[item] = 1\n",
    "\n",
    "        n = len(si)  # number of co-rated items\n",
    "        if (n == 0):  # no overlap -> similarity 0\n",
    "            self.similarity[uid1][uid2] = 0\n",
    "            # BUG FIX: the original wrote [uid1][uid2] twice; cache the\n",
    "            # symmetric entry instead.\n",
    "            self.similarity[uid2][uid1] = 0\n",
    "            return 0\n",
    "\n",
    "        # Each user's ratings over the co-rated items.\n",
    "        s1 = np.array([self.itemsForUser[uid1][item] for item in si])\n",
    "        s2 = np.array([self.itemsForUser[uid2][item] for item in si])\n",
    "\n",
    "        sum1 = np.sum(s1)\n",
    "        sum2 = np.sum(s2)\n",
    "        sum1Sq = np.sum(s1 ** 2)\n",
    "        sum2Sq = np.sum(s2 ** 2)\n",
    "        pSum = np.sum(s1 * s2)\n",
    "\n",
    "        # Pearson correlation: numerator and denominator.\n",
    "        num = pSum - (sum1 * sum2 / n)\n",
    "        den = np.sqrt((sum1Sq - sum1 ** 2 / n) * (sum2Sq - sum2 ** 2 / n))\n",
    "        if den == 0:\n",
    "            self.similarity[uid1][uid2] = 0\n",
    "            self.similarity[uid2][uid1] = 0\n",
    "            return 0\n",
    "        similarity = num / den\n",
    "        self.similarity[uid1][uid2] = similarity\n",
    "        self.similarity[uid2][uid1] = similarity\n",
    "\n",
    "        return similarity\n",
    "\n",
    "    def userCFReco(self, uid, i_id):\n",
    "        \"\"\"User-based CF recommendation score of item i_id for user uid.\n",
    "\n",
    "        Weighted average of the ratings given to i_id by other users,\n",
    "        weighted by each rater's similarity to uid; falls back to the global\n",
    "        mean when no positively-similar rater exists.\n",
    "        \"\"\"\n",
    "        weighted_sum = 0.0\n",
    "        weight_total = 0.0\n",
    "\n",
    "        # Every user who rated i_id contributes proportionally to similarity.\n",
    "        for other in self.usersForItem[i_id]:\n",
    "            s = self.sim_cal_UserCF(other, uid)\n",
    "            if s <= 0:\n",
    "                continue\n",
    "            weighted_sum += s * self.usersForItem[i_id][other]\n",
    "            weight_total += s\n",
    "\n",
    "        if weight_total == 0:\n",
    "            # Nobody similar rated this item: fall back to the global mean.\n",
    "            return self.mu\n",
    "        return weighted_sum / weight_total\n",
    "\n",
    "\n",
    "        \n",
    "    def sim_cal_ItemCF(self, i_id1, i_id2):\n",
    "        \"\"\"Pearson correlation between two items, based on common raters.\n",
    "\n",
    "        Results are cached symmetrically in self.similarity (-1 means 'not\n",
    "        computed yet'). Returns 0 when no user rated both items or when\n",
    "        either side has zero variance.\n",
    "        \"\"\"\n",
    "        if self.similarity[i_id1][i_id2] != -1:  # already cached\n",
    "            return self.similarity[i_id1][i_id2]\n",
    "\n",
    "        # Users who rated both items.\n",
    "        si = {}\n",
    "        for user in self.usersForItem[i_id1]:\n",
    "            if user in self.usersForItem[i_id2]:\n",
    "                si[user] = 1\n",
    "\n",
    "        n = len(si)  # number of common raters\n",
    "        if (n == 0):  # no common rater -> similarity 0\n",
    "            self.similarity[i_id1][i_id2] = 0\n",
    "            # BUG FIX: the original wrote the diagonal [i_id1][i_id1]; cache\n",
    "            # the symmetric entry instead.\n",
    "            self.similarity[i_id2][i_id1] = 0\n",
    "            return 0\n",
    "\n",
    "        # Each item's ratings from the common raters.\n",
    "        s1 = np.array([self.usersForItem[i_id1][u] for u in si])\n",
    "        s2 = np.array([self.usersForItem[i_id2][u] for u in si])\n",
    "\n",
    "        sum1 = np.sum(s1)\n",
    "        sum2 = np.sum(s2)\n",
    "        sum1Sq = np.sum(s1 ** 2)\n",
    "        sum2Sq = np.sum(s2 ** 2)\n",
    "        pSum = np.sum(s1 * s2)\n",
    "\n",
    "        # Pearson correlation: numerator and denominator.\n",
    "        num = pSum - (sum1 * sum2 / n)\n",
    "        den = np.sqrt((sum1Sq - sum1 ** 2 / n) * (sum2Sq - sum2 ** 2 / n))\n",
    "        if den == 0:\n",
    "            self.similarity[i_id1][i_id2] = 0\n",
    "            self.similarity[i_id2][i_id1] = 0\n",
    "            return 0\n",
    "        similarity = num / den\n",
    "\n",
    "        self.similarity[i_id1][i_id2] = similarity\n",
    "        self.similarity[i_id2][i_id1] = similarity\n",
    "        return similarity\n",
    "    \n",
    "    def eventCFReco(self, uid, i_id):\n",
    "        \"\"\"Item-based CF recommendation score of item i_id for user uid.\n",
    "\n",
    "        Weighted average of uid's own ratings, weighted by each rated item's\n",
    "        similarity to i_id; falls back to the global mean when no\n",
    "        positively-similar item exists.\n",
    "        \"\"\"\n",
    "        sim_accumulate = 0.0\n",
    "        rat_acc = 0.0\n",
    "\n",
    "        for item in self.itemsForUser[uid]:  # all items rated by uid\n",
    "            # BUG FIX: the original called self.sim_cal(), which does not\n",
    "            # exist; the item-item similarity method is sim_cal_ItemCF().\n",
    "            sim = self.sim_cal_ItemCF(item, i_id)\n",
    "            if sim < 0:\n",
    "                continue\n",
    "            rat_acc += sim * self.itemsForUser[uid][item]\n",
    "            sim_accumulate += sim\n",
    "\n",
    "        if sim_accumulate == 0:  # no similar item: return the global mean\n",
    "            return self.mu\n",
    "        return rat_acc / sim_accumulate\n",
    "\n",
    "    \n",
    "    def svdCFReco(self, userId, eventId):\n",
    "        \"\"\"Model-based CF (SVD / latent factor) score for a user-event pair.\"\"\"\n",
    "        return self.pred_SVD(self.userIndex[userId], self.eventIndex[eventId])\n",
    "\n",
    "    def userReco(self, userId, eventId):\n",
    "        \"\"\"User-based score where the user-user similarity comes from user\n",
    "        profile attributes rather than co-ratings.\n",
    "\n",
    "        Multiplies this user's similarity row with the event's score column,\n",
    "        then subtracts the user's own score for the event.\n",
    "        \"\"\"\n",
    "        u = self.userIndex[userId]\n",
    "        e = self.eventIndex[eventId]\n",
    "\n",
    "        event_scores = self.userEventScores[:, e]\n",
    "        similarities = self.userSimMatrix[u, :]\n",
    "        weighted = similarities * event_scores\n",
    "\n",
    "        try:\n",
    "            return weighted[0, 0] - self.userEventScores[u, e]\n",
    "        except IndexError:\n",
    "            return 0\n",
    "\n",
    "    def eventReco(self, userId, eventId):\n",
    "        \"\"\"Item-based scores where event-event similarity comes from event\n",
    "        attributes (properties and content) rather than co-ratings.\n",
    "\n",
    "        Returns a (property-based, content-based) pair; each score is the\n",
    "        user's score row times the event's similarity column, minus the\n",
    "        user's own score for the event. 0 is used when indexing fails.\n",
    "        \"\"\"\n",
    "        u = self.userIndex[userId]\n",
    "        e = self.eventIndex[eventId]\n",
    "        user_scores = self.userEventScores[u, :]\n",
    "        prop_sim = self.eventPropSim[:, e]\n",
    "        cont_sim = self.eventContSim[:, e]\n",
    "\n",
    "        pscore = 0\n",
    "        cscore = 0\n",
    "        try:\n",
    "            pscore = (user_scores * prop_sim)[0, 0] - self.userEventScores[u, e]\n",
    "        except IndexError:\n",
    "            pass\n",
    "        try:\n",
    "            cscore = (user_scores * cont_sim)[0, 0] - self.userEventScores[u, e]\n",
    "        except IndexError:\n",
    "            pass\n",
    "        return pscore, cscore\n",
    "\n",
    "    def userPop(self, userId):\n",
    "        \"\"\"Social-activity proxy for a user: their friend count.\n",
    "\n",
    "        Returns 0 for unknown users or out-of-range indices.\n",
    "        \"\"\"\n",
    "        if userId not in self.userIndex:\n",
    "            return 0\n",
    "        try:\n",
    "            return self.numFriends[0, self.userIndex[userId]]\n",
    "        except IndexError:\n",
    "            return 0\n",
    "\n",
    "    def friendInfluence(self, userId):\n",
    "        \"\"\"Influence of a user's friends' event participation on the user.\n",
    "\n",
    "        Sums the user's row of the friend-influence matrix and normalizes by\n",
    "        the matrix's column count (number of users).\n",
    "        \"\"\"\n",
    "        idx = self.userIndex[userId]\n",
    "        n_cols = np.shape(self.userFriends)[1]\n",
    "        return (self.userFriends[idx, :].sum(axis=0) / n_cols)[0, 0]\n",
    "\n",
    "    def eventPop(self, eventId):\n",
    "        \"\"\"Popularity of an event, measured by attendance counts.\"\"\"\n",
    "        return self.eventPopularity[self.eventIndex[eventId], 0]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 202,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generateRSData(RS, train=True, header=True):\n",
    "    \"\"\"\n",
    "    Combine the user-based / item-based / SVD CF scores and the popularity\n",
    "    and influence features into one CSV for the downstream classifier.\n",
    "\n",
    "    RS: a trained RecommonderSystem instance.\n",
    "    train: read train.csv and append the interested / not_interested labels;\n",
    "           otherwise read test.csv.\n",
    "    header: write a header row to the output file.\n",
    "    \"\"\"\n",
    "    fn = \"train.csv\" if train else \"test.csv\"\n",
    "    fin = open(path + fn, 'rb')\n",
    "    fout = open(path + \"RS_\" + fn, 'wb')\n",
    "\n",
    "    # Skip the input header row (column names).\n",
    "    fin.readline()\n",
    "\n",
    "    # BUG FIX: ocolnames was referenced even when header=False (NameError);\n",
    "    # the entire header-writing step now lives inside the guard.\n",
    "    if header:\n",
    "        ocolnames = [\"invited\", \"userCF_reco\", \"evtCF_reco\", \"svdCF_reco\",\n",
    "                     \"user_reco\", \"evt_p_reco\", \"evt_c_reco\", \"user_pop\",\n",
    "                     \"frnd_infl\", \"evt_pop\"]\n",
    "        if train:\n",
    "            ocolnames.append(\"interested\")\n",
    "            ocolnames.append(\"not_interested\")\n",
    "        fout.write(bytes(\",\".join(ocolnames) + \"\\n\", 'utf-8'))\n",
    "\n",
    "    ln = 0\n",
    "    for line in fin:\n",
    "        ln += 1\n",
    "\n",
    "        cols = line.strip().split(b\",\")\n",
    "        userId = cols[0]\n",
    "        eventId = cols[1]\n",
    "        invited = cols[2]\n",
    "\n",
    "        # Progress log every 500 records (printed after parsing, so the ids\n",
    "        # shown belong to the current record, not the previous one).\n",
    "        if ln % 500 == 0:\n",
    "            print(\"%s:%d (userId, eventId)=(%s, %s)\" % (fn, ln, userId, eventId))\n",
    "\n",
    "        userCF_reco = RS.userCFReco(userId, eventId)\n",
    "        itemCF_reco = RS.eventCFReco(userId, eventId)\n",
    "        svdCF_reco = RS.svdCFReco(userId, eventId)\n",
    "\n",
    "        user_reco = RS.userReco(userId, eventId)\n",
    "        evt_p_reco, evt_c_reco = RS.eventReco(userId, eventId)\n",
    "        user_pop = RS.userPop(userId)\n",
    "        frnd_infl = RS.friendInfluence(userId)\n",
    "        evt_pop = RS.eventPop(eventId)\n",
    "\n",
    "        ocols = [invited, userCF_reco, itemCF_reco, svdCF_reco, user_reco,\n",
    "                 evt_p_reco, evt_c_reco, user_pop, frnd_infl, evt_pop]\n",
    "        if train:\n",
    "            ocols.append(cols[4])  # interested\n",
    "            ocols.append(cols[5])  # not_interested\n",
    "        fout.write(bytes(\",\".join(str(x) for x in ocols) + \"\\n\", 'utf-8'))\n",
    "    fin.close()\n",
    "    fout.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 203,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd \n",
    "# Load the raw training records as a plain ndarray; train_SVD indexes\n",
    "# columns 0 (user id), 1 (event id) and 4 (interested).\n",
    "df_train = pd.read_csv(path+'train.csv')\n",
    "train_data = df_train.values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 204,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd \n",
    "# Load the raw test records; test_data is not referenced elsewhere in this\n",
    "# notebook (generateRSData re-reads test.csv itself).\n",
    "df_test = pd.read_csv(path+'test.csv')\n",
    "test_data = df_test.values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 205,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SVD Train...\n",
      "the  0 -th  step is running\n",
      "the  1 -th  step is running\n",
      "the  2 -th  step is running\n",
      "the  3 -th  step is running\n",
      "the  4 -th  step is running\n",
      "the  5 -th  step is running\n",
      "the  6 -th  step is running\n",
      "the  7 -th  step is running\n",
      "the  8 -th  step is running\n",
      "the  9 -th  step is running\n",
      "the  10 -th  step is running\n",
      "the  11 -th  step is running\n",
      "the  12 -th  step is running\n",
      "the  13 -th  step is running\n",
      "the  14 -th  step is running\n",
      "the  15 -th  step is running\n",
      "the  16 -th  step is running\n",
      "the  17 -th  step is running\n",
      "the  18 -th  step is running\n",
      "the  19 -th  step is running\n",
      "the  20 -th  step is running\n",
      "the  21 -th  step is running\n",
      "the  22 -th  step is running\n",
      "the  23 -th  step is running\n",
      "the  24 -th  step is running\n",
      "the  25 -th  step is running\n",
      "the  26 -th  step is running\n",
      "the  27 -th  step is running\n",
      "the  28 -th  step is running\n",
      "the  29 -th  step is running\n",
      "the  30 -th  step is running\n",
      "the  31 -th  step is running\n",
      "the  32 -th  step is running\n",
      "the  33 -th  step is running\n",
      "the  34 -th  step is running\n",
      "the  35 -th  step is running\n",
      "the  36 -th  step is running\n",
      "the  37 -th  step is running\n",
      "the  38 -th  step is running\n",
      "the  39 -th  step is running\n",
      "the  40 -th  step is running\n",
      "the  41 -th  step is running\n",
      "the  42 -th  step is running\n",
      "the  43 -th  step is running\n",
      "the  44 -th  step is running\n",
      "the  45 -th  step is running\n",
      "the  46 -th  step is running\n",
      "the  47 -th  step is running\n",
      "the  48 -th  step is running\n",
      "the  49 -th  step is running\n",
      "the  50 -th  step is running\n",
      "the  51 -th  step is running\n",
      "the  52 -th  step is running\n",
      "the  53 -th  step is running\n",
      "the  54 -th  step is running\n",
      "the  55 -th  step is running\n",
      "the  56 -th  step is running\n",
      "the  57 -th  step is running\n",
      "the  58 -th  step is running\n",
      "the  59 -th  step is running\n",
      "the  60 -th  step is running\n",
      "the  61 -th  step is running\n",
      "the  62 -th  step is running\n",
      "the  63 -th  step is running\n",
      "the  64 -th  step is running\n",
      "the  65 -th  step is running\n",
      "the  66 -th  step is running\n",
      "the  67 -th  step is running\n",
      "the  68 -th  step is running\n",
      "the  69 -th  step is running\n",
      "the  70 -th  step is running\n",
      "the  71 -th  step is running\n",
      "the  72 -th  step is running\n",
      "the  73 -th  step is running\n",
      "the  74 -th  step is running\n",
      "the  75 -th  step is running\n",
      "the  76 -th  step is running\n",
      "the  77 -th  step is running\n",
      "the  78 -th  step is running\n",
      "the  79 -th  step is running\n",
      "the  80 -th  step is running\n",
      "the  81 -th  step is running\n",
      "the  82 -th  step is running\n",
      "the  83 -th  step is running\n",
      "the  84 -th  step is running\n",
      "the  85 -th  step is running\n",
      "the  86 -th  step is running\n",
      "the  87 -th  step is running\n",
      "the  88 -th  step is running\n",
      "the  89 -th  step is running\n",
      "the  90 -th  step is running\n",
      "the  91 -th  step is running\n",
      "the  92 -th  step is running\n",
      "the  93 -th  step is running\n",
      "the  94 -th  step is running\n",
      "the  95 -th  step is running\n",
      "the  96 -th  step is running\n",
      "the  97 -th  step is running\n",
      "the  98 -th  step is running\n",
      "the  99 -th  step is running\n",
      "SVD trained\n",
      "生成训练数据...\n",
      "\n",
      "train.csv:500 (userId, eventId)=(b'123290209', b'1887085024')\n",
      "train.csv:1000 (userId, eventId)=(b'272886293', b'199858305')\n",
      "train.csv:1500 (userId, eventId)=(b'395305791', b'1582270949')\n",
      "train.csv:2000 (userId, eventId)=(b'527523423', b'3272728211')\n",
      "train.csv:2500 (userId, eventId)=(b'651258472', b'792632006')\n",
      "train.csv:3000 (userId, eventId)=(b'811791433', b'524756826')\n",
      "train.csv:3500 (userId, eventId)=(b'985547042', b'1269035551')\n",
      "train.csv:4000 (userId, eventId)=(b'1107615001', b'173949238')\n",
      "train.csv:4500 (userId, eventId)=(b'1236336671', b'3849306291')\n",
      "train.csv:5000 (userId, eventId)=(b'1414301782', b'2652356640')\n",
      "train.csv:5500 (userId, eventId)=(b'1595465532', b'955398943')\n",
      "train.csv:6000 (userId, eventId)=(b'1747091728', b'2131379889')\n",
      "train.csv:6500 (userId, eventId)=(b'1914182220', b'955398943')\n",
      "train.csv:7000 (userId, eventId)=(b'2071842684', b'1076364848')\n",
      "train.csv:7500 (userId, eventId)=(b'2217853337', b'3051438735')\n",
      "train.csv:8000 (userId, eventId)=(b'2338481531', b'2525447278')\n",
      "train.csv:8500 (userId, eventId)=(b'2489551967', b'520657921')\n",
      "train.csv:9000 (userId, eventId)=(b'2650493630', b'87962584')\n",
      "train.csv:9500 (userId, eventId)=(b'2791418962', b'4223848259')\n",
      "train.csv:10000 (userId, eventId)=(b'2903662804', b'2791462807')\n",
      "train.csv:10500 (userId, eventId)=(b'3036141956', b'3929507420')\n",
      "train.csv:11000 (userId, eventId)=(b'3176074542', b'3459485614')\n",
      "train.csv:11500 (userId, eventId)=(b'3285425249', b'2271782630')\n",
      "train.csv:12000 (userId, eventId)=(b'3410667855', b'1063772489')\n",
      "train.csv:12500 (userId, eventId)=(b'3531604778', b'2584839423')\n",
      "train.csv:13000 (userId, eventId)=(b'3686871863', b'53495098')\n",
      "train.csv:13500 (userId, eventId)=(b'3833637800', b'2415873572')\n",
      "train.csv:14000 (userId, eventId)=(b'3944021305', b'2096772901')\n",
      "train.csv:14500 (userId, eventId)=(b'4075466480', b'3567240505')\n",
      "train.csv:15000 (userId, eventId)=(b'4197193550', b'1628057176')\n",
      "生成预测数据...\n",
      "\n",
      "test.csv:500 (userId, eventId)=(b'182290053', b'2529072432')\n",
      "test.csv:1000 (userId, eventId)=(b'433510318', b'4244463632')\n",
      "test.csv:1500 (userId, eventId)=(b'632808865', b'2845303452')\n",
      "test.csv:2000 (userId, eventId)=(b'813611885', b'2036538169')\n",
      "test.csv:2500 (userId, eventId)=(b'1010701404', b'303459881')\n",
      "test.csv:3000 (userId, eventId)=(b'1210932037', b'2529072432')\n",
      "test.csv:3500 (userId, eventId)=(b'1452921099', b'2705317682')\n",
      "test.csv:4000 (userId, eventId)=(b'1623287180', b'1626678328')\n",
      "test.csv:4500 (userId, eventId)=(b'1855201342', b'2603032829')\n",
      "test.csv:5000 (userId, eventId)=(b'2083900381', b'2529072432')\n",
      "test.csv:5500 (userId, eventId)=(b'2318415276', b'2509151803')\n",
      "test.csv:6000 (userId, eventId)=(b'2528161539', b'4025975316')\n",
      "test.csv:6500 (userId, eventId)=(b'2749110768', b'4244406355')\n",
      "test.csv:7000 (userId, eventId)=(b'2927772127', b'1532377761')\n",
      "test.csv:7500 (userId, eventId)=(b'3199685636', b'1776393554')\n",
      "test.csv:8000 (userId, eventId)=(b'3393388475', b'680270887')\n",
      "test.csv:8500 (userId, eventId)=(b'3601169721', b'154434302')\n",
      "test.csv:9000 (userId, eventId)=(b'3828963415', b'3067222491')\n",
      "test.csv:9500 (userId, eventId)=(b'4018723397', b'2522610844')\n",
      "test.csv:10000 (userId, eventId)=(b'4180064266', b'2658555390')\n"
     ]
    }
   ],
   "source": [
    "# Build the recommender system (loads the precomputed features and trains\n",
    "# the SVD model on the training records), then emit the combined feature\n",
    "# files RS_train.csv and RS_test.csv for the downstream classifier.\n",
    "RS = RecommonderSystem(train_data)\n",
    "print (\"生成训练数据...\\n\")\n",
    "generateRSData(RS,train=True,  header=True)\n",
    "\n",
    "print (\"生成预测数据...\\n\")\n",
    "generateRSData(RS, train=False, header=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "时间、地点等特征都没有处理了，可以考虑用户看到event的时间与event开始时间的差、用户地点和event地点的差异。。。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
