{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将所有特征串联起来，构成RS_Train.csv\n",
    "#RS_Test.csv\n",
    "#为最后推荐系统做准备\n",
    "from __future__ import division\n",
    "\n",
    "import cPickle\n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "import scipy.sparse as ss\n",
    "from numpy.random import random  \n",
    "from collections import defaultdict\n",
    "import scipy.spatial.distance as ssd\n",
    "\n",
    "class RecommonderSystem:\n",
    "    \"\"\"Ensemble of recommenders built on precomputed feature files:\n",
    "    user/item/model-based CF plus profile-similarity, social and\n",
    "    popularity scores.\"\"\"\n",
    "    def __init__(self):\n",
    "        # Load precomputed data and train the SVD model.\n",
    "    \n",
    "        # remapped (dense) indices of users and events\n",
    "        self.userIndex = cPickle.load(open(\"PE_userIndex.pkl\", 'rb'))\n",
    "        self.eventIndex = cPickle.load(open(\"PE_eventIndex.pkl\", 'rb'))\n",
    "        self.n_users = len(self.userIndex)\n",
    "        self.n_items = len(self.eventIndex)\n",
    "    \n",
    "        # user-event score matrix R\n",
    "        # NOTE: train_SVD re-reads the same data from the CSV file because\n",
    "        # the two code paths expect different formats (never unified :( )\n",
    "        self.userEventScores = sio.mmread(\"PE_userEventScores\").todense()\n",
    "    \n",
    "        # inverted indexes:\n",
    "        ## events each user has scored\n",
    "        self.itemsForUser = cPickle.load(open(\"PE_eventsForUser.pkl\", 'rb'))\n",
    "        ## users that scored each event\n",
    "        self.usersForItem = cPickle.load(open(\"PE_usersForEvent.pkl\", 'rb'))\n",
    "    \n",
    "        # model-based CF: initialize parameters, then train\n",
    "        self.init_SVD()\n",
    "        self.train_SVD(trainfile = \"train.csv\")\n",
    "    \n",
    "        # user-user similarity computed from user profile attributes\n",
    "        self.userSimMatrix = sio.mmread(\"US_userSimMatrix\").todense()\n",
    "        \n",
    "        # event-event similarities computed from event properties / content\n",
    "        self.eventPropSim = sio.mmread(\"EV_eventPropSim\").todense()\n",
    "        self.eventContSim = sio.mmread(\"EV_eventContSim\").todense()\n",
    "    \n",
    "        # number of friends of each user\n",
    "        self.numFriends = sio.mmread(\"UF_numFriends\")\n",
    "        # influence of each friend's event scores on the user\n",
    "        self.userFriends = sio.mmread(\"UF_userFriends\").todense()\n",
    "    \n",
    "        # intrinsic popularity of each event\n",
    "        self.eventPopularity = sio.mmread(\"EA_eventPopularity\").todense()\n",
    "\n",
    "    def init_SVD(self, K=20):\n",
    "        \"\"\"Initialize the parameters of the model-based CF (SVD).\n",
    "\n",
    "        K -- number of latent factors.\n",
    "        \"\"\"\n",
    "        self.K = K\n",
    "\n",
    "        # bias terms (users and items)\n",
    "        self.bu = np.zeros(self.n_users)\n",
    "        self.bi = np.zeros(self.n_items)\n",
    "\n",
    "        # latent-factor matrices: P is users x K, Q is K x items\n",
    "        self.P = random((self.n_users, self.K)) / 10 * (np.sqrt(self.K))\n",
    "        self.Q = random((self.K, self.n_items)) / 10 * (np.sqrt(self.K))\n",
    "\n",
    "    def train_SVD(self, trainfile='train.csv', steps=100, gamma=0.04, Lambda=0.15):\n",
    "        \"\"\"Train the SVD model with SGD (for model-based CF).\n",
    "\n",
    "        trainfile -- CSV whose columns include user, event and 'interested'\n",
    "        steps     -- number of SGD epochs\n",
    "        gamma     -- learning rate, decayed by 0.93 after each epoch\n",
    "        Lambda    -- L2 regularization strength\n",
    "        \"\"\"\n",
    "        # Shortcut: read the ratings straight from the training file so the\n",
    "        # input interface stays the same as the original code.\n",
    "        print \"SVD Train...\"\n",
    "        ftrain = open(trainfile, 'r')\n",
    "        ftrain.readline()  # skip the header row\n",
    "        self.mu = 0.0\n",
    "        n_records = 0\n",
    "        uids = []   # user index of each record\n",
    "        i_ids = []  # item index of each record\n",
    "        # user-item rating matrix R (same content as userEventScores);\n",
    "        # temporary -- no longer needed once training is finished\n",
    "        R = np.zeros((self.n_users, self.n_items))\n",
    "    \n",
    "        for line in ftrain:\n",
    "            cols = line.strip().split(\",\")\n",
    "            u = self.userIndex[cols[0]]   # user\n",
    "            i = self.eventIndex[cols[1]]  # event\n",
    "        \n",
    "            uids.append(u)\n",
    "            i_ids.append(i)\n",
    "        \n",
    "            R[u,i] = int(cols[4])  # 'interested' label\n",
    "            self.mu += R[u,i]\n",
    "            n_records += 1\n",
    "    \n",
    "        ftrain.close()\n",
    "        self.mu /= n_records  # global mean rating\n",
    "        \n",
    "        for step in range(steps):\n",
    "            print \"the %d-th step is running\" % step\n",
    "            eui2sum = 0\n",
    "            \n",
    "            # visit the records in a fresh random order every epoch\n",
    "            record = np.random.permutation(n_records)\n",
    "            for r in range(n_records):\n",
    "                index = record[r]\n",
    "                u = uids[index]\n",
    "                i = i_ids[index]\n",
    "                \n",
    "                # prediction residual\n",
    "                eui = R[u,i] - self.pred_SVD(u,i)\n",
    "                # accumulate squared residuals for this epoch's RMSE\n",
    "                eui2sum += eui**2\n",
    "                \n",
    "                # regularized SGD updates for the biases\n",
    "                self.bu[u] += gamma * (eui - Lambda * self.bu[u])\n",
    "                self.bi[i] += gamma * (eui - Lambda * self.bi[i])\n",
    "                \n",
    "                for k in range(self.K):\n",
    "                    # BUGFIX: scale the regularization term by gamma so the\n",
    "                    # factor updates are consistent with the bias updates\n",
    "                    # above (previously Lambda*P / Lambda*Q were subtracted\n",
    "                    # at full strength on every step).\n",
    "                    t = self.P[u,k] + gamma * (eui * self.Q[k,i] - Lambda * self.P[u,k])\n",
    "                    self.Q[k,i] = self.Q[k,i] + gamma * (eui * self.P[u,k] - Lambda * self.Q[k,i])\n",
    "                    self.P[u,k] = t  # Q's update uses the old P, so assign P last\n",
    "           \n",
    "            gamma = gamma * 0.93  # decay the learning rate\n",
    "            # BUGFIX: report an actual RMSE; the old code printed\n",
    "            # eui2sum / n_users, which is neither averaged over the records\n",
    "            # nor square-rooted.\n",
    "            print \"the rmse is %f\" % np.sqrt(eui2sum / n_records)\n",
    "\n",
    "        print \"SVD trained\"\n",
    "    \n",
    "    def pred_SVD(self, uid, i_id):\n",
    "        \"\"\"Predict user uid's score for item i_id from the current SVD\n",
    "        parameters, clipped to the range [0, 1].\"\"\"\n",
    "        score = self.mu + self.bi[i_id] + self.bu[uid] + np.dot(self.P[uid,:], self.Q[:,i_id])\n",
    "\n",
    "        # clip the prediction into [0, 1]\n",
    "        if score > 1:\n",
    "            return 1\n",
    "        if score < 0:\n",
    "            return 0\n",
    "        return score\n",
    "\n",
    "    def sim_cal_UserCF(self, uid1, uid2):\n",
    "        \"\"\"Similarity of users uid1 and uid2 for user-based CF, from the\n",
    "        Pearson correlation of their scores on co-rated events.\n",
    "\n",
    "        uid1, uid2 -- remapped user indices.\n",
    "        \"\"\"\n",
    "        similarity = 0.0\n",
    "        u1score = []\n",
    "        u2score = []\n",
    "        # events each user has scored\n",
    "        eventsforu1 = self.itemsForUser[uid1]\n",
    "        eventsforu2 = self.itemsForUser[uid2]\n",
    "        \n",
    "        a = 0  # number of co-rated events\n",
    "        \n",
    "        # collect the two users' scores for every co-rated event\n",
    "        for e1 in eventsforu1:\n",
    "            if e1 in eventsforu2:  # event scored by both users\n",
    "                u1score.append(self.userEventScores[uid1, e1])\n",
    "                u2score.append(self.userEventScores[uid2, e1])\n",
    "                a += 1\n",
    "        \n",
    "        u1score = np.array(u1score)\n",
    "        u2score = np.array(u2score)\n",
    "        \n",
    "        if a != 0:\n",
    "            # Pearson correlation of the co-rated scores\n",
    "            similarity = 1 - ssd.correlation(u1score, u2score)\n",
    "            # BUGFIX: ssd.correlation returns NaN for constant vectors (e.g.\n",
    "            # a single co-rated event); NaN would poison every downstream\n",
    "            # weighted sum, so treat it as 'no similarity'.\n",
    "            if np.isnan(similarity):\n",
    "                similarity = 0.0\n",
    "        else:\n",
    "            similarity = 0  # no co-rated events\n",
    "    \n",
    "        return similarity\n",
    "\n",
    "    def userCFReco(self, userId, eventId):\n",
    "        \"\"\"\n",
    "        User-based CF recommendation score of the event for the user.\n",
    "        Pseudo-code:\n",
    "        for item i\n",
    "          for every other user v that has a preference for i\n",
    "            compute similarity s between u and v\n",
    "            incorporate v's preference for i weighted by s into running average\n",
    "        return top items ranked by weighted average\n",
    "        \"\"\"\n",
    "        # remapped indices of the target event and user\n",
    "        e = self.eventIndex[eventId]\n",
    "        userid = self.userIndex[userId]\n",
    "        # users that scored this event\n",
    "        # BUGFIX: operate on a copy -- the original code removed the target\n",
    "        # user from the shared inverted index, corrupting it for later calls.\n",
    "        us = list(self.usersForItem[e])\n",
    "        if userid in us:\n",
    "            us.remove(userid)  # exclude the target user itself\n",
    "        # accumulate similarity-weighted score deviations over the neighbours\n",
    "        fenzi = 0.0  # numerator: sum of sim * (score - neighbour's mean)\n",
    "        fenmu = 0.0  # denominator: sum of sim\n",
    "        for u in us:\n",
    "            sim = self.sim_cal_UserCF(userid, u)  # similarity to the target user\n",
    "            if sim > 0:  # only keep positively-correlated neighbours\n",
    "                num = len(self.itemsForUser[u])  # events this neighbour scored\n",
    "                summ = float(self.userEventScores[u, :].sum(axis=1))  # neighbour's score total\n",
    "                pianzhi = self.userEventScores[u, e] - summ / num  # deviation from the neighbour's mean\n",
    "                # BUGFIX: weight the deviation (pianzhi) as the comment and\n",
    "                # the 'ra + fenzi/fenmu' formula below intend -- the raw\n",
    "                # score double-counted the mean that ra adds back.\n",
    "                fenzi += sim * pianzhi\n",
    "                fenmu += sim\n",
    "        \n",
    "        # target user's mean score\n",
    "        num1 = len(self.itemsForUser[userid])\n",
    "        if num1 != 0:\n",
    "            summ1 = float(self.userEventScores[userid, :].sum(axis=1))\n",
    "            ra = summ1 / num1\n",
    "        else:\n",
    "            ra = 0\n",
    "    \n",
    "        # prediction: the user's mean plus the weighted deviation\n",
    "        if fenmu != 0:  # at least one positively-similar neighbour\n",
    "            ans = ra + fenzi / fenmu\n",
    "        else:\n",
    "            # no usable neighbours: fall back to the user's mean score, or\n",
    "            # to the global mean if this user has no scores at all\n",
    "            if ra != 0:\n",
    "                ans = ra\n",
    "            else:\n",
    "                ans = self.mu\n",
    "        \n",
    "        # clip the prediction into [0, 1]\n",
    "        if ans > 1:\n",
    "            return 1\n",
    "        elif ans < 0:\n",
    "            return 0\n",
    "        return ans\n",
    "\n",
    "    def sim_cal_ItemCF(self, i_id1, i_id2):\n",
    "        \"\"\"Similarity of items i_id1 and i_id2 for item-based CF, from the\n",
    "        Pearson correlation of the scores given by their common users.\n",
    "\n",
    "        i_id1, i_id2 -- remapped event indices.\n",
    "        \"\"\"\n",
    "        similarity = 0.0\n",
    "        \n",
    "        e1score = []\n",
    "        e2score = []\n",
    "\n",
    "        # users that scored each event\n",
    "        usersfore1 = self.usersForItem[i_id1]\n",
    "        usersfore2 = self.usersForItem[i_id2]\n",
    "        \n",
    "        b = 0  # number of common users\n",
    "        \n",
    "        # collect each common user's scores for the two events\n",
    "        for u1 in usersfore1:\n",
    "            if u1 in usersfore2:\n",
    "                e1score.append(self.userEventScores[u1, i_id1])\n",
    "                e2score.append(self.userEventScores[u1, i_id2])\n",
    "                b += 1\n",
    "        \n",
    "        e1score = np.array(e1score)\n",
    "        e2score = np.array(e2score)\n",
    "        \n",
    "        if b != 0:\n",
    "            # Pearson correlation over the common users' scores\n",
    "            similarity = 1 - ssd.correlation(e1score, e2score)\n",
    "            # BUGFIX: ssd.correlation returns NaN for constant vectors (e.g.\n",
    "            # a single common user); NaN would poison every downstream\n",
    "            # weighted sum, so treat it as 'no similarity'.\n",
    "            if np.isnan(similarity):\n",
    "                similarity = 0.0\n",
    "        else:\n",
    "            similarity = 0  # no user scored both events\n",
    "    \n",
    "        return similarity\n",
    "            \n",
    "    def eventCFReco(self, userId, eventId):    \n",
    "        \"\"\"\n",
    "        Item-based CF recommendation score of the event for the user.\n",
    "        Pseudo-code:\n",
    "        for item i \n",
    "            for every item j that u has a preference for\n",
    "                compute similarity s between i and j\n",
    "                add u's preference for j weighted by s to a running average\n",
    "        return top items, ranked by weighted average\n",
    "        \"\"\"\n",
    "        # remapped indices of the target event and user\n",
    "        ev = self.eventIndex[eventId]\n",
    "        userid = self.userIndex[userId]\n",
    "        # events scored by the target user\n",
    "        # BUGFIX: operate on a copy -- the original code removed the target\n",
    "        # event from the shared inverted index, corrupting it for later calls\n",
    "        # (it also silently shrank the num2 count below).\n",
    "        es = list(self.itemsForUser[userid])\n",
    "        if ev in es:\n",
    "            es.remove(ev)  # exclude the target event itself\n",
    "        # accumulate similarity-weighted scores over the user's other events\n",
    "        fenzi = 0  # numerator: sum of sim * score\n",
    "        fenmu = 0  # denominator: sum of sim\n",
    "        for e in es:\n",
    "            sim = self.sim_cal_ItemCF(e, ev)  # similarity to the target event\n",
    "            if sim > 0:  # only keep positively-correlated events\n",
    "                xiangsi = self.userEventScores[userid, e]  # user's score for the similar event\n",
    "                fenzi += sim * xiangsi\n",
    "                fenmu += sim\n",
    "        \n",
    "        # target user's mean score\n",
    "        num2 = len(self.itemsForUser[userid])\n",
    "        if num2 != 0:\n",
    "            summ2 = float(self.userEventScores[userid, :].sum(axis=1))  # user's score total\n",
    "            ra = summ2 / num2\n",
    "        else:\n",
    "            ra = 0\n",
    "    \n",
    "        # prediction: similarity-weighted average of the user's other scores\n",
    "        if fenmu != 0:  # at least one positively-similar event\n",
    "            ans = fenzi / fenmu\n",
    "        else:\n",
    "            # no similar events: fall back to the user's mean score, or to\n",
    "            # the global mean if this user has no scores at all\n",
    "            if ra != 0:\n",
    "                ans = ra\n",
    "            else:\n",
    "                ans = self.mu\n",
    "                \n",
    "        # clip the prediction into [0, 1]\n",
    "        if ans > 1:\n",
    "            return 1\n",
    "        elif ans < 0:\n",
    "            return 0\n",
    "        return ans\n",
    "    \n",
    "    def svdCFReco(self, userId, eventId):\n",
    "        \"\"\"Model-based CF (SVD++/LFM) recommendation score of the event\n",
    "        for the user.\"\"\"\n",
    "        return self.pred_SVD(self.userIndex[userId], self.eventIndex[eventId])\n",
    "\n",
    "    def userReco(self, userId, eventId):\n",
    "        \"\"\"\n",
    "        Like user-based CF, except that user-user similarity comes from the\n",
    "        users' own profile attributes; returns a recommendation score.\n",
    "        Pseudo-code:\n",
    "        for item i\n",
    "            for every other user v that has a preference for i\n",
    "            compute similarity s between u and v\n",
    "            incorporate v's preference for i weighted by s into running average\n",
    "        return top items ranked by weighted average\n",
    "        \"\"\"\n",
    "        u_idx = self.userIndex[userId]\n",
    "        e_idx = self.eventIndex[eventId]\n",
    "\n",
    "        # similarity-weighted sum of every user's score for this event\n",
    "        weighted = self.userSimMatrix[u_idx, :] * self.userEventScores[:, e_idx]\n",
    "\n",
    "        try:\n",
    "            # subtract the user's own score for the event\n",
    "            return weighted[0, 0] - self.userEventScores[u_idx, e_idx]\n",
    "        except IndexError:\n",
    "            return 0\n",
    "\n",
    "    def eventReco(self, userId, eventId):\n",
    "        \"\"\"\n",
    "        Like item-based CF, except that event-event similarity comes from\n",
    "        the events' own attributes; returns a pair of scores\n",
    "        (property-based, content-based).\n",
    "        Pseudo-code:\n",
    "        for item i \n",
    "          for every item j that u has a preference for\n",
    "            compute similarity s between i and j\n",
    "            add u's preference for j weighted by s to a running average\n",
    "        return top items, ranked by weighted average\n",
    "        \"\"\"\n",
    "        u_idx = self.userIndex[userId]\n",
    "        e_idx = self.eventIndex[eventId]\n",
    "        user_scores = self.userEventScores[u_idx, :]\n",
    "        # similarity-weighted sums over all events the user scored\n",
    "        pprod = user_scores * self.eventPropSim[:, e_idx]\n",
    "        cprod = user_scores * self.eventContSim[:, e_idx]\n",
    "\n",
    "        pscore = 0\n",
    "        cscore = 0\n",
    "        try:\n",
    "            # property-based score minus the user's own score for the event\n",
    "            pscore = pprod[0, 0] - self.userEventScores[u_idx, e_idx]\n",
    "        except IndexError:\n",
    "            pass\n",
    "        try:\n",
    "            # content-based score minus the user's own score for the event\n",
    "            cscore = cprod[0, 0] - self.userEventScores[u_idx, e_idx]\n",
    "        except IndexError:\n",
    "            pass\n",
    "        return pscore, cscore\n",
    "\n",
    "    def userPop(self, userId):\n",
    "        \"\"\"\n",
    "        Social activity level of a user, inferred from the friend count:\n",
    "        users with many friends may be keener on social events.\n",
    "        \"\"\"\n",
    "        # FIX: dict.has_key() is deprecated (removed in Python 3); use `in`.\n",
    "        if userId in self.userIndex:\n",
    "            i = self.userIndex[userId]\n",
    "            try:\n",
    "                return self.numFriends[0, i]\n",
    "            except IndexError:\n",
    "                return 0\n",
    "        else: \n",
    "            return 0\n",
    "\n",
    "    def friendInfluence(self, userId):\n",
    "        \"\"\"\n",
    "        Influence of the user's friends: roughly, how actively the user's\n",
    "        friend circle participates in events, which may rub off on the user.\n",
    "        \"\"\"\n",
    "        nusers = np.shape(self.userFriends)[1]\n",
    "        i = self.userIndex[userId]\n",
    "        # BUGFIX: userFriends[i, :] is a 1 x nusers row, so .sum(axis=0)\n",
    "        # left it unchanged and [0, 0] picked only the first column instead\n",
    "        # of aggregating over all friends; sum the whole row instead.\n",
    "        return self.userFriends[i, :].sum() / nusers\n",
    "\n",
    "    def eventPop(self, eventId):\n",
    "        \"\"\"\n",
    "        Intrinsic popularity of the event, measured by the number of\n",
    "        participants.\n",
    "        \"\"\"\n",
    "        return self.eventPopularity[self.eventIndex[eventId], 0]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generateRSData(RS, train=True, header=True):\n",
    "    \"\"\"\n",
    "    Combine the user-based / item-based / SVD CF scores and the popularity\n",
    "    and influence features into one CSV for the downstream classifier.\n",
    "\n",
    "    RS     -- a trained RecommonderSystem instance\n",
    "    train  -- True: read train.csv and append the labels; False: test.csv\n",
    "    header -- whether to write a header row to the output file\n",
    "    \"\"\"\n",
    "    if train:\n",
    "        fn = \"train.csv\"\n",
    "    else:\n",
    "        fn = \"test.csv\"\n",
    "    fin = open(fn, 'rb')\n",
    "    fout = open(\"RS_\" + fn, 'wb')\n",
    "    \n",
    "    # skip the first line (column names)\n",
    "    fin.readline()\n",
    "    \n",
    "    # BUGFIX: the header columns were built only when header=True but were\n",
    "    # always written, so header=False raised NameError; guard the whole\n",
    "    # header-writing section instead.\n",
    "    if header:\n",
    "        ocolnames = [\"invited\", \"userCF_reco\", \"evtCF_reco\", \"svdCF_reco\",\n",
    "                     \"user_reco\", \"evt_p_reco\", \"evt_c_reco\", \"user_pop\",\n",
    "                     \"frnd_infl\", \"evt_pop\"]\n",
    "        if train:\n",
    "            ocolnames.append(\"interested\")\n",
    "            ocolnames.append(\"not_interested\")\n",
    "        fout.write(\",\".join(ocolnames) + \"\\n\")\n",
    "    \n",
    "    ln = 0\n",
    "    for line in fin:\n",
    "        ln += 1\n",
    "      \n",
    "        cols = line.strip().split(\",\")\n",
    "        userId = cols[0]\n",
    "        eventId = cols[1]\n",
    "        invited = cols[2]\n",
    "      \n",
    "        # BUGFIX: log after parsing so the reported ids belong to the\n",
    "        # current line (previously they came from the preceding line).\n",
    "        if ln%500 == 0:\n",
    "            print \"%s:%d (userId, eventId)=(%s, %s)\" % (fn, ln, userId, eventId)\n",
    "      \n",
    "        userCF_reco = RS.userCFReco(userId, eventId)\n",
    "        itemCF_reco = RS.eventCFReco(userId, eventId)\n",
    "        svdCF_reco = RS.svdCFReco(userId, eventId)\n",
    "        \n",
    "        user_reco = RS.userReco(userId, eventId)\n",
    "        evt_p_reco, evt_c_reco = RS.eventReco(userId, eventId)\n",
    "        user_pop = RS.userPop(userId)\n",
    "     \n",
    "        frnd_infl = RS.friendInfluence(userId)\n",
    "        evt_pop = RS.eventPop(eventId)\n",
    "        ocols = [invited, userCF_reco, itemCF_reco, svdCF_reco, user_reco,\n",
    "                 evt_p_reco, evt_c_reco, user_pop, frnd_infl, evt_pop]\n",
    "      \n",
    "        if train:\n",
    "            ocols.append(cols[4])  # interested\n",
    "            ocols.append(cols[5])  # not_interested\n",
    "        \n",
    "        fout.write(\",\".join(map(lambda x: str(x), ocols)) + \"\\n\")\n",
    "    \n",
    "    fin.close()\n",
    "    fout.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SVD Train...\n",
      "('the', 0, '-th step is running')\n",
      "('the rmse is', 1.7205159458966286)\n",
      "('the', 1, '-th step is running')\n",
      "('the rmse is', 0.7658715472955977)\n",
      "('the', 2, '-th step is running')\n",
      "('the rmse is', 0.6914196514477237)\n",
      "('the', 3, '-th step is running')\n",
      "('the rmse is', 0.6467165621591598)\n",
      "('the', 4, '-th step is running')\n",
      "('the rmse is', 0.6114848517326873)\n",
      "('the', 5, '-th step is running')\n",
      "('the rmse is', 0.5834413226996388)\n",
      "('the', 6, '-th step is running')\n",
      "('the rmse is', 0.559997595376072)\n",
      "('the', 7, '-th step is running')\n",
      "('the rmse is', 0.5409958788213844)\n",
      "('the', 8, '-th step is running')\n",
      "('the rmse is', 0.5246142442267007)\n",
      "('the', 9, '-th step is running')\n",
      "('the rmse is', 0.5103843226171783)\n",
      "('the', 10, '-th step is running')\n",
      "('the rmse is', 0.4984087735357926)\n",
      "('the', 11, '-th step is running')\n",
      "('the rmse is', 0.4880043528976865)\n",
      "('the', 12, '-th step is running')\n",
      "('the rmse is', 0.47864222832569775)\n",
      "('the', 13, '-th step is running')\n",
      "('the rmse is', 0.4700747002210836)\n",
      "('the', 14, '-th step is running')\n",
      "('the rmse is', 0.46256989859183)\n",
      "('the', 15, '-th step is running')\n",
      "('the rmse is', 0.45592614571657286)\n",
      "('the', 16, '-th step is running')\n",
      "('the rmse is', 0.4501918882669931)\n",
      "('the', 17, '-th step is running')\n",
      "('the rmse is', 0.44470300802730744)\n",
      "('the', 18, '-th step is running')\n",
      "('the rmse is', 0.4396403256443837)\n",
      "('the', 19, '-th step is running')\n",
      "('the rmse is', 0.4353803923668107)\n",
      "('the', 20, '-th step is running')\n",
      "('the rmse is', 0.43134022953986817)\n",
      "('the', 21, '-th step is running')\n",
      "('the rmse is', 0.4275862791211732)\n",
      "('the', 22, '-th step is running')\n",
      "('the rmse is', 0.42438272607668454)\n",
      "('the', 23, '-th step is running')\n",
      "('the rmse is', 0.4214180035681289)\n",
      "('the', 24, '-th step is running')\n",
      "('the rmse is', 0.41844182932258006)\n",
      "('the', 25, '-th step is running')\n",
      "('the rmse is', 0.4160805122871467)\n",
      "('the', 26, '-th step is running')\n",
      "('the rmse is', 0.41364513004113695)\n",
      "('the', 27, '-th step is running')\n",
      "('the rmse is', 0.41166337507293943)\n",
      "('the', 28, '-th step is running')\n",
      "('the rmse is', 0.4096038646395916)\n",
      "('the', 29, '-th step is running')\n",
      "('the rmse is', 0.40782013768659514)\n",
      "('the', 30, '-th step is running')\n",
      "('the rmse is', 0.4061967571313368)\n",
      "('the', 31, '-th step is running')\n",
      "('the rmse is', 0.4047423855851513)\n",
      "('the', 32, '-th step is running')\n",
      "('the rmse is', 0.4032389685125377)\n",
      "('the', 33, '-th step is running')\n",
      "('the rmse is', 0.40191105607985433)\n",
      "('the', 34, '-th step is running')\n",
      "('the rmse is', 0.4007420652921539)\n",
      "('the', 35, '-th step is running')\n",
      "('the rmse is', 0.39971666630230573)\n",
      "('the', 36, '-th step is running')\n",
      "('the rmse is', 0.39866441776952455)\n",
      "('the', 37, '-th step is running')\n",
      "('the rmse is', 0.3977230108876373)\n",
      "('the', 38, '-th step is running')\n",
      "('the rmse is', 0.39691268980371514)\n",
      "('the', 39, '-th step is running')\n",
      "('the rmse is', 0.39604764145557586)\n",
      "('the', 40, '-th step is running')\n",
      "('the rmse is', 0.39530301463061707)\n",
      "('the', 41, '-th step is running')\n",
      "('the rmse is', 0.3946207663913851)\n",
      "('the', 42, '-th step is running')\n",
      "('the rmse is', 0.3939894079001721)\n",
      "('the', 43, '-th step is running')\n",
      "('the rmse is', 0.3933676636602489)\n",
      "('the', 44, '-th step is running')\n",
      "('the rmse is', 0.39286804112548046)\n",
      "('the', 45, '-th step is running')\n",
      "('the rmse is', 0.392302719475914)\n",
      "('the', 46, '-th step is running')\n",
      "('the rmse is', 0.3919245682452868)\n",
      "('the', 47, '-th step is running')\n",
      "('the rmse is', 0.39141806553890063)\n",
      "('the', 48, '-th step is running')\n",
      "('the rmse is', 0.3910417063816607)\n",
      "('the', 49, '-th step is running')\n",
      "('the rmse is', 0.3906632056233198)\n",
      "('the', 50, '-th step is running')\n",
      "('the rmse is', 0.3903089717672813)\n",
      "('the', 51, '-th step is running')\n",
      "('the rmse is', 0.38998202046857955)\n",
      "('the', 52, '-th step is running')\n",
      "('the rmse is', 0.38970162863337665)\n",
      "('the', 53, '-th step is running')\n",
      "('the rmse is', 0.3893889333383191)\n",
      "('the', 54, '-th step is running')\n",
      "('the rmse is', 0.3891556726018667)\n",
      "('the', 55, '-th step is running')\n",
      "('the rmse is', 0.38890842174331586)\n",
      "('the', 56, '-th step is running')\n",
      "('the rmse is', 0.3886976895364875)\n",
      "('the', 57, '-th step is running')\n",
      "('the rmse is', 0.38847900349804626)\n",
      "('the', 58, '-th step is running')\n",
      "('the rmse is', 0.3882917616759093)\n",
      "('the', 59, '-th step is running')\n",
      "('the rmse is', 0.38809465103083746)\n",
      "('the', 60, '-th step is running')\n",
      "('the rmse is', 0.38794872403642394)\n",
      "('the', 61, '-th step is running')\n",
      "('the rmse is', 0.3877912369490584)\n",
      "('the', 62, '-th step is running')\n",
      "('the rmse is', 0.38765890842653283)\n",
      "('the', 63, '-th step is running')\n",
      "('the rmse is', 0.38751889972547315)\n",
      "('the', 64, '-th step is running')\n",
      "('the rmse is', 0.3873895937399861)\n",
      "('the', 65, '-th step is running')\n",
      "('the rmse is', 0.38728085259666206)\n",
      "('the', 66, '-th step is running')\n",
      "('the rmse is', 0.387168553280178)\n",
      "('the', 67, '-th step is running')\n",
      "('the rmse is', 0.3870714776641738)\n",
      "('the', 68, '-th step is running')\n",
      "('the rmse is', 0.38697550034293743)\n",
      "('the', 69, '-th step is running')\n",
      "('the rmse is', 0.3869015721774493)\n",
      "('the', 70, '-th step is running')\n",
      "('the rmse is', 0.3868180212848606)\n",
      "('the', 71, '-th step is running')\n",
      "('the rmse is', 0.38674081868788224)\n",
      "('the', 72, '-th step is running')\n",
      "('the rmse is', 0.38667251034107875)\n",
      "('the', 73, '-th step is running')\n",
      "('the rmse is', 0.3866047825747471)\n",
      "('the', 74, '-th step is running')\n",
      "('the rmse is', 0.3865495992469023)\n",
      "('the', 75, '-th step is running')\n",
      "('the rmse is', 0.38649328101801855)\n",
      "('the', 76, '-th step is running')\n",
      "('the rmse is', 0.3864399446683922)\n",
      "('the', 77, '-th step is running')\n",
      "('the rmse is', 0.3863937635156398)\n",
      "('the', 78, '-th step is running')\n",
      "('the rmse is', 0.38635011015743026)\n",
      "('the', 79, '-th step is running')\n",
      "('the rmse is', 0.38631125662784604)\n",
      "('the', 80, '-th step is running')\n",
      "('the rmse is', 0.38627111581230034)\n",
      "('the', 81, '-th step is running')\n",
      "('the rmse is', 0.3862375166305004)\n",
      "('the', 82, '-th step is running')\n",
      "('the rmse is', 0.386202335942353)\n",
      "('the', 83, '-th step is running')\n",
      "('the rmse is', 0.38617048306322693)\n",
      "('the', 84, '-th step is running')\n",
      "('the rmse is', 0.3861422569811459)\n",
      "('the', 85, '-th step is running')\n",
      "('the rmse is', 0.38611693733673935)\n",
      "('the', 86, '-th step is running')\n",
      "('the rmse is', 0.38609017139606255)\n",
      "('the', 87, '-th step is running')\n",
      "('the rmse is', 0.3860686273955787)\n",
      "('the', 88, '-th step is running')\n",
      "('the rmse is', 0.38604375011696757)\n",
      "('the', 89, '-th step is running')\n",
      "('the rmse is', 0.38602507114258977)\n",
      "('the', 90, '-th step is running')\n",
      "('the rmse is', 0.386006948708494)\n",
      "('the', 91, '-th step is running')\n",
      "('the rmse is', 0.38599073829891234)\n",
      "('the', 92, '-th step is running')\n",
      "('the rmse is', 0.38597422627265565)\n",
      "('the', 93, '-th step is running')\n",
      "('the rmse is', 0.38595956155515354)\n",
      "('the', 94, '-th step is running')\n",
      "('the rmse is', 0.3859458631320743)\n",
      "('the', 95, '-th step is running')\n",
      "('the rmse is', 0.385933090725392)\n",
      "('the', 96, '-th step is running')\n",
      "('the rmse is', 0.3859222768843759)\n",
      "('the', 97, '-th step is running')\n",
      "('the rmse is', 0.38590931454031274)\n",
      "('the', 98, '-th step is running')\n",
      "('the rmse is', 0.3858993972570916)\n",
      "('the', 99, '-th step is running')\n",
      "('the rmse is', 0.3858896589065217)\n",
      "SVD trained\n",
      "生成训练数据...\n",
      "\n",
      "train.csv:500 (userId, eventId)=(123290209, 1887085024)\n",
      "train.csv:1000 (userId, eventId)=(272886293, 199858305)\n",
      "train.csv:1500 (userId, eventId)=(395305791, 1582270949)\n",
      "train.csv:2000 (userId, eventId)=(527523423, 3272728211)\n",
      "train.csv:2500 (userId, eventId)=(651258472, 792632006)\n",
      "train.csv:3000 (userId, eventId)=(811791433, 524756826)\n",
      "train.csv:3500 (userId, eventId)=(985547042, 1269035551)\n",
      "train.csv:4000 (userId, eventId)=(1107615001, 173949238)\n",
      "train.csv:4500 (userId, eventId)=(1236336671, 3849306291)\n",
      "train.csv:5000 (userId, eventId)=(1414301782, 2652356640)\n",
      "train.csv:5500 (userId, eventId)=(1595465532, 955398943)\n",
      "train.csv:6000 (userId, eventId)=(1747091728, 2131379889)\n",
      "train.csv:6500 (userId, eventId)=(1914182220, 955398943)\n",
      "train.csv:7000 (userId, eventId)=(2071842684, 1076364848)\n",
      "train.csv:7500 (userId, eventId)=(2217853337, 3051438735)\n",
      "train.csv:8000 (userId, eventId)=(2338481531, 2525447278)\n",
      "train.csv:8500 (userId, eventId)=(2489551967, 520657921)\n",
      "train.csv:9000 (userId, eventId)=(2650493630, 87962584)\n",
      "train.csv:9500 (userId, eventId)=(2791418962, 4223848259)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train.csv:10000 (userId, eventId)=(2903662804, 2791462807)\n",
      "train.csv:10500 (userId, eventId)=(3036141956, 3929507420)\n",
      "train.csv:11000 (userId, eventId)=(3176074542, 3459485614)\n",
      "train.csv:11500 (userId, eventId)=(3285425249, 2271782630)\n",
      "train.csv:12000 (userId, eventId)=(3410667855, 1063772489)\n",
      "train.csv:12500 (userId, eventId)=(3531604778, 2584839423)\n",
      "train.csv:13000 (userId, eventId)=(3686871863, 53495098)\n",
      "train.csv:13500 (userId, eventId)=(3833637800, 2415873572)\n",
      "train.csv:14000 (userId, eventId)=(3944021305, 2096772901)\n",
      "train.csv:14500 (userId, eventId)=(4075466480, 3567240505)\n",
      "train.csv:15000 (userId, eventId)=(4197193550, 1628057176)\n",
      "生成预测数据...\n",
      "\n",
      "test.csv:500 (userId, eventId)=(182290053, 2529072432)\n",
      "test.csv:1000 (userId, eventId)=(433510318, 4244463632)\n",
      "test.csv:1500 (userId, eventId)=(632808865, 2845303452)\n",
      "test.csv:2000 (userId, eventId)=(813611885, 2036538169)\n",
      "test.csv:2500 (userId, eventId)=(1010701404, 303459881)\n",
      "test.csv:3000 (userId, eventId)=(1210932037, 2529072432)\n",
      "test.csv:3500 (userId, eventId)=(1452921099, 2705317682)\n",
      "test.csv:4000 (userId, eventId)=(1623287180, 1626678328)\n",
      "test.csv:4500 (userId, eventId)=(1855201342, 2603032829)\n",
      "test.csv:5000 (userId, eventId)=(2083900381, 2529072432)\n",
      "test.csv:5500 (userId, eventId)=(2318415276, 2509151803)\n",
      "test.csv:6000 (userId, eventId)=(2528161539, 4025975316)\n",
      "test.csv:6500 (userId, eventId)=(2749110768, 4244406355)\n",
      "test.csv:7000 (userId, eventId)=(2927772127, 1532377761)\n",
      "test.csv:7500 (userId, eventId)=(3199685636, 1776393554)\n",
      "test.csv:8000 (userId, eventId)=(3393388475, 680270887)\n",
      "test.csv:8500 (userId, eventId)=(3601169721, 154434302)\n",
      "test.csv:9000 (userId, eventId)=(3828963415, 3067222491)\n",
      "test.csv:9500 (userId, eventId)=(4018723397, 2522610844)\n",
      "test.csv:10000 (userId, eventId)=(4180064266, 2658555390)\n"
     ]
    }
   ],
   "source": [
    "# Build the recommender (loads the precomputed features and trains the\n",
    "# SVD model), then generate the combined feature CSVs for the classifier.\n",
    "RS = RecommonderSystem()\n",
    "print \"生成训练数据...\\n\"\n",
    "generateRSData(RS,train=True,  header=True)\n",
    "\n",
    "print \"生成预测数据...\\n\"\n",
    "generateRSData(RS, train=False, header=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "观察RS_test、RS_train发现，训练集数据生成正常，但测试集上生成的数据在用户协同过滤和物品协同过滤的两列，全部返回的是用户打分均值，按照我自己写的代码逻辑，是在邻居集合上相似度累加和是0时返回打分均值，难道是测试集上的用户和活动都没有相似性高的邻居吗？"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
