{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将所有特征串联起来，构成RS_Train.csv\n",
    "#RS_Test.csv\n",
    "#为最后推荐系统做准备\n",
    "from __future__ import division\n",
    "\n",
    "import pickle \n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "import scipy.sparse as ss\n",
    "import scipy.spatial.distance as ssd\n",
    "from numpy.random import random  \n",
    "from collections import defaultdict\n",
    "\n",
    "class RecommonderSystem:\n",
    "  \"\"\"Blends several recommenders (user/item/SVD collaborative filtering,\n",
    "  attribute similarities, social and popularity signals) built from the\n",
    "  matrices precomputed by the earlier notebooks.\n",
    "\n",
    "  NOTE: the class name keeps the original 'Recommonder' spelling because\n",
    "  callers elsewhere depend on it.\n",
    "  \"\"\"\n",
    "  def __init__(self):\n",
    "    # Load the precomputed data and train the model-based CF component.\n",
    "    \n",
    "    # New dense indices for users and events (original id -> int index).\n",
    "    self.userIndex = pickle.load(open(\"PE_userIndex.pkl\", 'rb'))\n",
    "    self.eventIndex = pickle.load(open(\"PE_eventIndex.pkl\", 'rb'))\n",
    "    self.n_users = len(self.userIndex)\n",
    "    self.n_items = len(self.eventIndex)\n",
    "    \n",
    "    # User-event rating matrix R.\n",
    "    # train_SVD re-reads the ratings from the raw file because the two\n",
    "    # code paths expect different formats (never unified, alas).\n",
    "    self.userEventScores = sio.mmread(\"PE_userEventScores\").todense()\n",
    "    \n",
    "    # Inverted indices:\n",
    "    ## events each user attended\n",
    "    self.itemsForUser = pickle.load(open(\"PE_eventsForUser.pkl\", 'rb'))\n",
    "    ## users attending each event\n",
    "    self.usersForItem = pickle.load(open(\"PE_usersForEvent.pkl\", 'rb'))\n",
    "    \n",
    "    # Initialize and train the model-based CF (SVD) parameters.\n",
    "    self.init_SVD()\n",
    "    self.train_SVD(trainfile = \"train.csv\")\n",
    "    \n",
    "    # User-user similarity computed from user profile attributes.\n",
    "    self.userSimMatrix = sio.mmread(\"US_userSimMatrix\").todense()\n",
    "    \n",
    "    # Event-event similarities from event properties and from content.\n",
    "    self.eventPropSim = sio.mmread(\"EV_eventPropSim\").todense()\n",
    "    self.eventContSim = sio.mmread(\"EV_eventContSim\").todense()\n",
    "    \n",
    "    # Number of friends per user.\n",
    "    # NOTE(review): left sparse/raw (no .todense()); userPop assumes it is\n",
    "    # indexable as [0, i] -- confirm the stored matrix is dense format.\n",
    "    self.numFriends = sio.mmread(\"UF_numFriends\")\n",
    "    # Influence of each friend's event scores on the user.\n",
    "    self.userFriends = sio.mmread(\"UF_userFriends\").todense()\n",
    "    \n",
    "    # Raw popularity of each event.\n",
    "    self.eventPopularity = sio.mmread(\"EA_eventPopularity\").todense()\n",
    "\n",
    "  def init_SVD(self, K=20):\n",
    "    #初始化模型参数（for 基于模型的协同过滤SVD_CF）\n",
    "    self.K = K  \n",
    "    \n",
    "    #init parameters\n",
    "    #bias\n",
    "    self.bi = np.zeros(self.n_items)  \n",
    "    self.bu = np.zeros(self.n_users)  \n",
    "    \n",
    "    #the small matrix\n",
    "    self.P = random((self.n_users,self.K))/10*(np.sqrt(self.K))\n",
    "    self.Q = random((self.K, self.n_items))/10*(np.sqrt(self.K))  \n",
    "                  \n",
    "          \n",
    "  def train_SVD(self,trainfile = 'train.csv', steps=100,gamma=0.04,Lambda=0.15):\n",
    "    \"\"\"Train the biased-SVD latent factor model with SGD.\n",
    "\n",
    "    trainfile: CSV whose columns include user (0), event (1) and the\n",
    "        interested flag (4).\n",
    "    steps: number of SGD epochs over the training records.\n",
    "    gamma: learning rate, decayed by a factor of 0.93 after each epoch.\n",
    "    Lambda: L2 regularization strength.\n",
    "    \"\"\"\n",
    "    \n",
    "    # Shortcut: to keep the original input interface, the interactions are\n",
    "    # read straight from the training file again.\n",
    "    print (\"SVD Train...\")\n",
    "    ftrain = open(trainfile, 'r')\n",
    "    ftrain.readline()  # skip the header line\n",
    "    self.mu = 0.0\n",
    "    n_records = 0\n",
    "    uids = []  # user index of each record\n",
    "    i_ids = [] # item index of each record\n",
    "    # User-item rating matrix R (same content as userEventScores); a\n",
    "    # temporary local that is no longer needed once training finishes.\n",
    "    R = np.zeros((self.n_users, self.n_items))\n",
    "    \n",
    "    for line in ftrain:\n",
    "        cols = line.strip().split(\",\")\n",
    "        u = self.userIndex[cols[0]]  # user\n",
    "        i = self.eventIndex[cols[1]] # event\n",
    "        \n",
    "        uids.append(u)\n",
    "        i_ids.append(i)\n",
    "        \n",
    "        R[u,i] = int(cols[4])  #interested\n",
    "        self.mu += R[u,i]\n",
    "        n_records += 1\n",
    "    \n",
    "    ftrain.close()\n",
    "    self.mu /= n_records  # global mean rating\n",
    "    \n",
    "    # Train the SVD model with stochastic gradient descent.\n",
    "    # With bias terms and L2 regularization the gradients are:\n",
    "    #   bu[] gradient: -eui + lambda*bu[]\n",
    "    #   bi[] gradient: -eui + lambda*bi[]\n",
    "    #   P[] gradient:  -eui*Q[] + lambda*P[]\n",
    "    #   Q[] gradient:  -eui*P[] + lambda*Q[]\n",
    "    # This part closely follows the instructor's reference code.\n",
    "    # NOTE(review): earlier draft kept below for reference only.\n",
    "#    for step in range(steps):\n",
    "#        rmse_sum=0.0\n",
    "#        print('step',step+1,'is running') \n",
    "        # shuffle the training samples\n",
    "#        kk = np.random.permutation(n_records)\n",
    "#        for i in range(n_records):\n",
    "#            index = kk[i]\n",
    "#            uid=uids[index]\n",
    "#            i_id=i_ids[index]\n",
    "#            rating = R[uid,i_id]\n",
    "#            predict = self.pred_SVD(uid,i_id)\n",
    "#            eui = rating - predict \n",
    "            # sum of squared residuals\n",
    "#            rmse_sum+=eui**2\n",
    "#            \n",
    "#            self.bi[i_id] += gamma*(eui- Lambda*self.bi(i_id))\n",
    "#            self.bu[uid] += gamma*(eui - Lambda*self.bu(uid))\n",
    "#            for k in range(self.K):\n",
    "#                self.P[uid,k] += gamma * eui * self.Q[k,i_id] - Lambda*self.P[uid,k]\n",
    "#                self.Q[k,i_id] += gamma * eui * self.P[uid,k] - Lambda*self.Q[k,i_id]\n",
    "#     # decay the learning rate\n",
    "#        gamma=gamma*0.93        \n",
    "#    print( \"SVD trained\")\n",
    "    \n",
    "    for step in range(steps):  \n",
    "        print ('the ',step,'-th  step is running')  \n",
    "        rmse_sum=0.0 \n",
    "            \n",
    "        # Shuffle the order of the training samples.\n",
    "        kk = np.random.permutation(n_records)  \n",
    "        for j in range(n_records):  \n",
    "            # One training sample at a time.\n",
    "            index = kk[j]  \n",
    "            #temp = self.nonzero_scores_index[b]\n",
    "            #u = temp[0]\n",
    "            #i = temp[1]\n",
    "            u = uids[index]\n",
    "            i = i_ids[index]\n",
    " \n",
    "            # Prediction residual.\n",
    "            eui = R[u,i] - self.pred_SVD(u,i)\n",
    "            # Accumulated squared error (SSE; printed as 'rmse' below).\n",
    "            rmse_sum+=eui**2\n",
    "               \n",
    "            # SGD parameter updates.\n",
    "            self.bu[u]+= gamma*(eui - Lambda*self.bu[u])  \n",
    "            self.bi[i]+= gamma*(eui - Lambda*self.bi[i]) \n",
    "            \n",
    "            # NOTE(review): unlike the bias updates above, the Lambda terms\n",
    "            # here are NOT scaled by gamma; the gradient comments imply\n",
    "            # gamma*(eui*Q - Lambda*P). Confirm whether this is intended.\n",
    "            for k in range(self.K):\n",
    "                self.P[u,k] += gamma * eui * self.Q[k,i] - Lambda * self.P[u,k]\n",
    "                self.Q[k,i] += gamma * eui * self.P[u,k] - Lambda * self.Q[k,i]\n",
    "                \n",
    "        # Decay the learning rate each epoch.\n",
    "        gamma=gamma*0.93  \n",
    "        print(\"the rmse of the {} th step on train data is:{}\".format(step, rmse_sum))\n",
    "    print (\"SVD trained\")\n",
    "    \n",
    "  def pred_SVD(self, uid, i_id):\n",
    "    #根据当前参数，预测用户uid对Item（i_id）的打分   \n",
    "    #numpy.doc() 矩阵乘法\n",
    "    ans=self.mu + self.bi[i_id] + self.bu[uid] + np.dot(self.P[uid,:],self.Q[:,i_id])  \n",
    "        \n",
    "    #将打分范围控制在0-1之间\n",
    "    if ans>1:  \n",
    "        return 1  \n",
    "    elif ans<0:  \n",
    "        return 0\n",
    "    return ans  \n",
    "\n",
    "  def sim_cal_UserCF(self, uid1, uid2 ):\n",
    "    #基于用户的协同过滤中的两个用户uid1和uid2之间的相似度（根据两个用户对item打分的相似度）\n",
    "    #这里是讲课之前，通过user对event的评分做相似度\n",
    "    #similarity = 0.0\n",
    "    #similarity=np.nan_to_num(ssd.correlation(self.userEventScores[uid1],self.userEventScores[uid2]))\n",
    "    \n",
    "    si={}  #有效item（两个用户均有打分的item）的集合\n",
    "    for item in self.itemsForUser[uid1]:  #uid1所有打过分的Item1\n",
    "        if item in self.itemsForUser[uid2]:  #如果uid2也对该Item打过分\n",
    "            si[item]=1  #item为一个有效item\n",
    "        \n",
    "    #print si\n",
    "    n=len(si)   #有效item数，有效item为即对uid对Item打过分，uid2也对Item打过分\n",
    "    if (n==0):  #没有共同打过分的item，相似度设为0？\n",
    "        similarity=0  \n",
    "        return similarity  \n",
    "        \n",
    "    #用户uid1打过分的所有有效的item\n",
    "    #s1保存用户uid1对所有有效item的打分\n",
    "    s1=np.array([self.userEventScores[uid1,item] for item in si])  \n",
    "        \n",
    "    #用户uid2打过分的所有有效的Item\n",
    "    s2=np.array([self.userEventScores[uid2,item] for item in si])  \n",
    "        \n",
    "    sum1=np.sum(s1)  \n",
    "    sum2=np.sum(s2)  \n",
    "    sum1Sq=np.sum(s1**2)  \n",
    "    sum2Sq=np.sum(s2**2)  \n",
    "    pSum=np.sum(s1*s2)  \n",
    "        \n",
    "    #分子\n",
    "    num=pSum-(sum1*sum2/n)  \n",
    "        \n",
    "    #分母\n",
    "    den=np.sqrt((sum1Sq-sum1**2/n)*(sum2Sq-sum2**2/n))  \n",
    "    if den==0:  \n",
    "        similarity=0  \n",
    "        return 0  \n",
    "        \n",
    "    similarity = num/den  \n",
    "    return similarity  \n",
    "\n",
    "  def userCFReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    根据User-based协同过滤，得到event的推荐度\n",
    "    基本的伪代码思路如下：\n",
    "    for item i\n",
    "      for every other user v that has a preference for i\n",
    "        compute similarity s between u and v\n",
    "        incorporate v's preference for i weighted by s into running aversge\n",
    "    return top items ranked by weighted average\n",
    "    \"\"\"\n",
    "    #讲课前的思路是计算所有user与传入的userid 的相似度，然后取前K个相似度比较大的，预测userid对eventid的评分\n",
    "    #取与userid 相似度最高的 5个用户，做推荐\n",
    "    #ans = 0.0\n",
    "    #i = self.userIndex[userId]\n",
    "    #j = self.eventIndex[eventId]\n",
    "    #sim_list=dict()\n",
    "    #for k in range(n_users):\n",
    "    #if k==i:\n",
    "    #    continue\n",
    "    #else:\n",
    "    #    sim_list[k]=sim_cal_UserCF(i,k)\n",
    "    \n",
    "    #取前5个最大相似度的\n",
    "    #for id,sim in sorted(sim_list.items(),key= lambda item:item[1],reverse = True)[:5] :\n",
    "    #     fenzi+=(userEventScores[id,j] - userEventScores[id].sum()/n_items)*sim\n",
    "    #     fenmu+=sim\n",
    "    #以为ans为 是userid 对 eventid的预测 呢，看老师的代码不是\n",
    "    #ans = userEventScores[i].sum()/n_items + fenzi/fenmu \n",
    "    \n",
    "    u = self.userIndex[userId]\n",
    "    i = self.eventIndex[eventId]\n",
    "    \n",
    "    sim_accumulate=0.0  \n",
    "    rat_acc=0.0  \n",
    "\n",
    "    for user in self.usersForItem[i]:  #对eventId打过分的所有用户\n",
    "        #print user, u\n",
    "        sim = self.sim_cal_UserCF(uid1 = user,uid2 = u)    #该user与uid之间的相似度\n",
    "        if sim == 0:continue  \n",
    "            #print sim,self.user_movie[uid][item],sim*self.user_movie[uid][item]  \n",
    "            \n",
    "        #u2 = self.userIndex[user]\n",
    "        rat_acc += sim * self.userEventScores[user,i]   #用户user对eventId的打分\n",
    "        sim_accumulate += sim  \n",
    "        \n",
    "    #print rat_acc,sim_accumulate  \n",
    "    if sim_accumulate==0: #no same user rated,return average rates of the data  \n",
    "        return  self.mu  \n",
    "    ans = rat_acc/sim_accumulate  \n",
    "    \n",
    "    #将打分范围控制在0-1之间\n",
    "    if ans>1:  \n",
    "        return 1  \n",
    "    elif ans<0:  \n",
    "        return 0  \n",
    "    return ans\n",
    "\n",
    "\n",
    "  def sim_cal_ItemCF(self, i_id1, i_id2):\n",
    "    #讲课前做的，基于评分矩阵做的余弦相似度\n",
    "    #计算Item i_id1和i_id2之间的相似性\n",
    "    #请补充完整代码\n",
    "    #similarity = 0.0\n",
    "    #使用余弦相似度 计算 event之间相似度，\n",
    "    #num= float(self.userEventScores[:,i_id1].T*self.userEventScores[:,i_id2])\n",
    "    #denom = la.norm(self.userEventScores[1])*la.norm(self.userEventScores[2])\n",
    "    #similarity = np.nan_to_num(0.5+0.5*(num/denom))\n",
    "    #return similarity    \n",
    "\n",
    "    #计算Item i_id1和i_id2之间的相似性\n",
    "    si={}  #有效用户集合\n",
    "    for user in self.usersForItem[i_id1]:  #所有对Item1打过分的的user\n",
    "        if user in self.usersForItem[i_id2]:  #如果该用户对Item2也打过分\n",
    "            si[user]=1  #user为一个有效用用户\n",
    "        \n",
    "    n=len(si)   #有效用户数，有效用户为即对Item1打过分，也对Item2打过分\n",
    "    if (n==0):  #没有共同打过分的用户，相似度设为0？\n",
    "        return 0  \n",
    "        \n",
    "    #所有有效用户对Item1的打分\n",
    "    #i1 = self.eventIndex[i_id1]\n",
    "    #i2 = self.eventIndex[i_id2]\n",
    "    s1=np.array([self.userEventScores[u, i_id1] for u in si])  \n",
    "        \n",
    "    #所有有效用户对Item2的打分\n",
    "    s2=np.array([self.userEventScores[u, i_id2] for u in si])  \n",
    "        \n",
    "    sum1=np.sum(s1)  \n",
    "    sum2=np.sum(s2)  \n",
    "    sum1Sq=np.sum(s1**2)  \n",
    "    sum2Sq=np.sum(s2**2)  \n",
    "    pSum=np.sum(s1*s2)  \n",
    "        \n",
    "    #分子\n",
    "    num=pSum-(sum1*sum2/n)  \n",
    "        \n",
    "    #分母\n",
    "    den=np.sqrt((sum1Sq-sum1**2/n)*(sum2Sq-sum2**2/n))  \n",
    "    if den==0:  \n",
    "        return 0  \n",
    "        \n",
    "    return num/den  \n",
    "  \n",
    "            \n",
    "  def eventCFReco(self, userId, eventId):    \n",
    "    \"\"\"\n",
    "    根据基于物品的协同过滤，得到Event的推荐度\n",
    "    基本的伪代码思路如下：\n",
    "    for item i \n",
    "        for every item j tht u has a preference for\n",
    "            compute similarity s between i and j\n",
    "            add u's preference for j weighted by s to a running average\n",
    "    return top items, ranked by weighted average\n",
    "    \"\"\"\n",
    "    #讲课前这部分没写，打算和基于用户的协同推荐同样处理\n",
    "    \n",
    "    ans=0.0\n",
    "    \n",
    "    u = self.userIndex[userId]\n",
    "    i = self.eventIndex[eventId]\n",
    "\n",
    "    sim_accumulate=0.0  \n",
    "    rat_acc=0.0  \n",
    "                   \n",
    "    for item in self.itemsForUser[u]:  #用户uid打过分的所有Item\n",
    "        #i2 = self.eventIndex[item]\n",
    "        sim = self.sim_cal_ItemCF(item,i)    #该Item与i_id之间的相似度\n",
    "           \n",
    "        rat_acc += sim * self.userEventScores[u,item]  \n",
    "        sim_accumulate += sim  \n",
    "        \n",
    "    #print rat_acc,sim_accumulate  \n",
    "    if sim_accumulate==0: #no same user rated,return average rates of the data  \n",
    "        return  self.mu  \n",
    "\n",
    "    ans = rat_acc/sim_accumulate  \n",
    "\n",
    "    #将打分范围控制在0-1之间\n",
    "    if ans>1:  \n",
    "        return 1  \n",
    "    elif ans<0:  \n",
    "        return 0\n",
    "    return ans\n",
    "    \n",
    "  def svdCFReco(self, userId, eventId):\n",
    "    #基于模型的协同过滤, SVD++/LFM\n",
    "    u = self.userIndex[userId]\n",
    "    i = self.eventIndex[eventId]\n",
    "\n",
    "    return self.pred_SVD(u,i)\n",
    "\n",
    "  def userReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    类似基于User-based协同过滤，只是用户之间的相似度由用户本身的属性得到，计算event的推荐度\n",
    "    基本的伪代码思路如下：\n",
    "    for item i\n",
    "      for every other user v that has a preference for i\n",
    "        compute similarity s between u and v\n",
    "        incorporate v's preference for i weighted by s into running aversge\n",
    "    return top items ranked by weighted average\n",
    "    \"\"\"\n",
    "    i = self.userIndex[userId]\n",
    "    j = self.eventIndex[eventId]\n",
    "\n",
    "    vs = self.userEventScores[:, j]\n",
    "    sims = self.userSimMatrix[i, :]\n",
    "\n",
    "    prod = sims * vs\n",
    "\n",
    "    try:\n",
    "      return prod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      return 0\n",
    "\n",
    "  def eventReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    类似基于Item-based协同过滤，只是item之间的相似度由item本身的属性得到，计算Event的推荐度\n",
    "    基本的伪代码思路如下：\n",
    "    for item i \n",
    "      for every item j that u has a preference for\n",
    "        compute similarity s between i and j\n",
    "        add u's preference for j weighted by s to a running average\n",
    "    return top items, ranked by weighted average\n",
    "    \"\"\"\n",
    "    i = self.userIndex[userId]\n",
    "    j = self.eventIndex[eventId]\n",
    "    js = self.userEventScores[i, :]\n",
    "    psim = self.eventPropSim[:, j]\n",
    "    csim = self.eventContSim[:, j]\n",
    "    pprod = js * psim\n",
    "    cprod = js * csim\n",
    "    \n",
    "    pscore = 0\n",
    "    cscore = 0\n",
    "    try:\n",
    "      pscore = pprod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      pass\n",
    "    try:\n",
    "      cscore = cprod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      pass\n",
    "    return pscore, cscore\n",
    "\n",
    "  def userPop(self, userId):\n",
    "    \"\"\"\n",
    "    基于用户的朋友个数来推断用户的社交程度\n",
    "    主要的考量是如果用户的朋友非常多，可能会更倾向于参加各种社交活动\n",
    "    \"\"\"\n",
    "    if userId in self.userIndex:\n",
    "      i = self.userIndex[userId]\n",
    "      try:\n",
    "        return self.numFriends[0, i]\n",
    "      except IndexError:\n",
    "        return 0\n",
    "    else:\n",
    "      return 0\n",
    "\n",
    "  def friendInfluence(self, userId):\n",
    "    \"\"\"\n",
    "    朋友对用户的影响\n",
    "    主要考虑用户所有的朋友中，有多少是非常喜欢参加各种社交活动/event的\n",
    "    用户的朋友圈如果都积极参与各种event，可能会对当前用户有一定的影响\n",
    "    \"\"\"\n",
    "    nusers = np.shape(self.userFriends)[1]\n",
    "    i = self.userIndex[userId]\n",
    "    return (self.userFriends[i, :].sum(axis=0) / nusers)[0,0]\n",
    "\n",
    "  def eventPop(self, eventId):\n",
    "    \"\"\"\n",
    "    本活动本身的热度\n",
    "    主要是通过参与的人数来界定的\n",
    "    \"\"\"\n",
    "    i = self.eventIndex[eventId]\n",
    "    return self.eventPopularity[i, 0]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generateRSData(RS, train=True, header=True):\n",
    "    \"\"\"\n",
    "    把前面user-based协同过滤 和 item-based协同过滤，以及各种热度和影响度作为特征组合在一起\n",
    "    生成新的训练数据，用于分类器分类使用\n",
    "    \"\"\"\n",
    "    fn = \"train.csv\" if train else \"test.csv\"\n",
    "    fin = open(fn, 'r')\n",
    "    fout = open(\"RS_\" + fn, 'w')\n",
    "    \n",
    "    #忽略第一行（列名字）\n",
    "    fin.readline().strip().split(\",\")\n",
    "    \n",
    "    # write output header\n",
    "    if header:\n",
    "      ocolnames = [\"invited\", \"userCF_reco\", \"evtCF_reco\",\"svdCF_reco\",\"user_reco\", \"evt_p_reco\",\n",
    "        \"evt_c_reco\", \"user_pop\", \"frnd_infl\", \"evt_pop\"]\n",
    "      if train:\n",
    "        ocolnames.append(\"interested\")\n",
    "        ocolnames.append(\"not_interested\")\n",
    "      fout.write(\",\".join(ocolnames) + \"\\n\")\n",
    "    \n",
    "    ln = 0\n",
    "    for line in fin:\n",
    "      ln += 1\n",
    "      if ln%500 == 0:\n",
    "          print (\"%s:%d (userId, eventId)=(%s, %s)\" % (fn, ln, userId, eventId))\n",
    "          #break;\n",
    "      \n",
    "      cols = line.strip().split(\",\")\n",
    "      userId = cols[0]\n",
    "      eventId = cols[1]\n",
    "      invited = cols[2]\n",
    "      \n",
    "      userCF_reco = RS.userCFReco(userId, eventId)\n",
    "      itemCF_reco = RS.eventCFReco(userId, eventId)\n",
    "      svdCF_reco = RS.svdCFReco(userId, eventId)\n",
    "        \n",
    "      user_reco = RS.userReco(userId, eventId)\n",
    "      evt_p_reco, evt_c_reco = RS.eventReco(userId, eventId)\n",
    "      user_pop = RS.userPop(userId)\n",
    "     \n",
    "      frnd_infl = RS.friendInfluence(userId)\n",
    "      evt_pop = RS.eventPop(eventId)\n",
    "      ocols = [invited, userCF_reco, itemCF_reco, svdCF_reco,user_reco, evt_p_reco,\n",
    "        evt_c_reco, user_pop, frnd_infl, evt_pop]\n",
    "      \n",
    "      if train:\n",
    "        ocols.append(cols[4]) # interested\n",
    "        ocols.append(cols[5]) # not_interested\n",
    "      fout.write(\",\".join(map(lambda x: str(x), ocols)) + \"\\n\")\n",
    "    \n",
    "    fin.close()\n",
    "    fout.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SVD Train...\n",
      "the  0 -th  step is running\n",
      "the rmse of the 0 th step on train data is:5809.267618017776\n",
      "the  1 -th  step is running\n",
      "the rmse of the 1 th step on train data is:2606.043658439505\n",
      "the  2 -th  step is running\n",
      "the rmse of the 2 th step on train data is:2344.783992345352\n",
      "the  3 -th  step is running\n",
      "the rmse of the 3 th step on train data is:2193.664143197552\n",
      "the  4 -th  step is running\n",
      "the rmse of the 4 th step on train data is:2072.370989461395\n",
      "the  5 -th  step is running\n",
      "the rmse of the 5 th step on train data is:1975.709836761682\n",
      "the  6 -th  step is running\n",
      "the rmse of the 6 th step on train data is:1899.1016269730076\n",
      "the  7 -th  step is running\n",
      "the rmse of the 7 th step on train data is:1832.9886035964516\n",
      "the  8 -th  step is running\n",
      "the rmse of the 8 th step on train data is:1779.4191532251994\n",
      "the  9 -th  step is running\n",
      "the rmse of the 9 th step on train data is:1730.793035752813\n",
      "the  10 -th  step is running\n",
      "the rmse of the 10 th step on train data is:1690.6556052035269\n",
      "the  11 -th  step is running\n",
      "the rmse of the 11 th step on train data is:1653.3440716315947\n",
      "the  12 -th  step is running\n",
      "the rmse of the 12 th step on train data is:1622.1554915292952\n",
      "the  13 -th  step is running\n",
      "the rmse of the 13 th step on train data is:1593.1646106790147\n",
      "the  14 -th  step is running\n",
      "the rmse of the 14 th step on train data is:1568.4587260361197\n",
      "the  15 -th  step is running\n",
      "the rmse of the 15 th step on train data is:1546.1900839857872\n",
      "the  16 -th  step is running\n",
      "the rmse of the 16 th step on train data is:1525.6864698425168\n",
      "the  17 -th  step is running\n",
      "the rmse of the 17 th step on train data is:1507.153004265089\n",
      "the  18 -th  step is running\n",
      "the rmse of the 18 th step on train data is:1490.687810219654\n",
      "the  19 -th  step is running\n",
      "the rmse of the 19 th step on train data is:1476.4461631946645\n",
      "the  20 -th  step is running\n",
      "the rmse of the 20 th step on train data is:1462.947707395626\n",
      "the  21 -th  step is running\n",
      "the rmse of the 21 th step on train data is:1449.5640182030388\n",
      "the  22 -th  step is running\n",
      "the rmse of the 22 th step on train data is:1439.0760159057627\n",
      "the  23 -th  step is running\n",
      "the rmse of the 23 th step on train data is:1428.6026095341867\n",
      "the  24 -th  step is running\n",
      "the rmse of the 24 th step on train data is:1419.152441262578\n",
      "the  25 -th  step is running\n",
      "the rmse of the 25 th step on train data is:1410.4978020766669\n",
      "the  26 -th  step is running\n",
      "the rmse of the 26 th step on train data is:1402.4432482058396\n",
      "the  27 -th  step is running\n",
      "the rmse of the 27 th step on train data is:1395.7786147623751\n",
      "the  28 -th  step is running\n",
      "the rmse of the 28 th step on train data is:1389.0951219274548\n",
      "the  29 -th  step is running\n",
      "the rmse of the 29 th step on train data is:1382.611964013258\n",
      "the  30 -th  step is running\n",
      "the rmse of the 30 th step on train data is:1377.3492210523264\n",
      "the  31 -th  step is running\n",
      "the rmse of the 31 th step on train data is:1372.0168676620747\n",
      "the  32 -th  step is running\n",
      "the rmse of the 32 th step on train data is:1367.4401734654894\n",
      "the  33 -th  step is running\n",
      "the rmse of the 33 th step on train data is:1362.8277708268724\n",
      "the  34 -th  step is running\n",
      "the rmse of the 34 th step on train data is:1359.0655950319835\n",
      "the  35 -th  step is running\n",
      "the rmse of the 35 th step on train data is:1355.095054907318\n",
      "the  36 -th  step is running\n",
      "the rmse of the 36 th step on train data is:1351.644148674919\n",
      "the  37 -th  step is running\n",
      "the rmse of the 37 th step on train data is:1348.4884005456051\n",
      "the  38 -th  step is running\n",
      "the rmse of the 38 th step on train data is:1345.4422123603817\n",
      "the  39 -th  step is running\n",
      "the rmse of the 39 th step on train data is:1342.6517559071979\n",
      "the  40 -th  step is running\n",
      "the rmse of the 40 th step on train data is:1340.426940024322\n",
      "the  41 -th  step is running\n",
      "the rmse of the 41 th step on train data is:1337.8439828968765\n",
      "the  42 -th  step is running\n",
      "the rmse of the 42 th step on train data is:1335.8286070405125\n",
      "the  43 -th  step is running\n",
      "the rmse of the 43 th step on train data is:1333.7920354312355\n",
      "the  44 -th  step is running\n",
      "the rmse of the 44 th step on train data is:1331.9772838809283\n",
      "the  45 -th  step is running\n",
      "the rmse of the 45 th step on train data is:1330.2475003008637\n",
      "the  46 -th  step is running\n",
      "the rmse of the 46 th step on train data is:1328.704492006983\n",
      "the  47 -th  step is running\n",
      "the rmse of the 47 th step on train data is:1327.1816428207862\n",
      "the  48 -th  step is running\n",
      "the rmse of the 48 th step on train data is:1325.7504364669155\n",
      "the  49 -th  step is running\n",
      "the rmse of the 49 th step on train data is:1324.584382712133\n",
      "the  50 -th  step is running\n",
      "the rmse of the 50 th step on train data is:1323.297626584529\n",
      "the  51 -th  step is running\n",
      "the rmse of the 51 th step on train data is:1322.2965475251558\n",
      "the  52 -th  step is running\n",
      "the rmse of the 52 th step on train data is:1321.2346651486591\n",
      "the  53 -th  step is running\n",
      "the rmse of the 53 th step on train data is:1320.3818028508197\n",
      "the  54 -th  step is running\n",
      "the rmse of the 54 th step on train data is:1319.3814827750439\n",
      "the  55 -th  step is running\n",
      "the rmse of the 55 th step on train data is:1318.5334687050047\n",
      "the  56 -th  step is running\n",
      "the rmse of the 56 th step on train data is:1317.814080026966\n",
      "the  57 -th  step is running\n",
      "the rmse of the 57 th step on train data is:1317.1208133287998\n",
      "the  58 -th  step is running\n",
      "the rmse of the 58 th step on train data is:1316.4725456607\n",
      "the  59 -th  step is running\n",
      "the rmse of the 59 th step on train data is:1315.9024887580065\n",
      "the  60 -th  step is running\n",
      "the rmse of the 60 th step on train data is:1315.3327268273079\n",
      "the  61 -th  step is running\n",
      "the rmse of the 61 th step on train data is:1314.8132383526993\n",
      "the  62 -th  step is running\n",
      "the rmse of the 62 th step on train data is:1314.3333421380212\n",
      "the  63 -th  step is running\n",
      "the rmse of the 63 th step on train data is:1313.8822051139812\n",
      "the  64 -th  step is running\n",
      "the rmse of the 64 th step on train data is:1313.4762755366714\n",
      "the  65 -th  step is running\n",
      "the rmse of the 65 th step on train data is:1313.0547958343404\n",
      "the  66 -th  step is running\n",
      "the rmse of the 66 th step on train data is:1312.7017939304012\n",
      "the  67 -th  step is running\n",
      "the rmse of the 67 th step on train data is:1312.3948384364764\n",
      "the  68 -th  step is running\n",
      "the rmse of the 68 th step on train data is:1312.068037190889\n",
      "the  69 -th  step is running\n",
      "the rmse of the 69 th step on train data is:1311.791680846824\n",
      "the  70 -th  step is running\n",
      "the rmse of the 70 th step on train data is:1311.5021025429153\n",
      "the  71 -th  step is running\n",
      "the rmse of the 71 th step on train data is:1311.2542334306263\n",
      "the  72 -th  step is running\n",
      "the rmse of the 72 th step on train data is:1311.0147558221738\n",
      "the  73 -th  step is running\n",
      "the rmse of the 73 th step on train data is:1310.7968556372139\n",
      "the  74 -th  step is running\n",
      "the rmse of the 74 th step on train data is:1310.5977874945286\n",
      "the  75 -th  step is running\n",
      "the rmse of the 75 th step on train data is:1310.410038003861\n",
      "the  76 -th  step is running\n",
      "the rmse of the 76 th step on train data is:1310.252812204116\n",
      "the  77 -th  step is running\n",
      "the rmse of the 77 th step on train data is:1310.0845649227952\n",
      "the  78 -th  step is running\n",
      "the rmse of the 78 th step on train data is:1309.9207383266041\n",
      "the  79 -th  step is running\n",
      "the rmse of the 79 th step on train data is:1309.7918422773234\n",
      "the  80 -th  step is running\n",
      "the rmse of the 80 th step on train data is:1309.6519386657021\n",
      "the  81 -th  step is running\n",
      "the rmse of the 81 th step on train data is:1309.5419979862863\n",
      "the  82 -th  step is running\n",
      "the rmse of the 82 th step on train data is:1309.4197124030431\n",
      "the  83 -th  step is running\n",
      "the rmse of the 83 th step on train data is:1309.3272528865675\n",
      "the  84 -th  step is running\n",
      "the rmse of the 84 th step on train data is:1309.2284346646006\n",
      "the  85 -th  step is running\n",
      "the rmse of the 85 th step on train data is:1309.1349489923866\n",
      "the  86 -th  step is running\n",
      "the rmse of the 86 th step on train data is:1309.0556504232916\n",
      "the  87 -th  step is running\n",
      "the rmse of the 87 th step on train data is:1308.972537231008\n",
      "the  88 -th  step is running\n",
      "the rmse of the 88 th step on train data is:1308.9101514272988\n",
      "the  89 -th  step is running\n",
      "the rmse of the 89 th step on train data is:1308.8315206445118\n",
      "the  90 -th  step is running\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "the rmse of the 90 th step on train data is:1308.7673813740612\n",
      "the  91 -th  step is running\n",
      "the rmse of the 91 th step on train data is:1308.7106712311397\n",
      "the  92 -th  step is running\n",
      "the rmse of the 92 th step on train data is:1308.653564598768\n",
      "the  93 -th  step is running\n",
      "the rmse of the 93 th step on train data is:1308.6045055151187\n",
      "the  94 -th  step is running\n",
      "the rmse of the 94 th step on train data is:1308.5586039831621\n",
      "the  95 -th  step is running\n",
      "the rmse of the 95 th step on train data is:1308.5129818313865\n",
      "the  96 -th  step is running\n",
      "the rmse of the 96 th step on train data is:1308.47543879726\n",
      "the  97 -th  step is running\n",
      "the rmse of the 97 th step on train data is:1308.4375252042737\n",
      "the  98 -th  step is running\n",
      "the rmse of the 98 th step on train data is:1308.403246989077\n",
      "the  99 -th  step is running\n",
      "the rmse of the 99 th step on train data is:1308.3714195233285\n",
      "SVD trained\n",
      "生成训练数据...\n",
      "\n",
      "train.csv:500 (userId, eventId)=(123290209, 1887085024)\n",
      "train.csv:1000 (userId, eventId)=(272886293, 199858305)\n",
      "train.csv:1500 (userId, eventId)=(395305791, 1582270949)\n",
      "train.csv:2000 (userId, eventId)=(527523423, 3272728211)\n",
      "train.csv:2500 (userId, eventId)=(651258472, 792632006)\n",
      "train.csv:3000 (userId, eventId)=(811791433, 524756826)\n",
      "train.csv:3500 (userId, eventId)=(985547042, 1269035551)\n",
      "train.csv:4000 (userId, eventId)=(1107615001, 173949238)\n",
      "train.csv:4500 (userId, eventId)=(1236336671, 3849306291)\n",
      "train.csv:5000 (userId, eventId)=(1414301782, 2652356640)\n",
      "train.csv:5500 (userId, eventId)=(1595465532, 955398943)\n",
      "train.csv:6000 (userId, eventId)=(1747091728, 2131379889)\n",
      "train.csv:6500 (userId, eventId)=(1914182220, 955398943)\n",
      "train.csv:7000 (userId, eventId)=(2071842684, 1076364848)\n",
      "train.csv:7500 (userId, eventId)=(2217853337, 3051438735)\n",
      "train.csv:8000 (userId, eventId)=(2338481531, 2525447278)\n",
      "train.csv:8500 (userId, eventId)=(2489551967, 520657921)\n",
      "train.csv:9000 (userId, eventId)=(2650493630, 87962584)\n",
      "train.csv:9500 (userId, eventId)=(2791418962, 4223848259)\n",
      "train.csv:10000 (userId, eventId)=(2903662804, 2791462807)\n",
      "train.csv:10500 (userId, eventId)=(3036141956, 3929507420)\n",
      "train.csv:11000 (userId, eventId)=(3176074542, 3459485614)\n",
      "train.csv:11500 (userId, eventId)=(3285425249, 2271782630)\n",
      "train.csv:12000 (userId, eventId)=(3410667855, 1063772489)\n",
      "train.csv:12500 (userId, eventId)=(3531604778, 2584839423)\n",
      "train.csv:13000 (userId, eventId)=(3686871863, 53495098)\n",
      "train.csv:13500 (userId, eventId)=(3833637800, 2415873572)\n",
      "train.csv:14000 (userId, eventId)=(3944021305, 2096772901)\n",
      "train.csv:14500 (userId, eventId)=(4075466480, 3567240505)\n",
      "train.csv:15000 (userId, eventId)=(4197193550, 1628057176)\n",
      "生成预测数据...\n",
      "\n",
      "test.csv:500 (userId, eventId)=(182290053, 2529072432)\n",
      "test.csv:1000 (userId, eventId)=(433510318, 4244463632)\n",
      "test.csv:1500 (userId, eventId)=(632808865, 2845303452)\n",
      "test.csv:2000 (userId, eventId)=(813611885, 2036538169)\n",
      "test.csv:2500 (userId, eventId)=(1010701404, 303459881)\n",
      "test.csv:3000 (userId, eventId)=(1210932037, 2529072432)\n",
      "test.csv:3500 (userId, eventId)=(1452921099, 2705317682)\n",
      "test.csv:4000 (userId, eventId)=(1623287180, 1626678328)\n",
      "test.csv:4500 (userId, eventId)=(1855201342, 2603032829)\n",
      "test.csv:5000 (userId, eventId)=(2083900381, 2529072432)\n",
      "test.csv:5500 (userId, eventId)=(2318415276, 2509151803)\n",
      "test.csv:6000 (userId, eventId)=(2528161539, 4025975316)\n",
      "test.csv:6500 (userId, eventId)=(2749110768, 4244406355)\n",
      "test.csv:7000 (userId, eventId)=(2927772127, 1532377761)\n",
      "test.csv:7500 (userId, eventId)=(3199685636, 1776393554)\n",
      "test.csv:8000 (userId, eventId)=(3393388475, 680270887)\n",
      "test.csv:8500 (userId, eventId)=(3601169721, 154434302)\n",
      "test.csv:9000 (userId, eventId)=(3828963415, 3067222491)\n",
      "test.csv:9500 (userId, eventId)=(4018723397, 2522610844)\n",
      "test.csv:10000 (userId, eventId)=(4180064266, 2658555390)\n"
     ]
    }
   ],
   "source": [
    "# Train all recommender components, then materialize the feature CSVs\n",
    "# (RS_train.csv / RS_test.csv) consumed by the final classifier.\n",
    "RS = RecommonderSystem()\n",
    "print (\"生成训练数据...\\n\")\n",
    "generateRSData(RS,train=True,  header=True)\n",
    "\n",
    "print (\"生成预测数据...\\n\")\n",
    "generateRSData(RS, train=False, header=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
