{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 作业"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "导入包并定义推荐系统类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# 将所有特征串联起来，构成RS_Train.csv\n",
    "#为最后推荐系统做准备\n",
    "from __future__ import division\n",
    "\n",
    "import _pickle\n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "import scipy.sparse as ss\n",
    "from numpy.random import random  \n",
    "from collections import defaultdict\n",
    "\n",
    "class RecommonderSystem:\n",
    "  def __init__(self):\n",
    "    # Load all precomputed features and initialize the recommender.\n",
    "    \n",
    "    # Re-mapped indices for users and events\n",
    "    self.userIndex = _pickle.load(open(\"./data/PE_userIndex.pkl\", 'rb'))\n",
    "    self.eventIndex = _pickle.load(open(\"./data/PE_eventIndex.pkl\", 'rb'))\n",
    "    self.n_users = len(self.userIndex)\n",
    "    self.n_items = len(self.eventIndex)\n",
    "    \n",
    "    # User-event score matrix R\n",
    "    # (train_SVD re-reads it from the raw file; the two formats differ)\n",
    "    self.userEventScores = sio.mmread(\"./data/PE_userEventScores\").todense()\n",
    "    \n",
    "    # Inverted indices\n",
    "    ## events each user attended\n",
    "    self.itemsForUser = _pickle.load(open(\"./data/PE_eventsForUser.pkl\", 'rb'))\n",
    "    ## users attending each event\n",
    "    self.usersForItem = _pickle.load(open(\"./data/PE_usersForEvent.pkl\", 'rb'))\n",
    "    \n",
    "    # Model-based CF: initialize parameters, then train\n",
    "    self.init_SVD()\n",
    "    self.train_SVD(trainfile = \"./data/train.csv\")\n",
    "    \n",
    "    # User-user similarity computed from user attributes\n",
    "    self.userSimMatrix = sio.mmread(\"./data/US_userSimMatrix\").todense()\n",
    "    \n",
    "    # Event-event similarities computed from event attributes\n",
    "    self.eventPropSim = sio.mmread(\"./data/EV_eventPropSim\").todense()\n",
    "    self.eventContSim = sio.mmread(\"./data/EV_eventContSim\").todense()\n",
    "    \n",
    "    # Number of friends of each user\n",
    "    self.numFriends = sio.mmread(\"./data/UF_numFriends\")\n",
    "    # Influence of each friend's event scores on the user\n",
    "    self.userFriends = sio.mmread(\"./data/UF_userFriends\").todense()\n",
    "    \n",
    "    # Event popularity\n",
    "    self.eventPopularity = sio.mmread(\"./data/EA_eventPopularity\").todense()\n",
    "    \n",
    "    # Lazy caches for rating-based user-user / item-item similarities.\n",
    "    # Index 0 is wasted so ids can index directly; -1 marks 'not computed yet'.\n",
    "    # BUG FIX: the original did 'self.itemsForUser+1' (TypeError: defaultdict + int,\n",
    "    # the exact failure seen in the cell output), referenced the non-existent\n",
    "    # attribute self.UsersForItem, and used np.float which modern NumPy removed.\n",
    "    n_Users = len(self.itemsForUser)+1\n",
    "    self.similarity = np.zeros((n_Users, n_Users), dtype=float)\n",
    "    self.similarity[:,:] = -1\n",
    "    \n",
    "    n_Items = len(self.usersForItem)+1\n",
    "    self.similarity1 = np.zeros((n_Items, n_Items), dtype=float)\n",
    "    self.similarity1[:,:] = -1\n",
    "\n",
    "  def init_SVD(self, K=20):\n",
    "    #初始化模型参数（for 基于模型的协同过滤SVD_CF）\n",
    "    self.K = K  \n",
    "    \n",
    "    #init parameters\n",
    "    #bias\n",
    "    self.bi = np.zeros(self.n_items)  \n",
    "    self.bu = np.zeros(self.n_users)  \n",
    "    \n",
    "    #the small matrix\n",
    "    #self.P = random((self.n_users,self.K))/10*(np.sqrt(self.K))\n",
    "    #self.Q = random((self.K, self.n_items))/10*(np.sqrt(self.K))  \n",
    "    \n",
    "    self.P = {}\n",
    "    self.Q = {}\n",
    "                  \n",
    "          \n",
    "  def train_SVD(self,trainfile = './data/train.csv', steps=100,gamma=0.04,Lambda=0.15):\n",
    "    #训练SVD模型（for 基于模型的协同过滤SVD_CF）\n",
    "    #gamma：为学习率\n",
    "    #Lambda：正则参数\n",
    "    \n",
    "    #偷懒了，为了和原来的代码的输入接口一样，直接从训练文件中去读取数据\n",
    "    print(\"SVD Train...\")\n",
    "    ftrain = open(trainfile, 'r')\n",
    "    ftrain.readline()\n",
    "    self.mu = 0.0\n",
    "    n_records = 0\n",
    "    uids = []  #每条记录的用户索引\n",
    "    i_ids = [] #每条记录的item索引\n",
    "    #用户-Item关系矩阵R（内容同userEventScores相同），临时变量，训练完了R不再需要\n",
    "    R = np.zeros((self.n_users, self.n_items))\n",
    "    \n",
    "    for line in ftrain:\n",
    "        cols = line.strip().split(\",\")\n",
    "        u = self.userIndex[cols[0]]  #用户\n",
    "        i = self.eventIndex[cols[1]] #活动\n",
    "        \n",
    "        uids.append(u)\n",
    "        i_ids.append(i)\n",
    "        \n",
    "        R[u,i] = int(cols[4])  #interested\n",
    "        self.mu += R[u,i]\n",
    "        n_records += 1\n",
    "        \n",
    "        self.Q.setdefault(i,random((self.K,1))/10*(np.sqrt(self.K)))  \n",
    "        self.P.setdefault(u,random((self.K,1))/10*(np.sqrt(self.K)))  \n",
    "    \n",
    "    ftrain.close()\n",
    "    self.mu /= n_records\n",
    "    \n",
    "    # 请补充完整SVD模型训练过程\n",
    "    for step in range(steps):\n",
    "        print('the ',step,'-th  step is running')\n",
    "        rmse_sum=0.0 \n",
    "\n",
    "        #将训练样本打散顺序\n",
    "        kk = np.random.permutation(n_records)  \n",
    "        for j in range(n_records):  \n",
    "\n",
    "            #每次一个训练样本\n",
    "            i=kk[j]  \n",
    "            uid=uids[i]  \n",
    "            i_id=i_ids[i]  \n",
    "            rat=R[uid][i_id]  \n",
    "\n",
    "            #预测残差\n",
    "            eui=rat-self.pred_SVD(uid,i_id)  \n",
    "            #残差平方和\n",
    "            rmse_sum+=eui**2  \n",
    "\n",
    "            #随机梯度下降，更新\n",
    "            self.bu[uid]+=gamma*(eui-Lambda*self.bu[uid])  \n",
    "            self.bi[i_id]+=gamma*(eui-Lambda*self.bi[i_id]) \n",
    "\n",
    "            temp=self.Q[i_id]  \n",
    "            self.Q[i_id]+=gamma*(eui*self.P[uid]-Lambda*self.Q[i_id])  \n",
    "            self.P[uid]+=gamma*(eui*temp-Lambda*self.P[uid])  \n",
    "\n",
    "        #学习率递减\n",
    "        gamma=gamma*0.93  \n",
    "        print(\"the rmse of this step on train data is \",np.sqrt(rmse_sum/n_records))\n",
    "    \n",
    "    print(\"SVD trained\")\n",
    "    \n",
    "  def pred_SVD(self, uid, i_id):\n",
    "    #根据当前参数，预测用户uid对Item（i_id）的打分        \n",
    "    #ans=self.mu + self.bi[i_id] + self.bu[uid] + np.dot(self.P[uid,:],self.Q[:,i_id])  \n",
    "    \n",
    "    if (self.Q[i_id].all()==None):  \n",
    "        self.Q[i_id]=np.zeros((self.K,1))  \n",
    "    if (self.P[uid].all()==None):  \n",
    "        self.P[uid]=np.zeros((self.K,1))  \n",
    "\n",
    "    ans=self.mu + self.bi[i_id] + self.bu[uid] + np.sum(self.Q[i_id]*self.P[uid])\n",
    "        \n",
    "    #将打分范围控制在0-1之间\n",
    "    if ans>1:  \n",
    "        return 1  \n",
    "    elif ans<0:  \n",
    "        return 0\n",
    "    return ans  \n",
    "\n",
    "  def sim_cal_UserCF(self, uid1, uid2 ):\n",
    "    #请补充基于用户的协同过滤中的两个用户uid1和uid2之间的相似度（根据两个用户对item打分的相似度）\n",
    "    similarity_val = 0.0\n",
    "    \n",
    "    if self.similarity[uid1][uid2]!=-1:  #如果已经计算好\n",
    "            return self.similarity[uid1][uid2]  \n",
    "        \n",
    "    si={}  \n",
    "    for item in self.ItemsForUser[uid1]:  #uid1所有打过分的Item1\n",
    "        if item in self.ItemsForUser[uid2]:  #如果uid2也对该Item打过分\n",
    "            si[item]=1  #item为一个有效item\n",
    "\n",
    "    #print si\n",
    "    n=len(si)   #有效item数，有效item为即对uid对Item打过分，uid2也对Item打过分\n",
    "    if (n==0):  #没有共同打过分的item，相似度设为1.因为最低打分为1？\n",
    "        self.similarity[uid1][uid2]=0  \n",
    "        self.similarity[uid1][uid2]=0\n",
    "        return 0  \n",
    "\n",
    "    #用户uid1打过分的所有有效的item\n",
    "    s1=np.array([self.ItemsForUser[uid1][item] for item in si])  \n",
    "\n",
    "    #用户uid2打过分的所有有效的Item\n",
    "    s2=np.array([self.ItemsForUser[uid2][item] for item in si])  \n",
    "\n",
    "    sum1=np.sum(s1)  \n",
    "    sum2=np.sum(s2)  \n",
    "    sum1Sq=np.sum(s1**2)  \n",
    "    sum2Sq=np.sum(s2**2)  \n",
    "    pSum=np.sum(s1*s2)  \n",
    "\n",
    "    #分子\n",
    "    num=pSum-(sum1*sum2/n)  \n",
    "\n",
    "    #分母\n",
    "    den=np.sqrt((sum1Sq-sum1**2/n)*(sum2Sq-sum2**2/n))  \n",
    "    if den==0:  \n",
    "        self.similarity[uid1][uid2]=0  \n",
    "        self.similarity[uid2][uid1]=0\n",
    "        return 0  \n",
    "\n",
    "    similarity_val = num/den\n",
    "    self.similarity[uid1][uid2]=num/den  \n",
    "    self.similarity[uid2][uid1]=num/den\n",
    "    return similarity_val  \n",
    "\n",
    "  def userCFReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    根据User-based协同过滤，得到event的推荐度\n",
    "    基本的伪代码思路如下：\n",
    "    for item i\n",
    "      for every other user v that has a preference for i\n",
    "        compute similarity s between u and v\n",
    "        incorporate v's preference for i weighted by s into running aversge\n",
    "    return top items ranked by weighted average\n",
    "    \"\"\"\n",
    "    #请补充完整代码\n",
    "    ans = 0.0\n",
    "    \n",
    "    sim_accumulate=0.0  \n",
    "    rat_acc=0.0  \n",
    "\n",
    "    for user in self.UsersForItem[eventId]:  #对i_id打过分的所有用户\n",
    "        sim = self.sim_cal_UserCF(user,userId)    #该user与uid之间的相似度\n",
    "        if sim<=0:continue  \n",
    "        #print sim,self.user_movie[uid][item],sim*self.user_movie[uid][item]  \n",
    "\n",
    "        rat_acc += sim * self.UsersForItem[eventId][user] \n",
    "        sim_accumulate += sim  \n",
    "\n",
    "    #print rat_acc,sim_accumulate  \n",
    "    if sim_accumulate==0: #no same user rated,return average rates of the data  \n",
    "        return  self.mu  \n",
    "    ans = rat_acc/sim_accumulate\n",
    "  \n",
    "    return ans\n",
    "\n",
    "\n",
    "  def sim_cal_ItemCF(self, i_id1, i_id2):\n",
    "    #计算Item i_id1和i_id2之间的相似性\n",
    "    #请补充完整代码\n",
    "    similarity_val = 0.0\n",
    "    \n",
    "    if self.similarity1[i_id1][i_id2]!=-1:  #如果已经计算好\n",
    "        return self.similarity1[i_id1][i_id2]  \n",
    "        \n",
    "    si={}  \n",
    "    for user in self.UsersForItem[i_id1]:  #所有对Item1打过分的的user\n",
    "        if user in self.UsersForItem[i_id2]:  #如果该用户对Item2也打过分\n",
    "            #print self.UsersForItem[i_id2]\n",
    "            si[user]=1  #user为一个有效用用户\n",
    "\n",
    "    #print si\n",
    "    n=len(si)   #有效用户数，有效用户为即对Item1打过分，也对Item2打过分\n",
    "    if (n==0):  #没有共同打过分的用户，相似度设为1.因为最低打分为1？\n",
    "        self.similarity1[i_id1][i_id2]=0  \n",
    "        self.similarity1[i_id1][i_id1]=0  \n",
    "        return 0  \n",
    "\n",
    "    #所有有效用户对Item1的打分\n",
    "    s1=np.array([self.UsersForItem[i_id1][u] for u in si])  \n",
    "\n",
    "    #所有有效用户对Item2的打分\n",
    "    s2=np.array([self.UsersForItem[i_id2][u] for u in si])  \n",
    "\n",
    "    sum1=np.sum(s1)  \n",
    "    sum2=np.sum(s2)  \n",
    "    sum1Sq=np.sum(s1**2)  \n",
    "    sum2Sq=np.sum(s2**2)  \n",
    "    pSum=np.sum(s1*s2)  \n",
    "\n",
    "    #分子\n",
    "    num=pSum-(sum1*sum2/n)  \n",
    "\n",
    "    #分母\n",
    "    den=np.sqrt((sum1Sq-sum1**2/n)*(sum2Sq-sum2**2/n))  \n",
    "    if den==0:  \n",
    "        self.similarity1[i_id1][i_id2]=0  \n",
    "        self.similarity1[i_id2][i_id1]=0  \n",
    "        return 0  \n",
    "\n",
    "    self.similarity1[i_id1][i_id2]=num/den  \n",
    "    self.similarity1[i_id2][i_id1]=num/den  \n",
    "    similarity_val = num/den\n",
    "    \n",
    "    return similarity_val\n",
    "        \n",
    "    #return num/den  \n",
    "            \n",
    "  def eventCFReco(self, userId, eventId):    \n",
    "    \"\"\"\n",
    "    根据基于物品的协同过滤，得到Event的推荐度\n",
    "    基本的伪代码思路如下：\n",
    "    for item i \n",
    "        for every item j tht u has a preference for\n",
    "            compute similarity s between i and j\n",
    "            add u's preference for j weighted by s to a running average\n",
    "    return top items, ranked by weighted average\n",
    "    \"\"\"\n",
    "    #请补充完整代码\n",
    "    ans = 0.0\n",
    "    \n",
    "    sim_accumulate=0.0  \n",
    "    rat_acc=0.0  \n",
    "\n",
    "    for item in self.ItemsForUser[userId]:  #用户uid打过分的所有Item\n",
    "        sim = self.sim_cal_ItemCF(item,eventId)    #该Item与i_id之间的相似度\n",
    "        if sim<0:continue  \n",
    "        #print sim,self.user_movie[uid][item],sim*self.user_movie[uid][item]  \n",
    "\n",
    "        rat_acc += sim * self.ItemsForUser[userId][item]\n",
    "        sim_accumulate += sim  \n",
    "\n",
    "    #print rat_acc,sim_accumulate  \n",
    "    if sim_accumulate==0: #no same user rated,return average rates of the data  \n",
    "        return  self.mu  \n",
    "    ans = rat_acc/sim_accumulate\n",
    "    \n",
    "    return ans\n",
    "    \n",
    "  def svdCFReco(self, userId, eventId):\n",
    "    # Model-based CF (SVD/LFM): map raw ids to matrix indices, then predict.\n",
    "    u = self.userIndex[userId]\n",
    "    i = self.eventIndex[eventId]\n",
    "\n",
    "    return self.pred_SVD(u,i)\n",
    "\n",
    "  def userReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    Like user-based CF, except the user-user similarity comes from user\n",
    "    profile attributes (userSimMatrix); returns a score for the event.\n",
    "    Pseudocode:\n",
    "    for item i\n",
    "      for every other user v that has a preference for i\n",
    "        compute similarity s between u and v\n",
    "        incorporate v's preference for i weighted by s into running average\n",
    "    return top items ranked by weighted average\n",
    "    \"\"\"\n",
    "    i = self.userIndex[userId]\n",
    "    j = self.eventIndex[eventId]\n",
    "\n",
    "    # All users' scores on event j and user i's similarity to every user\n",
    "    vs = self.userEventScores[:, j]\n",
    "    sims = self.userSimMatrix[i, :]\n",
    "\n",
    "    # (1 x n) @ (n x 1) matrix product: similarity-weighted sum of scores\n",
    "    prod = sims * vs\n",
    "\n",
    "    try:\n",
    "      # subtracts the user's own score -- presumably to exclude the user's\n",
    "      # own contribution from the weighted sum; TODO confirm against caller\n",
    "      return prod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      return 0\n",
    "\n",
    "  def eventReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    Like item-based CF, except the event-event similarity comes from event\n",
    "    attributes; returns a (property-based, content-based) score pair.\n",
    "    Pseudocode:\n",
    "    for item i \n",
    "      for every item j that u has a preference for\n",
    "        compute similarity s between i and j\n",
    "        add u's preference for j weighted by s to a running average\n",
    "    return top items, ranked by weighted average\n",
    "    \"\"\"\n",
    "    i = self.userIndex[userId]\n",
    "    j = self.eventIndex[eventId]\n",
    "    # user i's scores on all events, and event j's similarity columns\n",
    "    js = self.userEventScores[i, :]\n",
    "    psim = self.eventPropSim[:, j]\n",
    "    csim = self.eventContSim[:, j]\n",
    "    # similarity-weighted sums of the user's scores\n",
    "    pprod = js * psim\n",
    "    cprod = js * csim\n",
    "    \n",
    "    pscore = 0\n",
    "    cscore = 0\n",
    "    try:\n",
    "      # subtracts the user's own score on the target event -- presumably to\n",
    "      # exclude its own contribution from the weighted sum; TODO confirm\n",
    "      pscore = pprod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      pass\n",
    "    try:\n",
    "      cscore = cprod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      pass\n",
    "    return pscore, cscore\n",
    "\n",
    "  def userPop(self, userId):\n",
    "    \"\"\"\n",
    "    基于用户的朋友个数来推断用户的社交程度\n",
    "    主要的考量是如果用户的朋友非常多，可能会更倾向于参加各种社交活动\n",
    "    \"\"\"\n",
    "    if self.userIndex.has_key(userId):\n",
    "      i = self.userIndex[userId]\n",
    "      try:\n",
    "        return self.numFriends[0, i]\n",
    "      except IndexError:\n",
    "        return 0\n",
    "    else:\n",
    "      return 0\n",
    "\n",
    "  def friendInfluence(self, userId):\n",
    "    \"\"\"\n",
    "    Influence of friends on the user: how many of the user's friends are\n",
    "    keen on attending events.  A circle of friends that actively joins\n",
    "    events may pull the current user along.\n",
    "    \"\"\"\n",
    "    nusers = np.shape(self.userFriends)[1]\n",
    "    i = self.userIndex[userId]\n",
    "    # mean of the user's row of friend-influence scores\n",
    "    return (self.userFriends[i, :].sum(axis=0) / nusers)[0,0]\n",
    "\n",
    "  def eventPop(self, eventId):\n",
    "    \"\"\"\n",
    "    Popularity of the event itself, measured by its number of attendees.\n",
    "    \"\"\"\n",
    "    i = self.eventIndex[eventId]\n",
    "    return self.eventPopularity[i, 0]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style>\n",
       "    .dataframe thead tr:only-child th {\n",
       "        text-align: right;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: left;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>user</th>\n",
       "      <th>event</th>\n",
       "      <th>invited</th>\n",
       "      <th>timestamp</th>\n",
       "      <th>interested</th>\n",
       "      <th>not_interested</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>3044012</td>\n",
       "      <td>1918771225</td>\n",
       "      <td>0</td>\n",
       "      <td>2012-10-02 15:53:05.754000+00:00</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>3044012</td>\n",
       "      <td>1502284248</td>\n",
       "      <td>0</td>\n",
       "      <td>2012-10-02 15:53:05.754000+00:00</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>3044012</td>\n",
       "      <td>2529072432</td>\n",
       "      <td>0</td>\n",
       "      <td>2012-10-02 15:53:05.754000+00:00</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>3044012</td>\n",
       "      <td>3072478280</td>\n",
       "      <td>0</td>\n",
       "      <td>2012-10-02 15:53:05.754000+00:00</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>3044012</td>\n",
       "      <td>1390707377</td>\n",
       "      <td>0</td>\n",
       "      <td>2012-10-02 15:53:05.754000+00:00</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      user       event  invited                         timestamp  interested  \\\n",
       "0  3044012  1918771225        0  2012-10-02 15:53:05.754000+00:00           0   \n",
       "1  3044012  1502284248        0  2012-10-02 15:53:05.754000+00:00           0   \n",
       "2  3044012  2529072432        0  2012-10-02 15:53:05.754000+00:00           1   \n",
       "3  3044012  3072478280        0  2012-10-02 15:53:05.754000+00:00           0   \n",
       "4  3044012  1390707377        0  2012-10-02 15:53:05.754000+00:00           0   \n",
       "\n",
       "   not_interested  \n",
       "0               0  \n",
       "1               0  \n",
       "2               0  \n",
       "3               0  \n",
       "4               0  "
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the raw training records (user, event, invited, timestamp, interested, not_interested)\n",
    "data_path = './data/'\n",
    "df_train = pd.read_csv(data_path + 'train.csv')\n",
    "df_train.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Keep only user / event / interested, the columns the recommender needs\n",
    "df_train.drop(['timestamp','invited','not_interested'], axis=1, inplace=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style>\n",
       "    .dataframe thead tr:only-child th {\n",
       "        text-align: right;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: left;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>user</th>\n",
       "      <th>event</th>\n",
       "      <th>interested</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>3044012</td>\n",
       "      <td>1918771225</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>3044012</td>\n",
       "      <td>1502284248</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>3044012</td>\n",
       "      <td>2529072432</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>3044012</td>\n",
       "      <td>3072478280</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>3044012</td>\n",
       "      <td>1390707377</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      user       event  interested\n",
       "0  3044012  1918771225           0\n",
       "1  3044012  1502284248           0\n",
       "2  3044012  2529072432           1\n",
       "3  3044012  3072478280           0\n",
       "4  3044012  1390707377           0"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Preview the reduced frame\n",
    "df_train.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "RangeIndex: 15398 entries, 0 to 15397\n",
      "Data columns (total 3 columns):\n",
      "user          15398 non-null int64\n",
      "event         15398 non-null int64\n",
      "interested    15398 non-null int64\n",
      "dtypes: int64(3)\n",
      "memory usage: 361.0 KB\n"
     ]
    }
   ],
   "source": [
    "# Check row count and dtypes\n",
    "df_train.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Convert to a plain ndarray of [user, event, interested] rows\n",
    "train_data = df_train.values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SVD Train...\n",
      "the  0 -th  step is running\n",
      "the rmse of this step on train data is  0.740219675469\n",
      "the  1 -th  step is running\n",
      "the rmse of this step on train data is  0.534445324415\n",
      "the  2 -th  step is running\n",
      "the rmse of this step on train data is  0.428669727665\n",
      "the  3 -th  step is running\n",
      "the rmse of this step on train data is  0.380572286418\n",
      "the  4 -th  step is running\n",
      "the rmse of this step on train data is  0.353631839448\n",
      "the  5 -th  step is running\n",
      "the rmse of this step on train data is  0.336208976879\n",
      "the  6 -th  step is running\n",
      "the rmse of this step on train data is  0.32343691405\n",
      "the  7 -th  step is running\n",
      "the rmse of this step on train data is  0.313451911486\n",
      "the  8 -th  step is running\n",
      "the rmse of this step on train data is  0.305606904041\n",
      "the  9 -th  step is running\n",
      "the rmse of this step on train data is  0.299043790879\n",
      "the  10 -th  step is running\n",
      "the rmse of this step on train data is  0.293219919787\n",
      "the  11 -th  step is running\n",
      "the rmse of this step on train data is  0.288648101119\n",
      "the  12 -th  step is running\n",
      "the rmse of this step on train data is  0.284408587989\n",
      "the  13 -th  step is running\n",
      "the rmse of this step on train data is  0.280747140595\n",
      "the  14 -th  step is running\n",
      "the rmse of this step on train data is  0.277500206993\n",
      "the  15 -th  step is running\n",
      "the rmse of this step on train data is  0.274602099678\n",
      "the  16 -th  step is running\n",
      "the rmse of this step on train data is  0.272205283632\n",
      "the  17 -th  step is running\n",
      "the rmse of this step on train data is  0.270055745952\n",
      "the  18 -th  step is running\n",
      "the rmse of this step on train data is  0.267951096336\n",
      "the  19 -th  step is running\n",
      "the rmse of this step on train data is  0.266175888002\n",
      "the  20 -th  step is running\n",
      "the rmse of this step on train data is  0.264556647389\n",
      "the  21 -th  step is running\n",
      "the rmse of this step on train data is  0.263085501572\n",
      "the  22 -th  step is running\n",
      "the rmse of this step on train data is  0.261771095279\n",
      "the  23 -th  step is running\n",
      "the rmse of this step on train data is  0.260565303036\n",
      "the  24 -th  step is running\n",
      "the rmse of this step on train data is  0.259519143311\n",
      "the  25 -th  step is running\n",
      "the rmse of this step on train data is  0.258447616659\n",
      "the  26 -th  step is running\n",
      "the rmse of this step on train data is  0.257464248328\n",
      "the  27 -th  step is running\n",
      "the rmse of this step on train data is  0.25681601533\n",
      "the  28 -th  step is running\n",
      "the rmse of this step on train data is  0.255968647758\n",
      "the  29 -th  step is running\n",
      "the rmse of this step on train data is  0.255327214801\n",
      "the  30 -th  step is running\n",
      "the rmse of this step on train data is  0.254675907804\n",
      "the  31 -th  step is running\n",
      "the rmse of this step on train data is  0.254114779588\n",
      "the  32 -th  step is running\n",
      "the rmse of this step on train data is  0.253534991947\n",
      "the  33 -th  step is running\n",
      "the rmse of this step on train data is  0.253043910252\n",
      "the  34 -th  step is running\n",
      "the rmse of this step on train data is  0.252630467233\n",
      "the  35 -th  step is running\n",
      "the rmse of this step on train data is  0.252134857729\n",
      "the  36 -th  step is running\n",
      "the rmse of this step on train data is  0.251794129484\n",
      "the  37 -th  step is running\n",
      "the rmse of this step on train data is  0.251412796426\n",
      "the  38 -th  step is running\n",
      "the rmse of this step on train data is  0.251112604371\n",
      "the  39 -th  step is running\n",
      "the rmse of this step on train data is  0.250791397525\n",
      "the  40 -th  step is running\n",
      "the rmse of this step on train data is  0.250524572533\n",
      "the  41 -th  step is running\n",
      "the rmse of this step on train data is  0.250253874683\n",
      "the  42 -th  step is running\n",
      "the rmse of this step on train data is  0.249974022552\n",
      "the  43 -th  step is running\n",
      "the rmse of this step on train data is  0.249772697204\n",
      "the  44 -th  step is running\n",
      "the rmse of this step on train data is  0.249563255138\n",
      "the  45 -th  step is running\n",
      "the rmse of this step on train data is  0.249380021122\n",
      "the  46 -th  step is running\n",
      "the rmse of this step on train data is  0.249199860839\n",
      "the  47 -th  step is running\n",
      "the rmse of this step on train data is  0.249042840757\n",
      "the  48 -th  step is running\n",
      "the rmse of this step on train data is  0.248881290002\n",
      "the  49 -th  step is running\n",
      "the rmse of this step on train data is  0.248736689478\n",
      "the  50 -th  step is running\n",
      "the rmse of this step on train data is  0.248609057938\n",
      "the  51 -th  step is running\n",
      "the rmse of this step on train data is  0.248486924453\n",
      "the  52 -th  step is running\n",
      "the rmse of this step on train data is  0.248391240526\n",
      "the  53 -th  step is running\n",
      "the rmse of this step on train data is  0.248271918491\n",
      "the  54 -th  step is running\n",
      "the rmse of this step on train data is  0.248179867804\n",
      "the  55 -th  step is running\n",
      "the rmse of this step on train data is  0.248082227601\n",
      "the  56 -th  step is running\n",
      "the rmse of this step on train data is  0.247999628228\n",
      "the  57 -th  step is running\n",
      "the rmse of this step on train data is  0.247923779035\n",
      "the  58 -th  step is running\n",
      "the rmse of this step on train data is  0.247854312848\n",
      "the  59 -th  step is running\n",
      "the rmse of this step on train data is  0.247785981412\n",
      "the  60 -th  step is running\n",
      "the rmse of this step on train data is  0.247724897602\n",
      "the  61 -th  step is running\n",
      "the rmse of this step on train data is  0.247669087609\n",
      "the  62 -th  step is running\n",
      "the rmse of this step on train data is  0.2476131068\n",
      "the  63 -th  step is running\n",
      "the rmse of this step on train data is  0.247563254359\n",
      "the  64 -th  step is running\n",
      "the rmse of this step on train data is  0.247518608743\n",
      "the  65 -th  step is running\n",
      "the rmse of this step on train data is  0.247474444227\n",
      "the  66 -th  step is running\n",
      "the rmse of this step on train data is  0.247432311273\n",
      "the  67 -th  step is running\n",
      "the rmse of this step on train data is  0.247395745232\n",
      "the  68 -th  step is running\n",
      "the rmse of this step on train data is  0.247360152726\n",
      "the  69 -th  step is running\n",
      "the rmse of this step on train data is  0.247326629987\n",
      "the  70 -th  step is running\n",
      "the rmse of this step on train data is  0.247299889172\n",
      "the  71 -th  step is running\n",
      "the rmse of this step on train data is  0.247271385925\n",
      "the  72 -th  step is running\n",
      "the rmse of this step on train data is  0.247246360881\n",
      "the  73 -th  step is running\n",
      "the rmse of this step on train data is  0.247216202236\n",
      "the  74 -th  step is running\n",
      "the rmse of this step on train data is  0.247199271032\n",
      "the  75 -th  step is running\n",
      "the rmse of this step on train data is  0.247178806399\n",
      "the  76 -th  step is running\n",
      "the rmse of this step on train data is  0.247160604365\n",
      "the  77 -th  step is running\n",
      "the rmse of this step on train data is  0.247141612888\n",
      "the  78 -th  step is running\n",
      "the rmse of this step on train data is  0.247123694815\n",
      "the  79 -th  step is running\n",
      "the rmse of this step on train data is  0.247108400758\n",
      "the  80 -th  step is running\n",
      "the rmse of this step on train data is  0.247094718036\n",
      "the  81 -th  step is running\n",
      "the rmse of this step on train data is  0.247080857507\n",
      "the  82 -th  step is running\n",
      "the rmse of this step on train data is  0.247068729139\n",
      "the  83 -th  step is running\n",
      "the rmse of this step on train data is  0.247057258099\n",
      "the  84 -th  step is running\n",
      "the rmse of this step on train data is  0.247045809748\n",
      "the  85 -th  step is running\n",
      "the rmse of this step on train data is  0.247037055025\n",
      "the  86 -th  step is running\n",
      "the rmse of this step on train data is  0.247026762361\n",
      "the  87 -th  step is running\n",
      "the rmse of this step on train data is  0.247019565379\n",
      "the  88 -th  step is running\n",
      "the rmse of this step on train data is  0.247010784346\n",
      "the  89 -th  step is running\n",
      "the rmse of this step on train data is  0.247003657857\n",
      "the  90 -th  step is running\n",
      "the rmse of this step on train data is  0.246996661271\n",
      "the  91 -th  step is running\n",
      "the rmse of this step on train data is  0.246989539749\n",
      "the  92 -th  step is running\n",
      "the rmse of this step on train data is  0.246983955034\n",
      "the  93 -th  step is running\n",
      "the rmse of this step on train data is  0.246978688802\n",
      "the  94 -th  step is running\n",
      "the rmse of this step on train data is  0.246972958156\n",
      "the  95 -th  step is running\n",
      "the rmse of this step on train data is  0.246968481556\n",
      "the  96 -th  step is running\n",
      "the rmse of this step on train data is  0.246963950464\n",
      "the  97 -th  step is running\n",
      "the rmse of this step on train data is  0.24695974723\n",
      "the  98 -th  step is running\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "the rmse of this step on train data is  0.246955634413\n",
      "the  99 -th  step is running\n",
      "the rmse of this step on train data is  0.246952067613\n",
      "SVD trained\n"
     ]
    },
    {
     "ename": "TypeError",
     "evalue": "unsupported operand type(s) for +: 'collections.defaultdict' and 'int'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-11-0c54026e4169>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mrs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mRecommonderSystem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;32m<ipython-input-10-2046be7a0132>\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m     49\u001b[0m     \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0meventPopularity\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmmread\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"./data/EA_eventPopularity\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtodense\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     50\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 51\u001b[1;33m     \u001b[0mn_Users\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitemsForUser\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m  \u001b[1;31m#数组的索引从0开始，浪费第0个元素\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     52\u001b[0m     \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msimilarity\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mn_Users\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mn_Users\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     53\u001b[0m     \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msimilarity\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m-\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mTypeError\u001b[0m: unsupported operand type(s) for +: 'collections.defaultdict' and 'int'"
     ]
    }
   ],
   "source": [
    "rs = RecommonderSystem()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
