{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将所有特征串联起来，构成RS_Train.csv\n",
    "#RS_Test.csv\n",
    "#为最后推荐系统做准备\n",
    "from __future__ import division\n",
    "\n",
    "import pickle as pk\n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "import scipy.sparse as ss\n",
    "from numpy.random import random  \n",
    "from collections import defaultdict\n",
    "from scipy.stats import pearsonr\n",
    "\n",
    "class RecommonderSystem:\n",
    "  \"\"\"Recommender that combines precomputed artifacts (index maps, similarity\n",
    "  matrices, popularity scores) with user-CF, item-CF and SVD-based scoring.\"\"\"\n",
    "  def __init__(self):\n",
    "    # Load precomputed data files and initialize model state.\n",
    "    \n",
    "    # Re-mapped indices: original user/event ids -> dense integer indices\n",
    "    self.userIndex = pk.load(open(\"./created_data/PE_userIndex.pkl\", 'rb'))\n",
    "    self.eventIndex = pk.load(open(\"./created_data/PE_eventIndex.pkl\", 'rb'))\n",
    "    self.n_users = len(self.userIndex)\n",
    "    self.n_items = len(self.eventIndex)\n",
    "    \n",
    "    # User-event score matrix R.\n",
    "    # NOTE: train_SVD re-reads the scores from the CSV itself because the two\n",
    "    # code paths expect different formats.\n",
    "    self.userEventScores = sio.mmread(\"./created_data/PE_userEventScores\").todense()\n",
    "    \n",
    "    # Inverted indexes:\n",
    "    # events each user attended\n",
    "    self.itemsForUser = pk.load(open(\"./created_data/PE_eventsForUser.pkl\", 'rb'))\n",
    "    # users attending each event\n",
    "    self.usersForItem = pk.load(open(\"./created_data/PE_usersForEvent.pkl\", 'rb'))\n",
    "    \n",
    "    # Model-based CF: initialize SVD parameters (training is invoked separately).\n",
    "    self.init_SVD()\n",
    "#     self.train_SVD(trainfile = \"train.csv\")\n",
    "    \n",
    "    # User-user similarity computed from user profile attributes\n",
    "    self.userSimMatrix = sio.mmread(\"./created_data/US_userSimMatrix\").todense()\n",
    "    \n",
    "    # Event-event similarities from event properties / event content\n",
    "    self.eventPropSim = sio.mmread(\"./created_data/EV_eventPropSim\").todense()\n",
    "    self.eventContSim = sio.mmread(\"./created_data/EV_eventContSim\").todense()\n",
    "    \n",
    "    # Number of friends per user\n",
    "    self.numFriends = sio.mmread(\"./created_data/UF_numFriends\")\n",
    "    # Influence of each user's friends' event scores on that user\n",
    "    self.userFriends = sio.mmread(\"./created_data/UF_userFriends\").todense()\n",
    "    \n",
    "    # Popularity of each event\n",
    "    self.eventPopularity = sio.mmread(\"./created_data/EA_eventPopularity\").todense()\n",
    "\n",
    "  def init_SVD(self, K=20):\n",
    "    # Initialize latent-factor model parameters (for model-based CF / SVD).\n",
    "    # K: number of latent factors.\n",
    "    self.K = K  \n",
    "    \n",
    "    #init parameters\n",
    "    # item and user bias terms\n",
    "    self.bi = np.zeros(self.n_items)  \n",
    "    self.bu = np.zeros(self.n_users)  \n",
    "    \n",
    "    # Low-rank factor matrices: P (n_users x K) and Q (K x n_items),\n",
    "    # scaled so that initial predictions stay small.\n",
    "    self.P = random((self.n_users,self.K))/10*(np.sqrt(self.K))\n",
    "    self.Q = random((self.K, self.n_items))/10*(np.sqrt(self.K))  \n",
    "                  \n",
    "          \n",
    "  def train_SVD(self,trainfile = 'train.csv', steps=100,gamma=0.04,Lambda=0.15, early_stop_rounds=10):\n",
    "    \"\"\"Train the latent-factor model (funk-SVD) with per-sample SGD.\n",
    "\n",
    "    trainfile: CSV with a header; columns 0,1,4 = user id, event id, interested.\n",
    "    steps: maximum number of passes over the training records.\n",
    "    gamma: learning rate, decayed by 0.93 after each full pass.\n",
    "    Lambda: L2 regularization strength.\n",
    "    early_stop_rounds: stop after this many passes without RMSE improvement.\n",
    "    \"\"\"\n",
    "    print(\"SVD Training...\")\n",
    "    ftrain = open(trainfile, 'r')\n",
    "    ftrain.readline()  # skip the header row\n",
    "    self.mu = 0.0  # global mean rating\n",
    "    n_records = 0\n",
    "    uids = []  # user index of each record\n",
    "    i_ids = [] # item index of each record\n",
    "    # Temporary user-item matrix R (same content as userEventScores);\n",
    "    # no longer needed once training finishes.\n",
    "    R = np.zeros((self.n_users, self.n_items))\n",
    "    \n",
    "    for line in ftrain:\n",
    "        cols = line.strip().split(\",\")\n",
    "        u = self.userIndex[cols[0]]  # user\n",
    "        i = self.eventIndex[cols[1]] # event\n",
    "        \n",
    "        uids.append(u)\n",
    "        i_ids.append(i)\n",
    "        \n",
    "        R[u,i] = int(cols[4])  # interested flag\n",
    "        self.mu += R[u,i]\n",
    "        n_records += 1\n",
    "    \n",
    "    ftrain.close()\n",
    "    self.mu /= n_records\n",
    "    best_rmse = float('inf')\n",
    "    stale_rounds = 0  # consecutive passes without improvement\n",
    "    for step in range(steps):\n",
    "        print('the {}-th step is running...'.format(step+1))\n",
    "        rmse_sum = 0.0\n",
    "        \n",
    "        kk = np.random.permutation(n_records) # shuffle the training samples\n",
    "        for j in range(n_records):\n",
    "            # Train on one sample at a time\n",
    "            index = kk[j]\n",
    "            u = uids[index]\n",
    "            i = i_ids[index]\n",
    "            rat = R[u,i] # true score\n",
    "            \n",
    "            # Prediction residual and its contribution to the squared error\n",
    "            eui = rat - self.pred_SVD(u, i)\n",
    "            rmse_sum += eui ** 2\n",
    "            \n",
    "            # SGD update. BUGFIX: cache the old P value so Q's gradient uses\n",
    "            # the pre-update factor, and scale the regularization term by\n",
    "            # gamma (consistent with the bias updates below).\n",
    "            for k in range(self.K):\n",
    "                p_old = self.P[u,k]\n",
    "                self.P[u,k] += gamma * (eui * self.Q[k,i] - Lambda * self.P[u,k])\n",
    "                self.Q[k,i] += gamma * (eui * p_old - Lambda * self.Q[k,i])\n",
    "                \n",
    "            self.bu[u] += gamma * (eui - Lambda * self.bu[u])\n",
    "            self.bi[i] += gamma * (eui - Lambda * self.bi[i])\n",
    "            \n",
    "        # BUGFIX: decay the learning rate once per pass; the original decayed\n",
    "        # per sample, underflowing gamma to ~0 within a few hundred samples\n",
    "        # and stalling all later training.\n",
    "        gamma = gamma * 0.93\n",
    "        rmse = np.sqrt(rmse_sum/n_records)\n",
    "        print(\"the rmse of this step on train data is \", rmse)\n",
    "        # Early stopping on training RMSE (early_stop_rounds was previously unused)\n",
    "        if rmse < best_rmse - 1e-9:\n",
    "            best_rmse = rmse\n",
    "            stale_rounds = 0\n",
    "        else:\n",
    "            stale_rounds += 1\n",
    "            if stale_rounds >= early_stop_rounds:\n",
    "                break\n",
    "\n",
    "    print(\"SVD trained\")\n",
    "    \n",
    "  def pred_SVD(self, uid, i_id):\n",
    "    \"\"\"Predict user uid's score for item i_id from the current SVD\n",
    "    parameters, clipped to the [0, 1] range.\"\"\"\n",
    "    # mu is the global mean; bu/bi are user/item biases; P.Q is the\n",
    "    # latent-factor interaction term.\n",
    "    raw = self.mu + self.bi[i_id] + self.bu[uid] + np.dot(self.P[uid,:], self.Q[:,i_id])\n",
    "    if raw > 1:\n",
    "        return 1\n",
    "    if raw < 0:\n",
    "        return 0\n",
    "    return raw\n",
    "\n",
    "  def sim_cal_UserCF(self, uid1, uid2 ):\n",
    "    \"\"\"\n",
    "    User-user similarity for user-based CF, computed from how similarly the\n",
    "    two users scored the events they both attended.\n",
    "    :param uid1: re-mapped index of user 1\n",
    "    :param uid2: re-mapped index of user 2\n",
    "    :return: similarity (Pearson correlation, or 0 when undefined)\n",
    "    \"\"\"       \n",
    "    same_events = self.itemsForUser[uid1] & self.itemsForUser[uid2] # events both users attended\n",
    "    # itemsForUser is a defaultdict whose values are sets, so set intersection works directly.\n",
    "    if len(same_events) == 0:\n",
    "        similarity = 0 # no common events -> similarity is 0\n",
    "    else:\n",
    "        # Scores of each user on the shared events (interested: 1, not interested: 0)\n",
    "        score_uid1 = np.array([self.userEventScores[uid1, event] for event in same_events])\n",
    "        score_uid2 = np.array([self.userEventScores[uid2, event] for event in same_events])\n",
    "        # Pearson correlation between the two score vectors.\n",
    "        simPearson = pearsonr(score_uid1,score_uid2)[0] # pearsonr returns (coefficient, p-value)\n",
    "        if np.isnan(simPearson): # the coefficient can be NaN (e.g. constant vectors); treat as 0. It may legitimately be negative.\n",
    "            similarity = 0\n",
    "        else:\n",
    "            similarity = simPearson\n",
    "    return similarity  \n",
    "\n",
    "  def userCFReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    User-based collaborative-filtering recommendation score for the event.\n",
    "    Pseudo-code:\n",
    "    for item i\n",
    "      for every other user v that has a preference for i\n",
    "        compute similarity s between u and v\n",
    "        incorporate v's preference for i weighted by s into running average\n",
    "    return top items ranked by weighted average\n",
    "    \"\"\"\n",
    "    i = self.userIndex[userId]\n",
    "    j = self.eventIndex[eventId]\n",
    "    \n",
    "    score_i = np.array([self.userEventScores[i, event] for event in self.itemsForUser[i]])\n",
    "    # Mean interest of user i over all events that user attended\n",
    "    if len(score_i) == 0:\n",
    "        interested_avg_i = 0\n",
    "    else:\n",
    "        interested_avg_i = sum(score_i) / len(score_i)\n",
    "    \n",
    "    sim_weight = 0.0 # numerator\n",
    "    sim_sum = 0.0 # denominator\n",
    "        \n",
    "    for user in self.usersForItem[j]: # every user who scored/attended event j\n",
    "        sim = self.sim_cal_UserCF(i, user) # similarity between the two users\n",
    "        if sim < 0:\n",
    "            continue\n",
    "        score_user = np.array([self.userEventScores[user, event] for event in self.itemsForUser[user]])\n",
    "        # Mean interest of this neighbor over the events they attended\n",
    "        if len(score_user) == 0:\n",
    "            interested_avg_user = 0\n",
    "        else:\n",
    "            interested_avg_user = sum(score_user) / len(score_user)\n",
    "        \n",
    "        sim_sum += sim # accumulate the denominator\n",
    "        sim_weight += sim * (self.userEventScores[user, j] - interested_avg_user )# neighbor's mean-centered score on event j\n",
    "        \n",
    "    if sim_sum == 0:\n",
    "        return 0\n",
    "    ans = interested_avg_i + sim_weight / sim_sum # add back user i's mean interest\n",
    "    # Clip the result to [0, 1]\n",
    "    if ans > 1:\n",
    "        ans = 1\n",
    "    elif ans < 0:\n",
    "        ans = 0\n",
    "    return ans\n",
    "\n",
    "\n",
    "  def sim_cal_ItemCF(self, i_id1, i_id2):\n",
    "    \"\"\"\n",
    "    Item-item similarity between events i_id1 and i_id2, analogous to the\n",
    "    user-user case; usersForItem holds sets, so intersection works directly.\n",
    "    \"\"\"\n",
    "    same_user = self.usersForItem[i_id1] &  self.usersForItem[i_id2]\n",
    "    if len(same_user) == 0:\n",
    "        similarity = 0\n",
    "    else:\n",
    "        # Scores of the shared users on each of the two events\n",
    "        score_item1 = np.array([self.userEventScores[u, i_id1] for u in same_user])\n",
    "        score_item2 = np.array([self.userEventScores[u, i_id2] for u in same_user])\n",
    "        \n",
    "        simPearson = pearsonr(score_item1,score_item2)[0] # pearsonr returns (coefficient, p-value)\n",
    "        if np.isnan(simPearson): # the coefficient can be NaN (e.g. constant vectors); treat as 0. It may legitimately be negative.\n",
    "            similarity = 0\n",
    "        else:\n",
    "            similarity = simPearson\n",
    "            \n",
    "    return similarity  \n",
    "            \n",
    "  def eventCFReco(self, userId, eventId):    \n",
    "    \"\"\"\n",
    "    Item-based collaborative-filtering recommendation score for the event.\n",
    "    Pseudo-code:\n",
    "    for item i\n",
    "        for every item j that u has a preference for\n",
    "            compute similarity s between i and j\n",
    "            add u's preference for j weighted by s to a running average\n",
    "    return top items, ranked by weighted average\n",
    "    \"\"\"\n",
    "    i = self.userIndex[userId]\n",
    "    j = self.eventIndex[eventId]\n",
    "      \n",
    "    sim_weight = 0.0 # numerator\n",
    "    sim_sum = 0.0 # denominator\n",
    "        \n",
    "    for item in self.itemsForUser[i]: # every event this user scored/attended\n",
    "        # BUGFIX: use the item-item similarity here; the original called\n",
    "        # sim_cal_UserCF on two *event* indexes.\n",
    "        sim = self.sim_cal_ItemCF(j, item) # similarity between the two events\n",
    "        if sim < 0:\n",
    "            continue\n",
    "                \n",
    "        sim_sum += sim # accumulate the denominator\n",
    "        sim_weight += sim * self.userEventScores[i, item] # similarity-weighted score\n",
    "        \n",
    "    if sim_sum == 0:\n",
    "        return 0\n",
    "    ans = sim_weight / sim_sum # weighted average\n",
    "    # Clip the result to [0, 1]\n",
    "    if ans > 1:\n",
    "        ans = 1\n",
    "    elif ans < 0:\n",
    "        ans = 0\n",
    "    return ans\n",
    "    \n",
    "  def svdCFReco(self, userId, eventId):\n",
    "    \"\"\"Model-based CF (SVD/LFM): map raw ids to dense indices and predict.\"\"\"\n",
    "    uidx = self.userIndex[userId]\n",
    "    iidx = self.eventIndex[eventId]\n",
    "    return self.pred_SVD(uidx, iidx)\n",
    "\n",
    "  def userReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    Like user-based CF, but the user-user similarity comes from user profile\n",
    "    attributes (userSimMatrix) rather than co-rating behavior.\n",
    "    Pseudo-code:\n",
    "    for item i\n",
    "      for every other user v that has a preference for i\n",
    "        compute similarity s between u and v\n",
    "        incorporate v's preference for i weighted by s into running average\n",
    "    return top items ranked by weighted average\n",
    "    \"\"\"\n",
    "    i = self.userIndex[userId]\n",
    "    j = self.eventIndex[eventId]\n",
    "\n",
    "    vs = self.userEventScores[:, j]  # every user's score for event j (column)\n",
    "    sims = self.userSimMatrix[i, :]  # user i's similarity to every user (row)\n",
    "\n",
    "    # (1 x n) * (n x 1) matrix product -> 1x1 matrix holding the weighted sum\n",
    "    prod = sims * vs\n",
    "\n",
    "    try:\n",
    "      return prod[0, 0] - self.userEventScores[i, j]  # remove user i's own contribution\n",
    "    except IndexError:\n",
    "      return 0\n",
    "\n",
    "  def eventReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    Like item-based CF, but the item-item similarity comes from the events'\n",
    "    own attributes (property-based and content-based similarity matrices).\n",
    "    Pseudo-code:\n",
    "    for item i \n",
    "      for every item j that u has a preference for\n",
    "        compute similarity s between i and j\n",
    "        add u's preference for j weighted by s to a running average\n",
    "    return top items, ranked by weighted average\n",
    "    Returns a (property-based score, content-based score) pair.\n",
    "    \"\"\"\n",
    "    i = self.userIndex[userId]\n",
    "    j = self.eventIndex[eventId]\n",
    "    js = self.userEventScores[i, :]  # user i's scores for every event (row)\n",
    "    psim = self.eventPropSim[:, j]   # property similarity of every event to j (column)\n",
    "    csim = self.eventContSim[:, j]   # content similarity of every event to j (column)\n",
    "    pprod = js * psim                # (1 x n) * (n x 1) -> 1x1 weighted sums\n",
    "    cprod = js * csim\n",
    "    \n",
    "    pscore = 0\n",
    "    cscore = 0\n",
    "    try:\n",
    "      pscore = pprod[0, 0] - self.userEventScores[i, j]  # remove event j's own contribution\n",
    "    except IndexError:\n",
    "      pass\n",
    "    try:\n",
    "      cscore = cprod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      pass\n",
    "    return pscore, cscore\n",
    "\n",
    "  def userPop(self, userId):\n",
    "    \"\"\"\n",
    "    Social-activity proxy for a user: their friend count.\n",
    "    Rationale: users with many friends may be more inclined to attend events.\n",
    "    \"\"\"\n",
    "    if userId not in self.userIndex:\n",
    "      return 0\n",
    "    idx = self.userIndex[userId]\n",
    "    try:\n",
    "      return self.numFriends[0, idx]\n",
    "    except IndexError:\n",
    "      return 0\n",
    "\n",
    "  def friendInfluence(self, userId):\n",
    "    \"\"\"\n",
    "    Influence of friends on the user: how much the event-participation scores\n",
    "    of the user's friends affect this user, averaged over all users. A circle\n",
    "    of friends that actively attends events may sway the user.\n",
    "    \"\"\"\n",
    "    nusers = np.shape(self.userFriends)[1]\n",
    "    i = self.userIndex[userId]\n",
    "    # Row i holds the friend-influence scores; average them and unwrap the 1x1 matrix.\n",
    "    return (self.userFriends[i, :].sum(axis=0) / nusers)[0,0]\n",
    "\n",
    "  def eventPop(self, eventId):\n",
    "    \"\"\"Popularity of the event itself, measured by attendance.\"\"\"\n",
    "    idx = self.eventIndex[eventId]\n",
    "    return self.eventPopularity[idx, 0]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def generateRSData(RS, train=True, header=True):\n",
    "    \"\"\"\n",
    "    Concatenate the user-based CF, item-based CF, SVD, and popularity/influence\n",
    "    scores into one feature row per record, written to RS_train.csv /\n",
    "    RS_test.csv for the downstream classifier.\n",
    "    \"\"\"\n",
    "    fn = \"train.csv\" if train else \"test.csv\"\n",
    "    fin = open(fn, 'rb')\n",
    "    fout = open(\"RS_\" + fn, 'w')\n",
    "    \n",
    "    # Skip the header row of the input file\n",
    "    fin.readline()\n",
    "    \n",
    "    # write output header\n",
    "    if header:\n",
    "      ocolnames = [\"invited\", \"userCF_reco\", \"evtCF_reco\",\"svdCF_reco\",\"user_reco\", \"evt_p_reco\",\n",
    "        \"evt_c_reco\", \"user_pop\", \"frnd_infl\", \"evt_pop\"]\n",
    "      if train:\n",
    "        ocolnames.append(\"interested\")\n",
    "        ocolnames.append(\"not_interested\")\n",
    "      fout.write(\",\".join(ocolnames) + \"\\n\")\n",
    "    \n",
    "    ln = 0\n",
    "    for line in fin:\n",
    "      ln += 1\n",
    "      line = line.decode()\n",
    "      cols = line.strip().split(\",\")\n",
    "      userId = cols[0]\n",
    "      eventId = cols[1]\n",
    "      invited = cols[2]\n",
    "      \n",
    "      # BUGFIX: log progress after parsing the current record; the original\n",
    "      # printed before parsing, so it showed the ids of the *previous* line.\n",
    "      if ln%500 == 0:\n",
    "          print(\"%s:%d (userId, eventId)=(%s, %s)\" % (fn, ln, userId, eventId))\n",
    "      \n",
    "      userCF_reco = RS.userCFReco(userId, eventId)\n",
    "      itemCF_reco = RS.eventCFReco(userId, eventId)\n",
    "      svdCF_reco = RS.svdCFReco(userId, eventId)\n",
    "        \n",
    "      user_reco = RS.userReco(userId, eventId)\n",
    "      evt_p_reco, evt_c_reco = RS.eventReco(userId, eventId)\n",
    "      user_pop = RS.userPop(userId)\n",
    "     \n",
    "      frnd_infl = RS.friendInfluence(userId)\n",
    "      evt_pop = RS.eventPop(eventId)\n",
    "      ocols = [invited, userCF_reco, itemCF_reco, svdCF_reco,user_reco, evt_p_reco,\n",
    "        evt_c_reco, user_pop, frnd_infl, evt_pop]\n",
    "      \n",
    "      if train:\n",
    "        ocols.append(cols[4]) # interested\n",
    "        ocols.append(cols[5]) # not_interested\n",
    "      fout.write(\",\".join(map(str, ocols)) + \"\\n\")\n",
    "    \n",
    "    fin.close()\n",
    "    fout.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "ename": "AttributeError",
     "evalue": "'RecommonderSystem' object has no attribute 'mu'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-18-c3897c74b322>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m      8\u001b[0m \u001b[1;31m#     if len(df[i].value_counts()) != 1:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      9\u001b[0m \u001b[1;31m#         print(df[i].value_counts())\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 10\u001b[1;33m \u001b[0mrs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmu\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;31mAttributeError\u001b[0m: 'RecommonderSystem' object has no attribute 'mu'"
     ]
    }
   ],
   "source": [
    "# Scratch/debug cell: instantiate the recommender and inspect its state.\n",
    "# NOTE(review): the recorded error output for this cell shows `rs.mu` being\n",
    "# accessed without running train_SVD first (train_SVD is what sets self.mu).\n",
    "import pandas as pd\n",
    "rs = RecommonderSystem()\n",
    "# np.sum(rs.userFriends)\n",
    "\n",
    "# df = pd.DataFrame(rs.userFriends)\n",
    "# df.to_csv('tet.csv',index=False,header=False)\n",
    "# for i in range(3391):\n",
    "#     if len(df[i].value_counts()) != 1:\n",
    "#         print(df[i].value_counts())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SVD Training...\n",
      "the 1-th step is running...\n",
      "the rmse of this step on train data is  0.668940469065\n",
      "the 2-th step is running...\n",
      "the rmse of this step on train data is  0.474759311587\n",
      "the 3-th step is running...\n",
      "the rmse of this step on train data is  0.448480823141\n",
      "the 4-th step is running...\n",
      "the rmse of this step on train data is  0.444462355097\n",
      "the 5-th step is running...\n",
      "the rmse of this step on train data is  0.443383636813\n",
      "the 6-th step is running...\n",
      "the rmse of this step on train data is  0.443081859121\n",
      "the 7-th step is running...\n",
      "the rmse of this step on train data is  0.442985937112\n",
      "the 8-th step is running...\n",
      "the rmse of this step on train data is  0.442956499524\n",
      "the 9-th step is running...\n",
      "the rmse of this step on train data is  0.442945919726\n",
      "the 10-th step is running...\n",
      "the rmse of this step on train data is  0.442942767786\n",
      "the 11-th step is running...\n",
      "the rmse of this step on train data is  0.442941513995\n",
      "the 12-th step is running...\n",
      "the rmse of this step on train data is  0.442941130945\n",
      "the 13-th step is running...\n",
      "the rmse of this step on train data is  0.442940954467\n",
      "the 14-th step is running...\n",
      "the rmse of this step on train data is  0.442940910386\n",
      "the 15-th step is running...\n",
      "the rmse of this step on train data is  0.442940889511\n",
      "the 16-th step is running...\n",
      "the rmse of this step on train data is  0.442940884436\n",
      "the 17-th step is running...\n",
      "the rmse of this step on train data is  0.442940881107\n",
      "the 18-th step is running...\n",
      "the rmse of this step on train data is  0.442940880227\n",
      "the 19-th step is running...\n",
      "the rmse of this step on train data is  0.442940879725\n",
      "the 20-th step is running...\n",
      "the rmse of this step on train data is  0.442940879591\n",
      "the 21-th step is running...\n",
      "the rmse of this step on train data is  0.44294087954\n",
      "the 22-th step is running...\n",
      "the rmse of this step on train data is  0.442940879516\n",
      "the 23-th step is running...\n",
      "the rmse of this step on train data is  0.442940879502\n",
      "the 24-th step is running...\n",
      "the rmse of this step on train data is  0.442940879495\n",
      "the 25-th step is running...\n",
      "the rmse of this step on train data is  0.442940879492\n",
      "the 26-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 27-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 28-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 29-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 30-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 31-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 32-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 33-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 34-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 35-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 36-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 37-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 38-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 39-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 40-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 41-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 42-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 43-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 44-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 45-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 46-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 47-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 48-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 49-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 50-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 51-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 52-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 53-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 54-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 55-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 56-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 57-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 58-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 59-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 60-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 61-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 62-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 63-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 64-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 65-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 66-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 67-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 68-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 69-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 70-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 71-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 72-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 73-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 74-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 75-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 76-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 77-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 78-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 79-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 80-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 81-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 82-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 83-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 84-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 85-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 86-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 87-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 88-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 89-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 90-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 91-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 92-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 93-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 94-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 95-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 96-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 97-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 98-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "the 99-th step is running...\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "the rmse of this step on train data is  0.442940879491\n",
      "the 100-th step is running...\n",
      "the rmse of this step on train data is  0.442940879491\n",
      "SVD trained\n",
      "生成训练数据...\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Anaconda3\\lib\\site-packages\\scipy\\stats\\stats.py:3021: RuntimeWarning: invalid value encountered in double_scalars\n",
      "  r = r_num / r_den\n",
      "C:\\Anaconda3\\lib\\site-packages\\scipy\\stats\\stats.py:5277: RuntimeWarning: invalid value encountered in less\n",
      "  x = np.where(x < 1.0, x, 1.0)  # if x > 1 then return 1.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train.csv:500 (userId, eventId)=(123290209, 1887085024)\n",
      "train.csv:1000 (userId, eventId)=(272886293, 199858305)\n",
      "train.csv:1500 (userId, eventId)=(395305791, 1582270949)\n",
      "train.csv:2000 (userId, eventId)=(527523423, 3272728211)\n",
      "train.csv:2500 (userId, eventId)=(651258472, 792632006)\n",
      "train.csv:3000 (userId, eventId)=(811791433, 524756826)\n",
      "train.csv:3500 (userId, eventId)=(985547042, 1269035551)\n",
      "train.csv:4000 (userId, eventId)=(1107615001, 173949238)\n",
      "train.csv:4500 (userId, eventId)=(1236336671, 3849306291)\n",
      "train.csv:5000 (userId, eventId)=(1414301782, 2652356640)\n",
      "train.csv:5500 (userId, eventId)=(1595465532, 955398943)\n",
      "train.csv:6000 (userId, eventId)=(1747091728, 2131379889)\n",
      "train.csv:6500 (userId, eventId)=(1914182220, 955398943)\n",
      "train.csv:7000 (userId, eventId)=(2071842684, 1076364848)\n",
      "train.csv:7500 (userId, eventId)=(2217853337, 3051438735)\n",
      "train.csv:8000 (userId, eventId)=(2338481531, 2525447278)\n",
      "train.csv:8500 (userId, eventId)=(2489551967, 520657921)\n",
      "train.csv:9000 (userId, eventId)=(2650493630, 87962584)\n",
      "train.csv:9500 (userId, eventId)=(2791418962, 4223848259)\n",
      "train.csv:10000 (userId, eventId)=(2903662804, 2791462807)\n",
      "train.csv:10500 (userId, eventId)=(3036141956, 3929507420)\n",
      "train.csv:11000 (userId, eventId)=(3176074542, 3459485614)\n",
      "train.csv:11500 (userId, eventId)=(3285425249, 2271782630)\n",
      "train.csv:12000 (userId, eventId)=(3410667855, 1063772489)\n",
      "train.csv:12500 (userId, eventId)=(3531604778, 2584839423)\n",
      "train.csv:13000 (userId, eventId)=(3686871863, 53495098)\n",
      "train.csv:13500 (userId, eventId)=(3833637800, 2415873572)\n",
      "train.csv:14000 (userId, eventId)=(3944021305, 2096772901)\n",
      "train.csv:14500 (userId, eventId)=(4075466480, 3567240505)\n",
      "train.csv:15000 (userId, eventId)=(4197193550, 1628057176)\n",
      "生成预测数据...\n",
      "\n",
      "test.csv:500 (userId, eventId)=(182290053, 2529072432)\n",
      "test.csv:1000 (userId, eventId)=(433510318, 4244463632)\n",
      "test.csv:1500 (userId, eventId)=(632808865, 2845303452)\n",
      "test.csv:2000 (userId, eventId)=(813611885, 2036538169)\n",
      "test.csv:2500 (userId, eventId)=(1010701404, 303459881)\n",
      "test.csv:3000 (userId, eventId)=(1210932037, 2529072432)\n",
      "test.csv:3500 (userId, eventId)=(1452921099, 2705317682)\n",
      "test.csv:4000 (userId, eventId)=(1623287180, 1626678328)\n",
      "test.csv:4500 (userId, eventId)=(1855201342, 2603032829)\n",
      "test.csv:5000 (userId, eventId)=(2083900381, 2529072432)\n",
      "test.csv:5500 (userId, eventId)=(2318415276, 2509151803)\n",
      "test.csv:6000 (userId, eventId)=(2528161539, 4025975316)\n",
      "test.csv:6500 (userId, eventId)=(2749110768, 4244406355)\n",
      "test.csv:7000 (userId, eventId)=(2927772127, 1532377761)\n",
      "test.csv:7500 (userId, eventId)=(3199685636, 1776393554)\n",
      "test.csv:8000 (userId, eventId)=(3393388475, 680270887)\n",
      "test.csv:8500 (userId, eventId)=(3601169721, 154434302)\n",
      "test.csv:9000 (userId, eventId)=(3828963415, 3067222491)\n",
      "test.csv:9500 (userId, eventId)=(4018723397, 2522610844)\n",
      "test.csv:10000 (userId, eventId)=(4180064266, 2658555390)\n"
     ]
    }
   ],
   "source": [
    "RS = RecommonderSystem()\n",
    "# BUGFIX: __init__ has the train_SVD call commented out, but generateRSData\n",
    "# -> svdCFReco -> pred_SVD reads self.mu, which only train_SVD sets. Without\n",
    "# this explicit call, a fresh Restart-and-Run-All raises AttributeError.\n",
    "RS.train_SVD(trainfile = \"train.csv\")\n",
    "\n",
    "print(\"生成训练数据...\\n\")\n",
    "generateRSData(RS,train=True,  header=True)\n",
    "\n",
    "print(\"生成预测数据...\\n\")\n",
    "generateRSData(RS, train=False, header=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "时间、地点等特征都没有处理了，可以考虑用户看到event的时间与event开始时间的差、用户地点和event地点的差异。。。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
