{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将所有特征串联起来，构成RS_Train.csv\n",
    "#RS_Test.csv\n",
    "#为最后推荐系统做准备\n",
    "from __future__ import division\n",
    "\n",
    "import pickle\n",
    "import math\n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "import scipy.sparse as ss\n",
    "from numpy.random import random  \n",
    "from collections import defaultdict\n",
    "\n",
    "class RecommonderSystem:\n",
    "  \"\"\"Recommender that combines several signals (user/item CF, an SVD model,\n",
    "  social and popularity features) loaded from preprocessed feature files.\"\"\"\n",
    "  def __init__(self):\n",
    "    # Load all precomputed data files and train the SVD model.\n",
    "    \n",
    "    # Re-encoded indices: raw user/event id -> matrix row/column index.\n",
    "    self.userIndex = pickle.load(open(\"PE_userIndex.pkl\", 'rb'))\n",
    "    self.eventIndex = pickle.load(open(\"PE_eventIndex.pkl\", 'rb'))\n",
    "    self.n_users = len(self.userIndex)\n",
    "    self.n_items = len(self.eventIndex)\n",
    "    \n",
    "    # User-event score (rating) matrix R.\n",
    "    # NOTE: train_SVD re-reads the ratings from the raw train file because\n",
    "    # the two code paths expect different formats.\n",
    "    self.userEventScores = sio.mmread(\"PE_userEventScores\").todense()\n",
    "    \n",
    "    # Inverted indices:\n",
    "    ## events attended by each user\n",
    "    self.itemsForUser = pickle.load(open(\"PE_eventsForUser.pkl\", 'rb'))\n",
    "    ## users attending each event\n",
    "    self.usersForItem = pickle.load(open(\"PE_usersForEvent.pkl\", 'rb'))\n",
    "    \n",
    "    # Model-based collaborative filtering: initialize parameters and train.\n",
    "    self.init_SVD()\n",
    "    self.train_SVD(trainfile = \"train.csv\")\n",
    "    \n",
    "    # User-user similarity computed from user profile attributes.\n",
    "    self.userSimMatrix = sio.mmread(\"US_userSimMatrix\").todense()\n",
    "    \n",
    "    # Event-event similarities from event properties and event content.\n",
    "    self.eventPropSim = sio.mmread(\"EV_eventPropSim\").todense()\n",
    "    self.eventContSim = sio.mmread(\"EV_eventContSim\").todense()\n",
    "    \n",
    "    # Number of friends for each user.\n",
    "    self.numFriends = sio.mmread(\"UF_numFriends\")\n",
    "    # Influence of each friend's event-attendance scores on the user.\n",
    "    self.userFriends = sio.mmread(\"UF_userFriends\").todense()\n",
    "    \n",
    "    # Intrinsic popularity (attendance counts) of each event.\n",
    "    self.eventPopularity = sio.mmread(\"EA_eventPopularity\").todense()\n",
    "\n",
    "  def init_SVD(self, K=20):\n",
    "    #初始化模型参数（for 基于模型的协同过滤SVD_CF）\n",
    "    self.K = K  \n",
    "    \n",
    "    #init parameters\n",
    "    #bias\n",
    "    self.bi = np.zeros(self.n_items)  \n",
    "    self.bu = np.zeros(self.n_users)  \n",
    "    \n",
    "    #the small matrix\n",
    "    self.P = random((self.n_users,self.K))/10*(np.sqrt(self.K))\n",
    "    self.Q = random((self.K, self.n_items))/10*(np.sqrt(self.K))  \n",
    "                  \n",
    "     \n",
    "  def train_SVD(self,trainfile = 'train.csv', steps=100,gamma=0.04,Lambda=0.15):\n",
    "    #训练SVD模型（for 基于模型的协同过滤SVD_CF）\n",
    "    #gamma：为学习率\n",
    "    #Lambda：正则参数\n",
    "    \n",
    "    #偷懒了，为了和原来的代码的输入接口一样，直接从训练文件中去读取数据\n",
    "    print (\"SVD Train...\")\n",
    "    ftrain = open(trainfile, 'rb')\n",
    "    ftrain.readline()\n",
    "    self.mu = 0.0\n",
    "    n_records = 0\n",
    "    uids = []  #每条记录的用户索引\n",
    "    i_ids = [] #每条记录的item索引\n",
    "    #用户-Item关系矩阵R（内容同userEventScores相同），临时变量，训练完了R不再需要\n",
    "    R = np.zeros((self.n_users, self.n_items))\n",
    "    \n",
    "    for line in ftrain:\n",
    "        cols = line.strip().split(b\",\")\n",
    "        u = self.userIndex[cols[0]]  #用户\n",
    "        i = self.eventIndex[cols[1]] #活动\n",
    "        \n",
    "        uids.append(u)\n",
    "        i_ids.append(i)\n",
    "        \n",
    "        R[u,i] = int(cols[4])  #interested\n",
    "        self.mu += R[u,i]\n",
    "        n_records += 1\n",
    "    \n",
    "    ftrain.close()\n",
    "    self.mu /= n_records\n",
    "    \n",
    "    # 请补充完整SVD模型训练过程\n",
    "    #**************将评分矩阵R采用随机梯度下降法分解为P和Q*******************\n",
    "    for step in range(steps):\n",
    "        for Ui in set(uids):\n",
    "            for Ij in set(i_ids):\n",
    "                if R[Ui,Ij]!=0:\n",
    "                    hat_rui = self.pred_SVD(Ui,Ij)\n",
    "                    err_ui = R[Ui,Ij] - hat_rui\n",
    "                    self.bu[Ui] += gamma*(err_ui-Lambda*self.bu[Ui])\n",
    "                    self.bi[Ij] += gamma*(err_ui-Lambda*self.bi[Ij])\n",
    "                    for f in range(self.K):\n",
    "                        self.P[Ui][f] += gamma*(err_ui*self.Q[f][Ij]-Lambda*self.P[Ui][f])\n",
    "                        self.Q[f][Ij] += gamma*(err_ui*self.P[Ui][f]-Lambda*self.Q[f][Ij])\n",
    "        gamma *=0.9   #每次迭代步长要逐步缩小\n",
    "    #************************************************************************\n",
    "    \n",
    "    print (\"SVD trained\")\n",
    "     \n",
    "  def pred_SVD(self, uid, i_id):\n",
    "    #根据当前参数，预测用户uid对Item（i_id）的打分        \n",
    "    ans=self.mu + self.bi[i_id] + self.bu[uid] + np.dot(self.P[uid,:],self.Q[:,i_id])  \n",
    "        \n",
    "    #将打分范围控制在0-1之间\n",
    "    if ans>1:  \n",
    "        return 1  \n",
    "    elif ans<0:  \n",
    "        return 0\n",
    "    return ans  \n",
    "\n",
    "  def sim_cal_UserCF(self, uid1, uid2, evnid):\n",
    "    #请补充基于用户的协同过滤中的两个用户uid1和uid2之间的相似度（根据两个用户对item打分的相似度）,默认传进来的都是新编码\n",
    "    similarity = 0.0\n",
    "    #计算两用户参加活动的交集，除去要预测的活动\n",
    "    jiaoji=self.itemsForUser[uid1].intersection(self.itemsForUser[uid2])-{evnid}\n",
    "    num=len(jiaoji) #交叉的活动数\n",
    "    \n",
    "    ScrofU1=[]\n",
    "    ScrofU2=[]\n",
    "    #将两用户对共同参加活动的评分放入列表list中，方便计算\n",
    "    for i in jiaoji:\n",
    "        ScrofU1.append(self.userEventScores[uid1,i])\n",
    "        ScrofU2.append(self.userEventScores[uid2,i])\n",
    "        \n",
    "    #计算平均值\n",
    "    mean1=sum(ScrofU1)/num\n",
    "    mean2=sum(ScrofU2)/num\n",
    "    \n",
    "    #分别计算相似度公式的分子和分母\n",
    "    fenzi,fenmu1,fenmu2=0,0,0\n",
    "\n",
    "    for i in range(len(ScrofU1)):\n",
    "        fenzi  +=(ScrofU1[i]-mean1)*(ScrofU2[i]-mean2)\n",
    "        fenmu1 +=pow((ScrofU1[i]-mean1),2)\n",
    "        fenmu2 +=pow((ScrofU2[i]-mean2),2)\n",
    "\n",
    "    #print fenzi,fenmu1,fenmu2    \n",
    "    \n",
    "    #计算两个用户间的相似度\n",
    "    similarity =fenzi/(math.sqrt(fenmu1)*math.sqrt(fenmu2))\n",
    "    \n",
    "    #返回相似度、相邻用户的平均值\n",
    "    return similarity,mean2  \n",
    "\n",
    "  def userCFReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    根据User-based协同过滤，得到event的推荐度\n",
    "    基本的伪代码思路如下：\n",
    "    for item i\n",
    "      for every other user v that has a preference for i\n",
    "        compute similarity s between u and v\n",
    "        incorporate v's preference for i weighted by s into running aversge\n",
    "    return top items ranked by weighted average\n",
    "    \"\"\"\n",
    "    #请补充完整代码\n",
    "    ans = 0.0  #推荐分\n",
    "    sum = 0.0  #相似度\n",
    "    weightsum=0.0  #加权和\n",
    "    meanb=0.0  #相邻用户的平均分\n",
    "    score=0.0\n",
    "    sim = 0.0\n",
    "    \n",
    "    i=self.userIndex[userId]\n",
    "    j=self.eventIndex[eventId]\n",
    "    \n",
    "    #拟推荐用户参加的活动集，用于同也参加（全部或部分）了这些活动的其他用户计算相似度\n",
    "    a=self.itemsForUser[i]\n",
    "    \n",
    "    #找到给eventId打分的用户集set\n",
    "    b=self.usersForItem[j]\n",
    "    \n",
    "    for ui in b:\n",
    "        if(len(self.itemsForUser[ui].intersection(a)-{j}))>=2:\n",
    "            sim,meanb = self.sim_cal_UserCF(i, ui, j)\n",
    "            if sim >= 0.1:\n",
    "                sum +=sim\n",
    "                weightsum += sim*(self.userEventScores[ui,j]-meanb)\n",
    "    \n",
    "    #计算拟推荐用户的平均值\n",
    "    if len(a-{j})>1:\n",
    "        for k in (a-{j}):\n",
    "            score +=self.userEventScores[i,k]\n",
    "        meani=score/len(a-{j})\n",
    "    else:\n",
    "        meani=0\n",
    "    \n",
    "    #计算总的推荐评分\n",
    "    if weightsum!=0:\n",
    "        ans=meani+sim/weightsum\n",
    "    else:\n",
    "        ans=0\n",
    "            \n",
    "    return ans\n",
    "\n",
    "\n",
    "  def sim_cal_ItemCF(self, i_id1, i_id2):\n",
    "    #计算Item i_id1和i_id2之间的相似性\n",
    "    #请补充完整代码\n",
    "    similarity = 0.0\n",
    "        \n",
    "     #分别计算相似度公式的分子和分母\n",
    "    fenzi,fenmu1,fenmu2=0,0,0\n",
    "    for i in range(len(i_id1)):\n",
    "        fenzi  += i_id1[i]*i_id2[i]\n",
    "        fenmu1 += pow(i_id1[i],2)\n",
    "        fenmu2 += pow(i_id2[i],2)\n",
    "    \n",
    "    similarity=fenzi/(math.sqrt(fenmu1)*math.sqrt(fenmu2))   \n",
    "    \n",
    "    return similarity \n",
    "            \n",
    "  def eventCFReco(self, userId, eventId):    \n",
    "    \"\"\"\n",
    "    根据基于物品的协同过滤，得到Event的推荐度\n",
    "    基本的伪代码思路如下：\n",
    "    for item i \n",
    "        for every item j tht u has a preference for\n",
    "            compute similarity s between i and j\n",
    "            add u's preference for j weighted by s to a running average\n",
    "    return top items, ranked by weighted average\n",
    "    \"\"\"\n",
    "    #请补充完整代码\n",
    "    ans = 0.0  #推荐分\n",
    "    sim = 0.0  #相似度\n",
    "    sum_fz=0.0  #加权和(分子)\n",
    "    sum_fm=0.0  #分母\n",
    "    meanb=0.0  #某用户的平均分\n",
    "    i_id1=[]   #存储同时参加某两个活动的用户的评分的列表\n",
    "    i_id2=[]\n",
    "    fenzi,fenmu1,fenmu2=0,0,0    \n",
    "    \n",
    "    \n",
    "    A=self.userIndex[userId]\n",
    "    ItemA=self.eventIndex[eventId]\n",
    "    \n",
    "    Items=self.itemsForUser[A]\n",
    "    for Itemi in Items:\n",
    "        Users=self.usersForItem[Itemi]-{A}\n",
    "        #print(Users)\n",
    "        for Userj in Users:\n",
    "            #print(Userj)\n",
    "            if ItemA in self.itemsForUser[Userj]:\n",
    "                #print(\"找到同时参加两个活动并评分的用户了\")\n",
    "                n,sum=0.0,0.0\n",
    "                for k in self.itemsForUser[Userj]:\n",
    "                    sum +=self.userEventScores[Userj,k]\n",
    "                    n +=1\n",
    "                meanb=sum/n\n",
    "                i_id1.append(self.userEventScores[Userj,ItemA]-meanb)\n",
    "                i_id2.append(self.userEventScores[Userj,Itemi]-meanb)\n",
    "                \n",
    "            #sim=self.sim_cal_ItemCF(i_id1,i_id2)\n",
    "            #print(i_id1)\n",
    "        if len(i_id1)>1:\n",
    "            for i in range(len(i_id1)):\n",
    "                fenzi  += i_id1[i]*i_id2[i]\n",
    "                fenmu1 += pow(i_id1[i],2)\n",
    "                fenmu2 += pow(i_id2[i],2)    \n",
    "            sim=fenzi/(math.sqrt(fenmu1)*math.sqrt(fenmu2))\n",
    "            #print(sim) \n",
    "            if sim>=0.0:\n",
    "                sum_fz += self.userEventScores[A,Itemi]*sim\n",
    "                sum_fm += sim   \n",
    "        \n",
    "    if sum_fm!=0:\n",
    "        ans=sum_fz/sum_fm \n",
    "    else:\n",
    "        ans=0   \n",
    "  \n",
    "    return ans\n",
    "    \n",
    "  def svdCFReco(self, userId, eventId):\n",
    "    #基于模型的协同过滤, SVD++/LFM\n",
    "    u = self.userIndex[userId]\n",
    "    i = self.eventIndex[eventId]\n",
    "\n",
    "    return self.pred_SVD(u,i)\n",
    "\n",
    "  def userReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    类似基于User-based协同过滤，只是用户之间的相似度由用户本身的属性得到，计算event的推荐度\n",
    "    基本的伪代码思路如下：\n",
    "    for item i\n",
    "      for every other user v that has a preference for i\n",
    "        compute similarity s between u and v\n",
    "        incorporate v's preference for i weighted by s into running aversge\n",
    "    return top items ranked by weighted average\n",
    "    \"\"\"\n",
    "    i = self.userIndex[userId]\n",
    "    j = self.eventIndex[eventId]\n",
    "\n",
    "    vs = self.userEventScores[:, j]\n",
    "    sims = self.userSimMatrix[i, :]\n",
    "\n",
    "    prod = sims * vs\n",
    "\n",
    "    try:\n",
    "      return prod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      return 0\n",
    "\n",
    "  def eventReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    类似基于Item-based协同过滤，只是item之间的相似度由item本身的属性得到，计算Event的推荐度\n",
    "    基本的伪代码思路如下：\n",
    "    for item i \n",
    "      for every item j that u has a preference for\n",
    "        compute similarity s between i and j\n",
    "        add u's preference for j weighted by s to a running average\n",
    "    return top items, ranked by weighted average\n",
    "    \"\"\"\n",
    "    i = self.userIndex[userId]\n",
    "    j = self.eventIndex[eventId]\n",
    "    js = self.userEventScores[i, :]\n",
    "    psim = self.eventPropSim[:, j]\n",
    "    csim = self.eventContSim[:, j]\n",
    "    pprod = js * psim\n",
    "    cprod = js * csim\n",
    "    \n",
    "    pscore = 0\n",
    "    cscore = 0\n",
    "    try:\n",
    "      pscore = pprod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      pass\n",
    "    try:\n",
    "      cscore = cprod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      pass\n",
    "    return pscore, cscore\n",
    "\n",
    "  def userPop(self, userId):\n",
    "    \"\"\"\n",
    "    基于用户的朋友个数来推断用户的社交程度\n",
    "    主要的考量是如果用户的朋友非常多，可能会更倾向于参加各种社交活动\n",
    "    \"\"\"\n",
    "    if userId in self.userIndex:\n",
    "      i = self.userIndex[userId]\n",
    "      try:\n",
    "        return self.numFriends[0, i]\n",
    "      except IndexError:\n",
    "        return 0\n",
    "    else:\n",
    "      return 0\n",
    "\n",
    "  def friendInfluence(self, userId):\n",
    "    \"\"\"\n",
    "    朋友对用户的影响\n",
    "    主要考虑用户所有的朋友中，有多少是非常喜欢参加各种社交活动/event的\n",
    "    用户的朋友圈如果都积极参与各种event，可能会对当前用户有一定的影响\n",
    "    \"\"\"\n",
    "    nusers = np.shape(self.userFriends)[1]\n",
    "    i = self.userIndex[userId]\n",
    "    return (self.userFriends[i, :].sum(axis=0) / nusers)[0,0]\n",
    "\n",
    "  def eventPop(self, eventId):\n",
    "    \"\"\"\n",
    "    本活动本身的热度\n",
    "    主要是通过参与的人数来界定的\n",
    "    \"\"\"\n",
    "    i = self.eventIndex[eventId]\n",
    "    return self.eventPopularity[i, 0]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generateRSData(RS, train=True, header=True):\n",
    "    \"\"\"\n",
    "    把前面user-based协同过滤 和 item-based协同过滤，以及各种热度和影响度作为特征组合在一起\n",
    "    生成新的训练数据，用于分类器分类使用\n",
    "    \"\"\"\n",
    "    fn = \"train.csv\" if train else \"test.csv\"\n",
    "    fin = open(fn, 'rb')\n",
    "    fout = open(\"RS_\" + fn, 'w')\n",
    "    \n",
    "    #忽略第一行（列名字）\n",
    "    fin.readline().strip().split(b\",\")\n",
    "    \n",
    "    # write output header\n",
    "    if header:\n",
    "      #ocolnames = [\"invited\", \"userCF_reco\", \"evtCF_reco\",\"svdCF_reco\",\"user_reco\", \"evt_p_reco\",\"evt_c_reco\", \"user_pop\", \"frnd_infl\", \"evt_pop\"]\n",
    "      ocolnames = [\"invited\", \"userCF_reco\", \"evtCF_reco\",\"svdCF_reco\"]\n",
    "      if train:\n",
    "        ocolnames.append(\"interested\")\n",
    "        ocolnames.append(\"not_interested\")\n",
    "      fout.write(\",\".join(ocolnames) + \"\\n\")\n",
    "    \n",
    "    ln = 0\n",
    "    for line in fin:\n",
    "      ln += 1\n",
    "      if ln%500 == 0:\n",
    "          print (\"%s:%d (userId, eventId)=(%s, %s)\" % (fn, ln, userId, eventId))\n",
    "          #break;\n",
    "      \n",
    "      cols = line.strip().split(b\",\")\n",
    "      userId = cols[0]\n",
    "      eventId = cols[1]\n",
    "      invited = cols[2]\n",
    "      \n",
    "      userCF_reco = RS.userCFReco(userId, eventId)\n",
    "      itemCF_reco = RS.eventCFReco(userId, eventId)\n",
    "      svdCF_reco = RS.svdCFReco(userId, eventId)\n",
    "      #svdCF_reco = 0\n",
    "        \n",
    "      #user_reco = RS.userReco(userId, eventId)\n",
    "      #evt_p_reco, evt_c_reco = RS.eventReco(userId, eventId)\n",
    "      #user_pop = RS.userPop(userId)\n",
    "     \n",
    "      #frnd_infl = RS.friendInfluence(userId)\n",
    "      #evt_pop = RS.eventPop(eventId)\n",
    "      #ocols = [invited, userCF_reco, itemCF_reco, svdCF_reco,user_reco, evt_p_reco,evt_c_reco, user_pop, frnd_infl, evt_pop]\n",
    "      ocols = [invited, userCF_reco, itemCF_reco, svdCF_reco]\n",
    "      \n",
    "      if train:\n",
    "        ocols.append(cols[4]) # interested\n",
    "        ocols.append(cols[5]) # not_interested\n",
    "      fout.write(\",\".join(map(lambda x: str(x), ocols)) + \"\\n\")\n",
    "    \n",
    "    fin.close()\n",
    "    fout.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SVD Train...\n",
      "SVD trained\n"
     ]
    }
   ],
   "source": [
    "# Build the recommender: loads all precomputed feature files and trains the SVD model.\n",
    "RS = RecommonderSystem()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成训练数据...\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\LXM\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:156: RuntimeWarning: invalid value encountered in double_scalars\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train.csv:500 (userId, eventId)=(b'123290209', b'1887085024')\n",
      "train.csv:1000 (userId, eventId)=(b'272886293', b'199858305')\n",
      "train.csv:1500 (userId, eventId)=(b'395305791', b'1582270949')\n",
      "train.csv:2000 (userId, eventId)=(b'527523423', b'3272728211')\n",
      "train.csv:2500 (userId, eventId)=(b'651258472', b'792632006')\n",
      "train.csv:3000 (userId, eventId)=(b'811791433', b'524756826')\n",
      "train.csv:3500 (userId, eventId)=(b'985547042', b'1269035551')\n",
      "train.csv:4000 (userId, eventId)=(b'1107615001', b'173949238')\n",
      "train.csv:4500 (userId, eventId)=(b'1236336671', b'3849306291')\n",
      "train.csv:5000 (userId, eventId)=(b'1414301782', b'2652356640')\n",
      "train.csv:5500 (userId, eventId)=(b'1595465532', b'955398943')\n",
      "train.csv:6000 (userId, eventId)=(b'1747091728', b'2131379889')\n",
      "train.csv:6500 (userId, eventId)=(b'1914182220', b'955398943')\n",
      "train.csv:7000 (userId, eventId)=(b'2071842684', b'1076364848')\n",
      "train.csv:7500 (userId, eventId)=(b'2217853337', b'3051438735')\n",
      "train.csv:8000 (userId, eventId)=(b'2338481531', b'2525447278')\n",
      "train.csv:8500 (userId, eventId)=(b'2489551967', b'520657921')\n",
      "train.csv:9000 (userId, eventId)=(b'2650493630', b'87962584')\n",
      "train.csv:9500 (userId, eventId)=(b'2791418962', b'4223848259')\n",
      "train.csv:10000 (userId, eventId)=(b'2903662804', b'2791462807')\n",
      "train.csv:10500 (userId, eventId)=(b'3036141956', b'3929507420')\n",
      "train.csv:11000 (userId, eventId)=(b'3176074542', b'3459485614')\n",
      "train.csv:11500 (userId, eventId)=(b'3285425249', b'2271782630')\n",
      "train.csv:12000 (userId, eventId)=(b'3410667855', b'1063772489')\n",
      "train.csv:12500 (userId, eventId)=(b'3531604778', b'2584839423')\n",
      "train.csv:13000 (userId, eventId)=(b'3686871863', b'53495098')\n",
      "train.csv:13500 (userId, eventId)=(b'3833637800', b'2415873572')\n",
      "train.csv:14000 (userId, eventId)=(b'3944021305', b'2096772901')\n",
      "train.csv:14500 (userId, eventId)=(b'4075466480', b'3567240505')\n",
      "train.csv:15000 (userId, eventId)=(b'4197193550', b'1628057176')\n",
      "生成预测数据...\n",
      "\n",
      "test.csv:500 (userId, eventId)=(b'182290053', b'2529072432')\n",
      "test.csv:1000 (userId, eventId)=(b'433510318', b'4244463632')\n",
      "test.csv:1500 (userId, eventId)=(b'632808865', b'2845303452')\n",
      "test.csv:2000 (userId, eventId)=(b'813611885', b'2036538169')\n",
      "test.csv:2500 (userId, eventId)=(b'1010701404', b'303459881')\n",
      "test.csv:3000 (userId, eventId)=(b'1210932037', b'2529072432')\n",
      "test.csv:3500 (userId, eventId)=(b'1452921099', b'2705317682')\n",
      "test.csv:4000 (userId, eventId)=(b'1623287180', b'1626678328')\n",
      "test.csv:4500 (userId, eventId)=(b'1855201342', b'2603032829')\n",
      "test.csv:5000 (userId, eventId)=(b'2083900381', b'2529072432')\n",
      "test.csv:5500 (userId, eventId)=(b'2318415276', b'2509151803')\n",
      "test.csv:6000 (userId, eventId)=(b'2528161539', b'4025975316')\n",
      "test.csv:6500 (userId, eventId)=(b'2749110768', b'4244406355')\n",
      "test.csv:7000 (userId, eventId)=(b'2927772127', b'1532377761')\n",
      "test.csv:7500 (userId, eventId)=(b'3199685636', b'1776393554')\n",
      "test.csv:8000 (userId, eventId)=(b'3393388475', b'680270887')\n",
      "test.csv:8500 (userId, eventId)=(b'3601169721', b'154434302')\n",
      "test.csv:9000 (userId, eventId)=(b'3828963415', b'3067222491')\n",
      "test.csv:9500 (userId, eventId)=(b'4018723397', b'2522610844')\n",
      "test.csv:10000 (userId, eventId)=(b'4180064266', b'2658555390')\n"
     ]
    }
   ],
   "source": [
    "# Build RS_train.csv: CF features plus the interested/not_interested labels.\n",
    "print (\"生成训练数据...\\n\")\n",
    "generateRSData(RS,train=True,  header=True)\n",
    "\n",
    "# Build RS_test.csv: CF features only, for prediction.\n",
    "print (\"生成预测数据...\\n\")\n",
    "generateRSData(RS, train=False, header=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "时间、地点等特征都没有处理了，可以考虑用户看到event的时间与event开始时间的差、用户地点和event地点的差异。。。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
