{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# 将所有特征串联起来, 构成 RS_Train.csv\n",
    "# RS_Test.csv\n",
    "# 为最后推荐系统做准备\n",
    "\n",
    "import pickle\n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "import scipy.sparse as ss\n",
    "from numpy.random import random\n",
    "from collections import defaultdict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class RecommenderSystem:\n",
    "  \"\"\"Ensemble of recommenders (user-/item-/model-based CF plus\n",
    "  attribute-based similarities and popularity signals) whose scores are\n",
    "  later stacked into feature vectors for a downstream classifier.\"\"\"\n",
    "\n",
    "  def __init__(self):\n",
    "    # Load pre-computed data files and initialize all sub-models.\n",
    "    \n",
    "    # re-mapped (compact) indices for users and events\n",
    "    self.userIndex = pickle.load(open(\"PE_userIndex.pkl\", 'rb'))\n",
    "    self.eventIndex = pickle.load(open(\"PE_eventIndex.pkl\", 'rb'))\n",
    "    # number of users and events\n",
    "    self.n_users = len(self.userIndex)\n",
    "    self.n_items = len(self.eventIndex)\n",
    "    \n",
    "    # user-event interaction matrix R\n",
    "    # NOTE: train_SVD re-reads this from the train file; the two need\n",
    "    # different formats and were never unified :(\n",
    "    self.userEventScores = sio.mmread(\"PE_userEventScores\").todense()\n",
    "    \n",
    "    # inverted indices\n",
    "    # events each user attended\n",
    "    self.itemsForUser = pickle.load(open(\"PE_eventsForUser.pkl\", 'rb'))\n",
    "    # users attending each event\n",
    "    self.usersForItem = pickle.load(open(\"PE_usersForEvent.pkl\", 'rb'))\n",
    "    \n",
    "    # initialize and train the model-based CF (SVD) parameters\n",
    "    self.init_SVD()\n",
    "    self.train_SVD(trainfile = \"train.csv\")\n",
    "    \n",
    "    # user-user similarity computed from user attributes\n",
    "    self.userSimMatrix = sio.mmread(\"US_userSimMatrix\").todense()\n",
    "    \n",
    "    # event-event similarities computed from event attributes\n",
    "    # (non-term-frequency and term-frequency features respectively)\n",
    "    self.eventPropSim = sio.mmread(\"EV_eventPropSim\").todense()\n",
    "    self.eventContSim = sio.mmread(\"EV_eventContSim\").todense()\n",
    "    \n",
    "    # number of friends per user\n",
    "    self.numFriends = sio.mmread(\"UF_numFriends\")\n",
    "    # influence on a user from each friend's event-participation scores\n",
    "    self.userFriends = sio.mmread(\"UF_userFriends\").todense()\n",
    "    \n",
    "    # popularity of each event\n",
    "    self.eventPopularity = sio.mmread(\"EA_eventPopularity\").todense()\n",
    "\n",
    "  def init_SVD(self, K=20):\n",
    "    # Initialize model parameters (for model-based CF, SVD_CF).\n",
    "    # K: number of latent factors\n",
    "    self.K = K  \n",
    "    \n",
    "    # initialize parameters\n",
    "    # item and user rating biases\n",
    "    self.bi = np.zeros(self.n_items)  \n",
    "    self.bu = np.zeros(self.n_users)  \n",
    "    \n",
    "    # small factor matrices P, Q of the decomposition\n",
    "    self.P = random((self.n_users,self.K))/10*(np.sqrt(self.K))\n",
    "    self.Q = random((self.K, self.n_items))/10*(np.sqrt(self.K))  \n",
    "                  \n",
    "          \n",
    "  def train_SVD(self, trainfile = 'train.csv', steps=100, gamma=0.04, Lambda=0.15):\n",
    "    # Train the SVD model with SGD (for model-based CF, SVD_CF).\n",
    "    # steps: number of iterations\n",
    "    # gamma: learning rate\n",
    "    # Lambda: regularization strength\n",
    "    \n",
    "    # shortcut: to keep the original code's input interface, read the\n",
    "    # records straight from the training file\n",
    "    print(\"SVD Train...\")\n",
    "    ftrain = open(trainfile, 'rb')\n",
    "    ftrain.readline()\n",
    "    self.mu = 0.0 # overall mean rating μ\n",
    "    n_records = 0 # number of records\n",
    "    uids = []  # new user index of each record\n",
    "    i_ids = [] # new item index of each record\n",
    "    # user-item matrix R (same content as userEventScores); temporary, not needed after training\n",
    "    R = np.zeros((self.n_users, self.n_items))\n",
    "    \n",
    "    for line in ftrain:\n",
    "        cols = line.strip().split(','.encode(encoding='utf-8'))\n",
    "        u = self.userIndex[cols[0]]  # user\n",
    "        i = self.eventIndex[cols[1]] # event\n",
    "        \n",
    "        uids.append(u)\n",
    "        i_ids.append(i)\n",
    "        \n",
    "        R[u,i] = int(cols[4])  # interested\n",
    "        self.mu += R[u,i]\n",
    "        n_records += 1\n",
    "    \n",
    "    ftrain.close()\n",
    "    self.mu /= n_records\n",
    "    \n",
    "    # SGD training loop of the SVD model\n",
    "    sse = 0.0\n",
    "    for step in range(steps):\n",
    "        KK = np.random.permutation(n_records) # stochastic gradient descent: KK is a random shuffle of the sample indices\n",
    "        sse_last = sse # objective value of the previous pass\n",
    "        sse = 0.0 # reset objective value\n",
    "        for j in range(n_records):\n",
    "            n = KK[j] # train on one sample at a time\n",
    "            u = uids[n]\n",
    "            i = i_ids[n]\n",
    "            \n",
    "            r_cap = self.pred_SVD(u, i) # predicted interest of user u in event i\n",
    "            e_ui = R[u, i] - r_cap # prediction residual\n",
    "            sse += 0.5 * (e_ui ** 2) # accumulate squared residual\n",
    "            \n",
    "            sse += 0.5 * Lambda * (self.bu[u] ** 2) # bu regularization term\n",
    "            grad_bu = -e_ui + Lambda * self.bu[u] # gradient ∂SSE/∂bu\n",
    "            self.bu[u] -= gamma * grad_bu # update bu\n",
    "            \n",
    "            sse += 0.5 * Lambda * (self.bi[i] ** 2) # bi regularization term\n",
    "            grad_bi = -e_ui + Lambda * self.bi[i] # gradient ∂SSE/∂bi\n",
    "            self.bi[i] -= gamma * grad_bi # update bi\n",
    "            \n",
    "            for k in range(self.K):\n",
    "                sse += 0.5 * Lambda * ((self.P[u, k] ** 2) + (self.Q[k, i] ** 2)) # regularization terms\n",
    "                grad_P_uk = -e_ui * self.Q[k, i] + Lambda * self.P[u, k] # gradient ∂SSE/∂Puk\n",
    "                grad_Q_ki = -e_ui * self.P[u, k] + Lambda * self.Q[k, i] # gradient ∂SSE/∂Qki\n",
    "                self.P[u, k] -= gamma * grad_P_uk # update Puk\n",
    "                self.Q[k, i] -= gamma * grad_Q_ki # update Qki\n",
    "        \n",
    "        gamma = gamma / np.sqrt(step + 2) # decay gamma as 1/sqrt(t)\n",
    "        \n",
    "#         if (sse > sse_last) and (step > 0): # stop early if the objective increases instead of decreasing\n",
    "#             break\n",
    "        \n",
    "    print(\"SVD trained\")\n",
    "    \n",
    "  def pred_SVD(self, uid, i_id):\n",
    "    # Predict user uid's score for item i_id under the current parameters.\n",
    "    ans = self.mu + self.bi[i_id] + self.bu[uid] + np.dot(self.P[uid, :], self.Q[:, i_id])  \n",
    "        \n",
    "    # clamp the score to the range [0, 1]\n",
    "    if ans > 1:  \n",
    "        return 1  \n",
    "    elif ans < 0:  \n",
    "        return 0\n",
    "    \n",
    "    return ans  \n",
    "\n",
    "  def sim_cal_UserCF(self, uid1, uid2 ):\n",
    "    # User-based CF similarity between users uid1 and uid2\n",
    "    # (Pearson-style correlation of their scores over co-attended events).\n",
    "    items_uid1 = self.itemsForUser[uid1]\n",
    "    items_uid2 = self.itemsForUser[uid2]\n",
    "    # events both users attended\n",
    "    joint = []\n",
    "    for i in items_uid1:\n",
    "        if i in items_uid2:\n",
    "            joint.append(i)\n",
    "            \n",
    "    if len(joint) == 0:\n",
    "        return 0 # no co-attended events: similarity is 0\n",
    "    \n",
    "    # mean scores of uid1 and uid2\n",
    "    avg1 = np.sum(self.userEventScores[uid1, :]) / len(items_uid1)\n",
    "    avg2 = np.sum(self.userEventScores[uid2, :]) / len(items_uid2)\n",
    "    \n",
    "    num = 0.0\n",
    "    v1 = 0\n",
    "    v2 = 0\n",
    "    for j in joint:\n",
    "        num += (self.userEventScores[uid1, j] - avg1) * (self.userEventScores[uid2, j] - avg2)\n",
    "        v1 += (self.userEventScores[uid1, j] - avg1) ** 2\n",
    "        v2 += (self.userEventScores[uid2, j] - avg2) ** 2\n",
    "        \n",
    "    den = np.sqrt(v1) * np.sqrt(v2)\n",
    "    if den == 0:\n",
    "        return 0\n",
    "    \n",
    "    similarity = num / den\n",
    "    \n",
    "    return similarity\n",
    "\n",
    "  def userCFReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    Recommendation score for the event via user-based collaborative filtering.\n",
    "    Pseudocode:\n",
    "    for item i\n",
    "      for every other user v that has a preference for i\n",
    "        compute similarity s between u and v\n",
    "        incorporate v's preference for i weighted by s into running average\n",
    "    return top items ranked by weighted average\n",
    "    \"\"\"\n",
    "    num = 0.0\n",
    "    den = 0.0\n",
    "    u = self.userIndex[userId]\n",
    "    i = self.eventIndex[eventId]\n",
    "    \n",
    "    V = self.usersForItem[i]\n",
    "\n",
    "    for v in V:\n",
    "        s = self.sim_cal_UserCF(u, v)\n",
    "        avg = np.sum(self.userEventScores[v, :]) / len(self.itemsForUser[v])\n",
    "        num += s * (self.userEventScores[v, i]- avg)\n",
    "        den += s\n",
    "    \n",
    "    if den == 0: # no similar users\n",
    "        return self.mu\n",
    "    \n",
    "    ans = np.sum(self.userEventScores[u, :]) / len(self.itemsForUser[u]) + num / den\n",
    "    if ans > 1:\n",
    "        return 1\n",
    "    if ans < 0:\n",
    "        return 0\n",
    "    \n",
    "    return ans\n",
    "\n",
    "  def sim_cal_ItemCF(self, i_id1, i_id2):\n",
    "    # Similarity between items i_id1 and i_id2\n",
    "    # (Pearson-style correlation over the users they share).\n",
    "    joint = [] # users common to both items\n",
    "    \n",
    "    for u in self.usersForItem[i_id1]:\n",
    "        if u in self.usersForItem[i_id2]:\n",
    "            joint.append(u)\n",
    "    \n",
    "    if len(joint) == 0: # no common users\n",
    "        return 0\n",
    "    \n",
    "    num = 0.0\n",
    "    v1 = 0.0\n",
    "    v2 = 0.0\n",
    "    avg1 = np.sum(self.userEventScores[:, i_id1]) / len(self.usersForItem[i_id1])\n",
    "    avg2 = np.sum(self.userEventScores[:, i_id2]) / len(self.usersForItem[i_id2])\n",
    "    for u in joint:\n",
    "        e_u1 = self.userEventScores[u, i_id1] - avg1\n",
    "        e_u2 = self.userEventScores[u, i_id2] - avg2\n",
    "        num += e_u1 * e_u2\n",
    "        v1 += e_u1 ** 2\n",
    "        v2 += e_u2 ** 2\n",
    "        \n",
    "    den = np.sqrt(v1 * v2)\n",
    "    if den == 0:\n",
    "        return 0\n",
    "    \n",
    "    return num/den\n",
    "            \n",
    "  def eventCFReco(self, userId, eventId):    \n",
    "    \"\"\"\n",
    "    Recommendation score for the event via item-based collaborative filtering.\n",
    "    Pseudocode:\n",
    "    for item i \n",
    "        for every item j that u has a preference for\n",
    "            compute similarity s between i and j\n",
    "            add u's preference for j weighted by s to a running average\n",
    "    return top items, ranked by weighted average\n",
    "    \"\"\"\n",
    "    ans = 0.0\n",
    "    u = self.userIndex[userId]\n",
    "    i = self.eventIndex[eventId]\n",
    "    I = self.itemsForUser[u]\n",
    "    \n",
    "    num = 0.0\n",
    "    den = 0.0\n",
    "    for j in I:\n",
    "        sim = self.sim_cal_ItemCF(i, j)\n",
    "        num += sim * self.userEventScores[u, j]\n",
    "        den += sim\n",
    "    \n",
    "    if den == 0: # no similar items\n",
    "        return self.mu\n",
    "    \n",
    "    ans = num / den\n",
    "    if ans > 1:\n",
    "        ans = 1\n",
    "    if ans < 0:\n",
    "        ans = 0\n",
    "        \n",
    "    return ans\n",
    "    \n",
    "  def svdCFReco(self, userId, eventId):\n",
    "    # Model-based collaborative filtering (SVD++/LFM).\n",
    "    u = self.userIndex[userId]\n",
    "    i = self.eventIndex[eventId]\n",
    "    \n",
    "    return self.pred_SVD(u,i)\n",
    "\n",
    "  def userReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    Like user-based CF, except user-user similarity comes from user\n",
    "    attributes; computes the event's recommendation score.\n",
    "    Pseudocode:\n",
    "    for item i\n",
    "      for every other user v that has a preference for i\n",
    "        compute similarity s between u and v\n",
    "        incorporate v's preference for i weighted by s into running average\n",
    "    return top items ranked by weighted average\n",
    "    \"\"\"\n",
    "    i = self.userIndex[userId]\n",
    "    j = self.eventIndex[eventId]\n",
    "\n",
    "    vs = self.userEventScores[:, j]\n",
    "    sims = self.userSimMatrix[i, :]\n",
    "\n",
    "    prod = sims * vs\n",
    "\n",
    "    try:\n",
    "      return prod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      return 0\n",
    "\n",
    "  def eventReco(self, userId, eventId):\n",
    "    \"\"\"\n",
    "    Like item-based CF, except item-item similarity comes from event\n",
    "    attributes; computes the event's recommendation score.\n",
    "    Pseudocode:\n",
    "    for item i \n",
    "      for every item j that u has a preference for\n",
    "        compute similarity s between i and j\n",
    "        add u's preference for j weighted by s to a running average\n",
    "    return top items, ranked by weighted average\n",
    "    \"\"\"\n",
    "    i = self.userIndex[userId]\n",
    "    j = self.eventIndex[eventId]\n",
    "    js = self.userEventScores[i, :]\n",
    "    psim = self.eventPropSim[:, j]\n",
    "    csim = self.eventContSim[:, j]\n",
    "    pprod = js * psim\n",
    "    cprod = js * csim\n",
    "    \n",
    "    pscore = 0\n",
    "    cscore = 0\n",
    "    try:\n",
    "      pscore = pprod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      pass\n",
    "    try:\n",
    "      cscore = cprod[0, 0] - self.userEventScores[i, j]\n",
    "    except IndexError:\n",
    "      pass\n",
    "    return pscore, cscore\n",
    "\n",
    "  def userPop(self, userId):\n",
    "    \"\"\"\n",
    "    Estimate a user's sociability from the number of friends they have.\n",
    "    Rationale: users with many friends may be more inclined to attend\n",
    "    all kinds of social events.\n",
    "    \"\"\"\n",
    "    if self.userIndex.__contains__(userId):\n",
    "      i = self.userIndex[userId]\n",
    "      try:\n",
    "        return self.numFriends[0, i]\n",
    "      except IndexError:\n",
    "        return 0\n",
    "    else:\n",
    "      return 0\n",
    "\n",
    "  def friendInfluence(self, userId):\n",
    "    \"\"\"\n",
    "    Influence of friends on the user.\n",
    "    Mainly measures how many of the user's friends are keen participants\n",
    "    in social events; a circle of active friends may influence the user.\n",
    "    \"\"\"\n",
    "    nusers = np.shape(self.userFriends)[1]\n",
    "    i = self.userIndex[userId]\n",
    "    return (self.userFriends[i, :].sum(axis=0) / nusers)[0,0]\n",
    "\n",
    "  def eventPop(self, eventId):\n",
    "    \"\"\"\n",
    "    Popularity of the event itself,\n",
    "    mainly defined by the number of participants.\n",
    "    \"\"\"\n",
    "    i = self.eventIndex[eventId]\n",
    "    return self.eventPopularity[i, 0]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def generateRSData(RS, train=True, header=True):\n",
    "    \"\"\"\n",
    "    Combine the user-based / item-based / model-based CF scores and the\n",
    "    popularity and influence measures into one feature row per record,\n",
    "    producing new data (RS_train.csv / RS_test.csv) for a classifier.\n",
    "    \n",
    "    RS     -- a trained RecommenderSystem instance\n",
    "    train  -- read train.csv and append the label columns; else read test.csv\n",
    "    header -- write a header row with the output column names\n",
    "    \"\"\"\n",
    "    fn = \"train.csv\" if train else \"test.csv\"\n",
    "    fin = open(fn, 'rb')\n",
    "    fout = open(\"RS_\" + fn, 'wb')\n",
    "    \n",
    "    # skip the first line (column names)\n",
    "    fin.readline()\n",
    "    \n",
    "    # write output header\n",
    "    if header:\n",
    "      ocolnames = [\"invited\", \"userCF_reco\", \"evtCF_reco\",\"svdCF_reco\",\"user_reco\", \"evt_p_reco\",\n",
    "        \"evt_c_reco\", \"user_pop\", \"frnd_infl\", \"evt_pop\"]\n",
    "      if train:\n",
    "        ocolnames.append(\"interested\")\n",
    "        ocolnames.append(\"not_interested\")\n",
    "      fout.write((str.join(',', ocolnames) + '\\n').encode(encoding=\"utf-8\"))\n",
    "    \n",
    "    ln = 0\n",
    "    for line in fin:\n",
    "      ln += 1\n",
    "      \n",
    "      cols = line.strip().split(','.encode(encoding=\"utf-8\"))\n",
    "      userId = cols[0]\n",
    "      eventId = cols[1]\n",
    "      invited = cols[2]\n",
    "      \n",
    "      # progress report; parse before printing so the ids match the CURRENT\n",
    "      # record (previously this printed the ids of the preceding record)\n",
    "      if ln%500 == 0:\n",
    "          print(\"%s:%d (userId, eventId)=(%s, %s)\" % (fn, ln, userId, eventId))\n",
    "      \n",
    "      # collaborative-filtering scores\n",
    "      userCF_reco = RS.userCFReco(userId, eventId)\n",
    "      itemCF_reco = RS.eventCFReco(userId, eventId)\n",
    "      svdCF_reco = RS.svdCFReco(userId, eventId)\n",
    "      \n",
    "      # attribute-based similarity scores\n",
    "      user_reco = RS.userReco(userId, eventId)\n",
    "      evt_p_reco, evt_c_reco = RS.eventReco(userId, eventId)\n",
    "      \n",
    "      # social and popularity features\n",
    "      user_pop = RS.userPop(userId)\n",
    "      frnd_infl = RS.friendInfluence(userId)\n",
    "      evt_pop = RS.eventPop(eventId)\n",
    "      \n",
    "      ocols = [invited, userCF_reco, itemCF_reco, svdCF_reco, user_reco, evt_p_reco,\n",
    "        evt_c_reco, user_pop, frnd_infl, evt_pop]\n",
    "      \n",
    "      if train:\n",
    "        ocols.append(cols[4]) # interested\n",
    "        ocols.append(cols[5]) # not_interested\n",
    "      fout.write((str.join(',', map(lambda x: str(x), ocols)) + \"\\n\").encode(encoding=\"utf-8\"))\n",
    "    \n",
    "    fin.close()\n",
    "    fout.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SVD Train...\n",
      "SVD trained\n",
      "生成训练数据...\n",
      "\n",
      "train.csv:500 (userId, eventId)=(b'123290209', b'1887085024')\n",
      "train.csv:1000 (userId, eventId)=(b'272886293', b'199858305')\n",
      "train.csv:1500 (userId, eventId)=(b'395305791', b'1582270949')\n",
      "train.csv:2000 (userId, eventId)=(b'527523423', b'3272728211')\n",
      "train.csv:2500 (userId, eventId)=(b'651258472', b'792632006')\n",
      "train.csv:3000 (userId, eventId)=(b'811791433', b'524756826')\n",
      "train.csv:3500 (userId, eventId)=(b'985547042', b'1269035551')\n",
      "train.csv:4000 (userId, eventId)=(b'1107615001', b'173949238')\n",
      "train.csv:4500 (userId, eventId)=(b'1236336671', b'3849306291')\n",
      "train.csv:5000 (userId, eventId)=(b'1414301782', b'2652356640')\n",
      "train.csv:5500 (userId, eventId)=(b'1595465532', b'955398943')\n",
      "train.csv:6000 (userId, eventId)=(b'1747091728', b'2131379889')\n",
      "train.csv:6500 (userId, eventId)=(b'1914182220', b'955398943')\n",
      "train.csv:7000 (userId, eventId)=(b'2071842684', b'1076364848')\n",
      "train.csv:7500 (userId, eventId)=(b'2217853337', b'3051438735')\n",
      "train.csv:8000 (userId, eventId)=(b'2338481531', b'2525447278')\n",
      "train.csv:8500 (userId, eventId)=(b'2489551967', b'520657921')\n",
      "train.csv:9000 (userId, eventId)=(b'2650493630', b'87962584')\n",
      "train.csv:9500 (userId, eventId)=(b'2791418962', b'4223848259')\n",
      "train.csv:10000 (userId, eventId)=(b'2903662804', b'2791462807')\n",
      "train.csv:10500 (userId, eventId)=(b'3036141956', b'3929507420')\n",
      "train.csv:11000 (userId, eventId)=(b'3176074542', b'3459485614')\n",
      "train.csv:11500 (userId, eventId)=(b'3285425249', b'2271782630')\n",
      "train.csv:12000 (userId, eventId)=(b'3410667855', b'1063772489')\n",
      "train.csv:12500 (userId, eventId)=(b'3531604778', b'2584839423')\n",
      "train.csv:13000 (userId, eventId)=(b'3686871863', b'53495098')\n",
      "train.csv:13500 (userId, eventId)=(b'3833637800', b'2415873572')\n",
      "train.csv:14000 (userId, eventId)=(b'3944021305', b'2096772901')\n",
      "train.csv:14500 (userId, eventId)=(b'4075466480', b'3567240505')\n",
      "train.csv:15000 (userId, eventId)=(b'4197193550', b'1628057176')\n",
      "生成预测数据...\n",
      "\n",
      "test.csv:500 (userId, eventId)=(b'182290053', b'2529072432')\n",
      "test.csv:1000 (userId, eventId)=(b'433510318', b'4244463632')\n",
      "test.csv:1500 (userId, eventId)=(b'632808865', b'2845303452')\n",
      "test.csv:2000 (userId, eventId)=(b'813611885', b'2036538169')\n",
      "test.csv:2500 (userId, eventId)=(b'1010701404', b'303459881')\n",
      "test.csv:3000 (userId, eventId)=(b'1210932037', b'2529072432')\n",
      "test.csv:3500 (userId, eventId)=(b'1452921099', b'2705317682')\n",
      "test.csv:4000 (userId, eventId)=(b'1623287180', b'1626678328')\n",
      "test.csv:4500 (userId, eventId)=(b'1855201342', b'2603032829')\n",
      "test.csv:5000 (userId, eventId)=(b'2083900381', b'2529072432')\n",
      "test.csv:5500 (userId, eventId)=(b'2318415276', b'2509151803')\n",
      "test.csv:6000 (userId, eventId)=(b'2528161539', b'4025975316')\n",
      "test.csv:6500 (userId, eventId)=(b'2749110768', b'4244406355')\n",
      "test.csv:7000 (userId, eventId)=(b'2927772127', b'1532377761')\n",
      "test.csv:7500 (userId, eventId)=(b'3199685636', b'1776393554')\n",
      "test.csv:8000 (userId, eventId)=(b'3393388475', b'680270887')\n",
      "test.csv:8500 (userId, eventId)=(b'3601169721', b'154434302')\n",
      "test.csv:9000 (userId, eventId)=(b'3828963415', b'3067222491')\n",
      "test.csv:9500 (userId, eventId)=(b'4018723397', b'2522610844')\n",
      "test.csv:10000 (userId, eventId)=(b'4180064266', b'2658555390')\n"
     ]
    }
   ],
   "source": [
    "# Build the recommender (loads all pre-computed features and trains the\n",
    "# SVD model), then emit the stacked feature files for train and test.\n",
    "recommender = RecommenderSystem()\n",
    "\n",
    "print(\"生成训练数据...\\n\")\n",
    "generateRSData(recommender, train=True, header=True)\n",
    "\n",
    "print(\"生成预测数据...\\n\")\n",
    "generateRSData(recommender, train=False, header=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "时间、地点等特征都还没有处理，可以考虑加入：用户看到 event 的时间与 event 开始时间之差、用户所在地点与 event 地点的差异……"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
