{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import division\n",
    "\n",
    "import pickle as pickle\n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "import scipy.sparse as ss\n",
    "from numpy.random import random\n",
    "from collections import defaultdict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "class RecommonderSystem:\n",
    "    \"\"\"Event recommender combining several collaborative-filtering signals.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # Load all precomputed data files and train the SVD model.\n",
    "\n",
    "        # re-mapped indices for users and events\n",
    "        with open(\"PE_userIndex.pkl\", 'rb') as f:\n",
    "            self.userIndex = pickle.load(f)\n",
    "        with open(\"PE_eventIndex.pkl\", 'rb') as f:\n",
    "            self.eventIndex = pickle.load(f)\n",
    "        self.n_users = len(self.userIndex)\n",
    "        self.n_items = len(self.eventIndex)\n",
    "\n",
    "        # user-event relation matrix R\n",
    "        # (train_SVD re-reads the raw file; the two formats differ)\n",
    "        self.userEventScores = sio.mmread(\"PE_userEventScores\").todense()\n",
    "\n",
    "        # inverted indices\n",
    "        ## events each user was shown\n",
    "        with open(\"PE_eventsForUser.pkl\", 'rb') as f:\n",
    "            self.eventsForUser = pickle.load(f)\n",
    "        ## users each event was shown to\n",
    "        with open(\"PE_usersForEvent.pkl\", 'rb') as f:\n",
    "            self.usersForEvent = pickle.load(f)\n",
    "\n",
    "        # model-based CF: initialise the parameters, then train\n",
    "        self.init_SVD()\n",
    "        self.train_SVD(trainfile=\"train.csv\")\n",
    "\n",
    "        # user-user similarity computed from user attributes\n",
    "        self.userSimMatrix = sio.mmread(\"US_userSimMatrix\").todense()\n",
    "\n",
    "        # event-event similarities computed from event attributes / content\n",
    "        self.eventPropSim = sio.mmread(\"EV_eventPropSim\").todense()\n",
    "        self.eventContSim = sio.mmread(\"EV_eventContSim\").todense()\n",
    "\n",
    "        # number of friends of each user\n",
    "        self.numFriends = sio.mmread(\"UF_numFriends\")\n",
    "        # influence of each friend's event scores on the user\n",
    "        self.userFriends = sio.mmread(\"UF_userFriends\").todense()\n",
    "\n",
    "        # intrinsic popularity of each event\n",
    "        self.eventPopularity = sio.mmread(\"EA_eventPopularity\").todense()\n",
    "\n",
    "    def init_SVD(self, K=20):\n",
    "        \"\"\"Initialise the parameters of the model-based CF (SVD).\n",
    "\n",
    "        K: number of latent factors.\n",
    "        \"\"\"\n",
    "        self.K = K\n",
    "\n",
    "        # bias terms\n",
    "        self.bu = np.zeros((self.n_users, 1))\n",
    "        self.bi = np.zeros((1, self.n_items))\n",
    "\n",
    "        # latent factor matrices, randomly initialised in [0, 1)\n",
    "        self.P = np.random.rand(self.n_users, self.K)\n",
    "        self.Q = np.random.rand(self.K, self.n_items)\n",
    "\n",
    "    def train_SVD(self, trainfile='train.csv', steps=1000000, gamma=0.04, Lambda=0.15):\n",
    "        \"\"\"Train the SVD model with stochastic gradient descent.\n",
    "\n",
    "        trainfile: CSV whose columns include user (0), event (1) and, at\n",
    "                   index 4, the 'interested' label used as the rating.\n",
    "        steps: number of SGD updates.\n",
    "        gamma: learning rate.\n",
    "        Lambda: regularisation strength.\n",
    "        \"\"\"\n",
    "        # For interface compatibility the ratings are re-read from the file.\n",
    "        print(\"enter SVD Train...\")\n",
    "        # user-item rating matrix R (same content as userEventScores);\n",
    "        # temporary, no longer needed once training is done\n",
    "        R = np.zeros((self.n_users, self.n_items))\n",
    "        self.mu = 0.0  # global mean rating\n",
    "        n_records = 0\n",
    "        with open(trainfile, 'r') as ftrain:\n",
    "            ftrain.readline()  # skip the header row\n",
    "            for line in ftrain:\n",
    "                cols = line.strip().split(\",\")\n",
    "                u = self.userIndex[cols[0]]   # user\n",
    "                i = self.eventIndex[cols[1]]  # event\n",
    "                R[u, i] = int(cols[4])        # 'interested' label\n",
    "                self.mu += R[u, i]\n",
    "                n_records += 1\n",
    "        # guard against an empty training file\n",
    "        self.mu = self.mu / n_records if n_records > 0 else 0.0\n",
    "\n",
    "        print(\"SVD trained begin\")\n",
    "        # SGD: pick a random (user, item) cell, compute the prediction error\n",
    "        # eui, then move every parameter along the negative gradient:\n",
    "        #   bu  += gamma * (eui - Lambda * bu)\n",
    "        #   bi  += gamma * (eui - Lambda * bi)\n",
    "        #   puk += gamma * (eui * qki - Lambda * puk)\n",
    "        #   qki += gamma * (eui * puk - Lambda * qki)\n",
    "        for _ in range(steps):\n",
    "            u = np.random.randint(0, self.n_users)\n",
    "            i = np.random.randint(0, self.n_items)\n",
    "            eui = R[u, i] - self.pred_SVD(u, i)\n",
    "            # all gradients are computed before any parameter is updated\n",
    "            gradient_bu = Lambda * self.bu[u, 0] - eui\n",
    "            gradient_bi = Lambda * self.bi[0, i] - eui\n",
    "            gradient_pu = Lambda * self.P[u, :] - eui * self.Q[:, i]\n",
    "            gradient_qi = Lambda * self.Q[:, i] - eui * self.P[u, :]\n",
    "            self.bu[u, 0] -= gamma * gradient_bu\n",
    "            self.bi[0, i] -= gamma * gradient_bi\n",
    "            self.P[u, :] -= gamma * gradient_pu\n",
    "            self.Q[:, i] -= gamma * gradient_qi\n",
    "            # gamma *= 0.93  # optionally decay the learning rate\n",
    "        print(\"SVD trained end\")\n",
    "\n",
    "    def pred_SVD(self, uidIndex, eventIndex):\n",
    "        \"\"\"Predict the score of user uidIndex for item eventIndex.\n",
    "\n",
    "        Scores above 0.7 are rounded up to 1 and negative scores to 0,\n",
    "        keeping the result inside [0, 1].\n",
    "        \"\"\"\n",
    "        ans = (self.mu + self.bi[0, eventIndex] + self.bu[uidIndex, 0]\n",
    "               + np.dot(self.P[uidIndex, :], self.Q[:, eventIndex]))\n",
    "        if ans > 0.7:\n",
    "            return 1\n",
    "        elif ans < 0:\n",
    "            return 0\n",
    "        return ans\n",
    "\n",
    "    def sim_cal_UserCF(self, uid1, uid2):\n",
    "        \"\"\"Pearson similarity between users uid1 and uid2, based on their\n",
    "        scores for the events shown to both.\n",
    "\n",
    "        Returns (similarity, mean score of uid2 over its own events).\n",
    "        \"\"\"\n",
    "        similarity = 0\n",
    "        # FIX: the fallback used to be bound to `uidScoresMean2` while the\n",
    "        # return read `uid2ScoresMean`, raising NameError whenever the two\n",
    "        # users had no event in common.\n",
    "        uid2ScoresMean = 0\n",
    "        # events shown to both users\n",
    "        eventsForUser1 = self.eventsForUser[uid1]\n",
    "        eventsForUser2 = self.eventsForUser[uid2]\n",
    "        sameEvent = eventsForUser1 & eventsForUser2\n",
    "        if len(sameEvent) > 0:\n",
    "            # each user's mean score over all of its events\n",
    "            uid1ScoresMean = np.mean(self.userEventScores[uid1, list(eventsForUser1)])\n",
    "            uid2ScoresMean = np.mean(self.userEventScores[uid2, list(eventsForUser2)])\n",
    "            sum_up = 0\n",
    "            sum_down_user1 = 0\n",
    "            sum_down_user2 = 0\n",
    "            for i in sameEvent:\n",
    "                # deviation of each score from the user's mean; sum_up is the\n",
    "                # numerator, sum_down_* build the denominator\n",
    "                tempUserResult1 = self.userEventScores[uid1, i] - uid1ScoresMean\n",
    "                tempUserResult2 = self.userEventScores[uid2, i] - uid2ScoresMean\n",
    "                sum_up += tempUserResult1 * tempUserResult2\n",
    "                sum_down_user1 += tempUserResult1 ** 2\n",
    "                sum_down_user2 += tempUserResult2 ** 2\n",
    "            if (sum_down_user1 != 0) and (sum_down_user2 != 0):\n",
    "                similarity = sum_up / (np.sqrt(sum_down_user1) * np.sqrt(sum_down_user2))\n",
    "        return similarity, uid2ScoresMean\n",
    "\n",
    "    def userCFReco(self, userId, eventId):\n",
    "        \"\"\"User-based CF recommendation score of event eventId for userId.\"\"\"\n",
    "        ans = 0.0\n",
    "        # only users whose similarity exceeds this threshold contribute\n",
    "        threshold = 0.5\n",
    "        uidIndex = self.userIndex[userId]\n",
    "        eventIndex = self.eventIndex[eventId]\n",
    "        # other users that were also shown this event\n",
    "        sameUsersForEvent = self.usersForEvent[eventIndex]\n",
    "        sum_up = 0\n",
    "        sum_down = 0\n",
    "        if len(sameUsersForEvent) > 1:\n",
    "            # weight every other user's (mean-centred) score by similarity\n",
    "            for otherIdIndex in sameUsersForEvent:\n",
    "                if otherIdIndex != uidIndex:\n",
    "                    sim_cal_User, ScoreotherMeans = self.sim_cal_UserCF(uidIndex, otherIdIndex)\n",
    "                    if sim_cal_User > threshold:\n",
    "                        sum_up += sim_cal_User * (self.userEventScores[otherIdIndex, eventIndex] - ScoreotherMeans)\n",
    "                        sum_down += sim_cal_User\n",
    "            if sum_down != 0:\n",
    "                # mean score of the current user over all of its events\n",
    "                eventsForUser1 = self.eventsForUser[uidIndex]\n",
    "                uidScoresMean1 = np.mean(self.userEventScores[uidIndex, list(eventsForUser1)])\n",
    "                ans = uidScoresMean1 + sum_up / sum_down\n",
    "        return ans\n",
    "\n",
    "    def sim_cal_EventCF(self, i_id1, i_id2):\n",
    "        \"\"\"Pearson similarity between events i_id1 and i_id2, based on the\n",
    "        scores of the users both events were shown to.\n",
    "\n",
    "        Returns (similarity, mean score of i_id2 over its own users).\n",
    "        \"\"\"\n",
    "        similarity = 0\n",
    "        eventScoresMean2 = 0\n",
    "        # users both events were shown to\n",
    "        userForEvent1 = self.usersForEvent[i_id1]\n",
    "        userForEvent2 = self.usersForEvent[i_id2]\n",
    "        sameUser = userForEvent1 & userForEvent2\n",
    "        if len(sameUser) > 0:\n",
    "            # each event's mean score over all of its users\n",
    "            eventScoresMean1 = np.mean(self.userEventScores[list(userForEvent1), i_id1])\n",
    "            eventScoresMean2 = np.mean(self.userEventScores[list(userForEvent2), i_id2])\n",
    "            sum_up = 0\n",
    "            sum_down_event1 = 0\n",
    "            sum_down_event2 = 0\n",
    "            for i in sameUser:\n",
    "                # deviation of each user's score from the event's mean\n",
    "                tempEventResult1 = self.userEventScores[i, i_id1] - eventScoresMean1\n",
    "                tempEventResult2 = self.userEventScores[i, i_id2] - eventScoresMean2\n",
    "                sum_up += tempEventResult1 * tempEventResult2\n",
    "                sum_down_event1 += tempEventResult1 ** 2\n",
    "                sum_down_event2 += tempEventResult2 ** 2\n",
    "            if (sum_down_event1 != 0) and (sum_down_event2 != 0):\n",
    "                similarity = sum_up / (np.sqrt(sum_down_event1) * np.sqrt(sum_down_event2))\n",
    "        return similarity, eventScoresMean2\n",
    "\n",
    "    def eventCFReco(self, userId, eventId):\n",
    "        \"\"\"Item-based CF recommendation score of event eventId for userId.\"\"\"\n",
    "        ans = 0.0\n",
    "        # only events whose similarity exceeds this threshold contribute\n",
    "        threshold = 0.8\n",
    "        uidIndex = self.userIndex[userId]\n",
    "        eventIndex = self.eventIndex[eventId]\n",
    "        # other events that were also shown to this user\n",
    "        sameEventsForUser = self.eventsForUser[uidIndex]\n",
    "        sum_up = 0\n",
    "        sum_down = 0\n",
    "        if len(sameEventsForUser) > 1:\n",
    "            # weight the user's (mean-centred) score for every other event\n",
    "            for otherEventIndex in sameEventsForUser:\n",
    "                if otherEventIndex != eventIndex:\n",
    "                    sim_cal_Event, otherMeanScores = self.sim_cal_EventCF(eventIndex, otherEventIndex)\n",
    "                    if sim_cal_Event > threshold:\n",
    "                        sum_up += sim_cal_Event * (self.userEventScores[uidIndex, otherEventIndex] - otherMeanScores)\n",
    "                        sum_down += sim_cal_Event\n",
    "            if sum_down != 0:\n",
    "                # mean score of the current event over all of its users\n",
    "                usersForEvent1 = self.usersForEvent[eventIndex]\n",
    "                eventScoresMean1 = np.mean(self.userEventScores[list(usersForEvent1), eventIndex])\n",
    "                ans = eventScoresMean1 + sum_up / sum_down\n",
    "        return ans\n",
    "\n",
    "    def svdCFReco(self, userId, eventId):\n",
    "        \"\"\"Model-based CF (SVD/LFM) recommendation score.\"\"\"\n",
    "        u = self.userIndex[userId]\n",
    "        i = self.eventIndex[eventId]\n",
    "        return self.pred_SVD(u, i)\n",
    "\n",
    "    def userReco(self, userId, eventId):\n",
    "        \"\"\"\n",
    "        Like user-based CF, except the user-user similarity comes from the\n",
    "        users' own attributes. Pseudocode:\n",
    "        for item i\n",
    "          for every other user v that has a preference for i\n",
    "            compute similarity s between u and v\n",
    "            incorporate v's preference for i weighted by s into running average\n",
    "        return top items ranked by weighted average\n",
    "        \"\"\"\n",
    "        i = self.userIndex[userId]\n",
    "        j = self.eventIndex[eventId]\n",
    "\n",
    "        vs = self.userEventScores[:, j]\n",
    "        sims = self.userSimMatrix[i, :]\n",
    "\n",
    "        # (1 x n_users) matrix times (n_users x 1) -> similarity-weighted score\n",
    "        prod = sims * vs\n",
    "\n",
    "        try:\n",
    "            # subtract the user's own score so it does not count itself\n",
    "            return prod[0, 0] - self.userEventScores[i, j]\n",
    "        except IndexError:\n",
    "            return 0\n",
    "\n",
    "    def eventReco(self, userId, eventId):\n",
    "        \"\"\"\n",
    "        Like item-based CF, except the event-event similarity comes from the\n",
    "        events' own properties and content. Pseudocode:\n",
    "        for item i\n",
    "          for every item j that u has a preference for\n",
    "            compute similarity s between i and j\n",
    "            add u's preference for j weighted by s to a running average\n",
    "        return top items, ranked by weighted average\n",
    "\n",
    "        Returns (property-based score, content-based score).\n",
    "        \"\"\"\n",
    "        i = self.userIndex[userId]\n",
    "        j = self.eventIndex[eventId]\n",
    "        js = self.userEventScores[i, :]\n",
    "        psim = self.eventPropSim[:, j]\n",
    "        csim = self.eventContSim[:, j]\n",
    "        pprod = js * psim\n",
    "        cprod = js * csim\n",
    "\n",
    "        pscore = 0\n",
    "        cscore = 0\n",
    "        try:\n",
    "            # subtract the user's own score so it does not count itself\n",
    "            pscore = pprod[0, 0] - self.userEventScores[i, j]\n",
    "        except IndexError:\n",
    "            pass\n",
    "        try:\n",
    "            cscore = cprod[0, 0] - self.userEventScores[i, j]\n",
    "        except IndexError:\n",
    "            pass\n",
    "        return pscore, cscore\n",
    "\n",
    "    def userPop(self, userId):\n",
    "        \"\"\"\n",
    "        Estimate how social a user is from the number of friends: users with\n",
    "        many friends are assumed more likely to attend social events.\n",
    "        \"\"\"\n",
    "        if userId in self.userIndex:\n",
    "            i = self.userIndex[userId]\n",
    "            try:\n",
    "                return self.numFriends[0, i]\n",
    "            except IndexError:\n",
    "                return 0\n",
    "        else:\n",
    "            return 0\n",
    "\n",
    "    def friendInfluence(self, userId):\n",
    "        \"\"\"\n",
    "        Influence of a user's friends: how many of them are keen on joining\n",
    "        social events. An active circle of friends is assumed to pull the\n",
    "        user towards attending as well.\n",
    "        \"\"\"\n",
    "        nusers = np.shape(self.userFriends)[1]\n",
    "        i = self.userIndex[userId]\n",
    "        return (self.userFriends[i, :].sum(axis=0) / nusers)[0, 0]\n",
    "\n",
    "    def eventPop(self, eventId):\n",
    "        \"\"\"\n",
    "        Intrinsic popularity of an event, measured by how many people joined.\n",
    "        \"\"\"\n",
    "        i = self.eventIndex[eventId]\n",
    "        return self.eventPopularity[i, 0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generateRSData(RS, train=True, header=True):\n",
    "    \"\"\"\n",
    "    Combine the user-based CF, item-based CF and SVD CF scores together\n",
    "    with the popularity / influence features into one row per record,\n",
    "    written to RS_train.csv / RS_test.csv for the downstream classifier.\n",
    "\n",
    "    RS:     a trained RecommonderSystem instance\n",
    "    train:  read train.csv (and append the labels) when True, else test.csv\n",
    "    header: write the column-name row when True\n",
    "    \"\"\"\n",
    "    fn = \"train.csv\" if train else \"test.csv\"\n",
    "    # 'with' guarantees both files are closed even if a recommender call fails\n",
    "    with open(fn, 'r') as fin, open(\"RS_\" + fn, 'w') as fout:\n",
    "        # skip the first line of the input (column names)\n",
    "        fin.readline()\n",
    "\n",
    "        # write output header\n",
    "        if header:\n",
    "            ocolnames = [\"invited\", \"userCF_reco\", \"evtCF_reco\", \"svdCF_reco\", \"user_reco\", \"evt_p_reco\",\n",
    "                         \"evt_c_reco\", \"user_pop\", \"frnd_infl\", \"evt_pop\"]\n",
    "            if train:\n",
    "                ocolnames.append(\"interested\")\n",
    "                ocolnames.append(\"not_interested\")\n",
    "            fout.write(\",\".join(ocolnames) + \"\\n\")\n",
    "\n",
    "        ln = 0\n",
    "        for line in fin:\n",
    "            ln += 1\n",
    "            cols = line.strip().split(\",\")\n",
    "            userId = cols[0]\n",
    "            eventId = cols[1]\n",
    "            invited = cols[2]\n",
    "\n",
    "            # FIX: progress used to be printed before the current row was\n",
    "            # parsed, so it reported the ids of the previous record.\n",
    "            if ln % 500 == 0:\n",
    "                print(\"%s:%d (userId, eventId)=(%s, %s)\" % (fn, ln, userId, eventId))\n",
    "\n",
    "            userCF_reco = RS.userCFReco(userId, eventId)\n",
    "            itemCF_reco = RS.eventCFReco(userId, eventId)\n",
    "            svdCF_reco = RS.svdCFReco(userId, eventId)\n",
    "\n",
    "            user_reco = RS.userReco(userId, eventId)\n",
    "            evt_p_reco, evt_c_reco = RS.eventReco(userId, eventId)\n",
    "            user_pop = RS.userPop(userId)\n",
    "\n",
    "            frnd_infl = RS.friendInfluence(userId)\n",
    "            evt_pop = RS.eventPop(eventId)\n",
    "            ocols = [invited, userCF_reco, itemCF_reco, svdCF_reco, user_reco, evt_p_reco,\n",
    "                     evt_c_reco, user_pop, frnd_infl, evt_pop]\n",
    "\n",
    "            if train:\n",
    "                ocols.append(cols[4])  # interested\n",
    "                ocols.append(cols[5])  # not_interested\n",
    "            fout.write(\",\".join(str(x) for x in ocols) + \"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "enter SVD Train...\n",
      "SVD trained begin\n",
      "SVD trained end\n"
     ]
    }
   ],
   "source": [
    "# Build the recommender: loads the precomputed pickle/matrix files\n",
    "# and trains the SVD model (prints progress while training).\n",
    "RS = RecommonderSystem()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成训练数据...\n",
      "\n",
      "train.csv:500 (userId, eventId)=(123290209, 1887085024)\n",
      "train.csv:1000 (userId, eventId)=(272886293, 199858305)\n",
      "train.csv:1500 (userId, eventId)=(395305791, 1582270949)\n",
      "train.csv:2000 (userId, eventId)=(527523423, 3272728211)\n",
      "train.csv:2500 (userId, eventId)=(651258472, 792632006)\n",
      "train.csv:3000 (userId, eventId)=(811791433, 524756826)\n",
      "train.csv:3500 (userId, eventId)=(985547042, 1269035551)\n",
      "train.csv:4000 (userId, eventId)=(1107615001, 173949238)\n",
      "train.csv:4500 (userId, eventId)=(1236336671, 3849306291)\n",
      "train.csv:5000 (userId, eventId)=(1414301782, 2652356640)\n",
      "train.csv:5500 (userId, eventId)=(1595465532, 955398943)\n",
      "train.csv:6000 (userId, eventId)=(1747091728, 2131379889)\n",
      "train.csv:6500 (userId, eventId)=(1914182220, 955398943)\n",
      "train.csv:7000 (userId, eventId)=(2071842684, 1076364848)\n",
      "train.csv:7500 (userId, eventId)=(2217853337, 3051438735)\n",
      "train.csv:8000 (userId, eventId)=(2338481531, 2525447278)\n",
      "train.csv:8500 (userId, eventId)=(2489551967, 520657921)\n",
      "train.csv:9000 (userId, eventId)=(2650493630, 87962584)\n",
      "train.csv:9500 (userId, eventId)=(2791418962, 4223848259)\n",
      "train.csv:10000 (userId, eventId)=(2903662804, 2791462807)\n",
      "train.csv:10500 (userId, eventId)=(3036141956, 3929507420)\n",
      "train.csv:11000 (userId, eventId)=(3176074542, 3459485614)\n",
      "train.csv:11500 (userId, eventId)=(3285425249, 2271782630)\n",
      "train.csv:12000 (userId, eventId)=(3410667855, 1063772489)\n",
      "train.csv:12500 (userId, eventId)=(3531604778, 2584839423)\n",
      "train.csv:13000 (userId, eventId)=(3686871863, 53495098)\n",
      "train.csv:13500 (userId, eventId)=(3833637800, 2415873572)\n",
      "train.csv:14000 (userId, eventId)=(3944021305, 2096772901)\n",
      "train.csv:14500 (userId, eventId)=(4075466480, 3567240505)\n",
      "train.csv:15000 (userId, eventId)=(4197193550, 1628057176)\n",
      "生成预测数据...\n",
      "\n",
      "test.csv:500 (userId, eventId)=(182290053, 2529072432)\n",
      "test.csv:1000 (userId, eventId)=(433510318, 4244463632)\n",
      "test.csv:1500 (userId, eventId)=(632808865, 2845303452)\n",
      "test.csv:2000 (userId, eventId)=(813611885, 2036538169)\n",
      "test.csv:2500 (userId, eventId)=(1010701404, 303459881)\n",
      "test.csv:3000 (userId, eventId)=(1210932037, 2529072432)\n",
      "test.csv:3500 (userId, eventId)=(1452921099, 2705317682)\n",
      "test.csv:4000 (userId, eventId)=(1623287180, 1626678328)\n",
      "test.csv:4500 (userId, eventId)=(1855201342, 2603032829)\n",
      "test.csv:5000 (userId, eventId)=(2083900381, 2529072432)\n",
      "test.csv:5500 (userId, eventId)=(2318415276, 2509151803)\n",
      "test.csv:6000 (userId, eventId)=(2528161539, 4025975316)\n",
      "test.csv:6500 (userId, eventId)=(2749110768, 4244406355)\n",
      "test.csv:7000 (userId, eventId)=(2927772127, 1532377761)\n",
      "test.csv:7500 (userId, eventId)=(3199685636, 1776393554)\n",
      "test.csv:8000 (userId, eventId)=(3393388475, 680270887)\n",
      "test.csv:8500 (userId, eventId)=(3601169721, 154434302)\n",
      "test.csv:9000 (userId, eventId)=(3828963415, 3067222491)\n",
      "test.csv:9500 (userId, eventId)=(4018723397, 2522610844)\n",
      "test.csv:10000 (userId, eventId)=(4180064266, 2658555390)\n"
     ]
    }
   ],
   "source": [
    "# Write the combined feature files for the downstream classifier:\n",
    "# RS_train.csv (with labels) and RS_test.csv (without).\n",
    "print(\"生成训练数据...\\n\")  # \"generating training data\"\n",
    "generateRSData(RS, train=True, header=True)\n",
    "\n",
    "print(\"生成预测数据...\\n\")  # \"generating prediction data\"\n",
    "generateRSData(RS, train=False, header=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
