{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# 将所有特征串联起来，构成RS_Train.csv\n",
    "#RS_Test.csv\n",
    "#为最后推荐系统做准备\n",
    "from __future__ import division\n",
    "\n",
    "import cPickle\n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "import scipy.sparse as ss\n",
    "import math\n",
    "from numpy.random import random  \n",
    "from collections import defaultdict\n",
    "from random import choice\n",
    "\n",
     "class RecommonderSystem:\n",
     "    \"\"\"Hybrid recommender combining model-based CF (SVD), user- and\n",
     "    item-based CF, and popularity/social features, all loaded from files\n",
     "    pre-computed by the earlier notebooks. (Class-name spelling kept\n",
     "    as-is: callers reference RecommonderSystem.)\"\"\"\n",
     "    def __init__(self):\n",
     "        # Load the pre-computed data and train the SVD model.\n",
     "\n",
     "        # re-mapped indices of users and events\n",
     "        self.userIndex = cPickle.load(open(\"PE_userIndex.pkl\", 'rb'))\n",
     "        self.eventIndex = cPickle.load(open(\"PE_eventIndex.pkl\", 'rb'))\n",
     "        \n",
     "        self.n_users = len(self.userIndex)\n",
     "        self.n_items = len(self.eventIndex)\n",
     "\n",
     "        # user-event score matrix R\n",
     "        # train_SVD re-reads the same data from file: the two code paths\n",
     "        # expect different formats and were never unified\n",
     "        self.userEventScores = sio.mmread(\"PE_userEventScores\").todense()\n",
     "\n",
     "        # inverted indices\n",
     "        ## events attended by each user\n",
     "        self.itemsForUser = cPickle.load(open(\"PE_eventsForUser.pkl\", 'rb'))\n",
     "        ## users attending each event\n",
     "        self.usersForItem = cPickle.load(open(\"PE_usersForEvent.pkl\", 'rb'))\n",
     "\n",
     "        # initialise and train the model-based CF (SVD) parameters\n",
     "        self.init_SVD()\n",
     "        self.train_SVD(trainfile = \"train.csv\")\n",
     "\n",
     "        # user-user similarity derived from user attributes\n",
     "        self.userSimMatrix = sio.mmread(\"US_userSimMatrix\").todense()\n",
     "\n",
     "        # event-event similarities derived from event properties / content\n",
     "        self.eventPropSim = sio.mmread(\"EV_eventPropSim\").todense()\n",
     "        self.eventContSim = sio.mmread(\"EV_eventContSim\").todense()\n",
     "\n",
     "        # number of friends of each user\n",
     "        self.numFriends = sio.mmread(\"UF_numFriends\")\n",
     "        # influence of each friend's event scores on the user\n",
     "        self.userFriends = sio.mmread(\"UF_userFriends\").todense()\n",
     "\n",
     "        # intrinsic popularity of each event\n",
     "        self.eventPopularity = sio.mmread(\"EA_eventPopularity\").todense()\n",
     "        \n",
     "#         self.test()\n",
     "\n",
    "    def init_SVD(self, K=20):\n",
    "        #初始化模型参数（for 基于模型的协同过滤SVD_CF）\n",
    "        self.K = K  \n",
    "\n",
    "        #init parameters\n",
    "        #bias\n",
    "        self.bi = np.zeros(self.n_items)  \n",
    "        self.bu = np.zeros(self.n_users)  \n",
    "\n",
    "        #the small matrix\n",
    "        self.P = random((self.n_users,self.K))/10*(np.sqrt(self.K))\n",
    "        self.Q = random((self.K, self.n_items))/10*(np.sqrt(self.K))  \n",
    "\n",
    "          \n",
    "    def train_SVD(self,trainfile = 'train.csv', steps=1000,gamma=0.25,Lambda=0.15):\n",
    "        #训练SVD模型（for 基于模型的协同过滤SVD_CF）\n",
    "        #gamma：为学习率\n",
    "        #Lambda：正则参数\n",
    "\n",
    "        #偷懒了，为了和原来的代码的输入接口一样，直接从训练文件中去读取数据\n",
    "        #print \"SVD Train...\"\n",
    "        ftrain = open(trainfile, 'r')\n",
    "        ftrain.readline()\n",
    "        self.mu = 0.0\n",
    "        n_records = 0\n",
    "        uids = []  #每条记录的用户索引\n",
    "        i_ids = [] #每条记录的item索引\n",
    "        user_item_pairs = [] # 有效的用户活动记录\n",
    "        self.rmse = [] # 记录每次迭代的RMSE值\n",
    "        \n",
    "        #用户-Item关系矩阵R（内容同userEventScores相同），临时变量，训练完了R不再需要\n",
    "        R = np.zeros((self.n_users, self.n_items))\n",
    "\n",
    "        for line in ftrain:\n",
    "            cols = line.strip().split(\",\")\n",
    "            u = self.userIndex[cols[0]]  #用户\n",
    "            i = self.eventIndex[cols[1]] #活动\n",
    "            #获得有效的用户,活动记录\n",
    "            user_item_pairs.append((u,i))\n",
    "            \n",
    "            uids.append(u)\n",
    "            i_ids.append(i)\n",
    "\n",
    "            R[u,i] = int(cols[4])  #interested\n",
    "            self.mu += R[u,i]\n",
    "            n_records += 1\n",
    "            \n",
    "           \n",
    "\n",
    "        ftrain.close()\n",
    "        self.mu /= n_records\n",
    "        \n",
    "        #初始将学习率设定比较大\n",
    "        gamma = 1\n",
    "        #将步数跟训练数据一致\n",
    "        steps = n_records\n",
    "        # 请补充完整SVD模型训练过程\n",
    "        rmse = 0.0\n",
    "        # 用随机梯度下降 打乱原始用户对\n",
    "        np.random.shuffle(user_item_pairs)\n",
    "        \n",
    "        for step in range(0,steps):\n",
    "            \n",
    "            #得到偏差值\n",
    "            errors = [R[u,i] - self.pred_SVD(u,i) for u, i in user_item_pairs]\n",
    "            rmse = np.sqrt(np.sum(np.square(errors)) / n_records)\n",
    "#             print('rmse = {}'.format(mse))\n",
    "            self.rmse.append(rmse)\n",
    "    \n",
    "            #调整学习率\n",
    "            if step > 2 and self.rmse[step] - self.rmse[step-1] < 0.01:\n",
    "                gamma *= 0.5\n",
    "                \n",
    "            #输出结果\n",
    "            if step % 1000 == 0:\n",
    "                print('in step {}, rmse: {}'.format(step,rmse))\n",
    "#             print('\\nin step {}, rmse = {}'.format(step, self.rmse))\n",
    "            \n",
    "            #开始迭代\n",
    "            u, i = user_item_pairs[step]\n",
    "            eui = R[u,i] - self.pred_SVD(u,i)\n",
    "            \n",
    "            #更新P和Q的矩阵\n",
    "            for k in range(self.K):\n",
    "                self.P[u,k] += gamma * self.Q[k,i] * eui - Lambda * self.P[u,k]\n",
    "                self.Q[k,i] += gamma * self.P[u,k] * eui - Lambda * self.Q[k,i]\n",
    "                self.bu[u] += gamma * eui - Lambda * self.bu[u]            \n",
    "                self.bi[i] += gamma * eui - Lambda * self.bi[i]\n",
    "                \n",
    "# gamma *= 0.9\n",
    "     \n",
    "        print \"SVD trained\"\n",
    "\n",
    "    def pred_SVD(self, uid, i_id):\n",
    "        #根据当前参数，预测用户uid对Item（i_id）的打分        \n",
    "        ans=self.mu + self.bi[i_id] + self.bu[uid] + np.dot(self.P[uid,:],self.Q[:,i_id])  \n",
    "\n",
    "        #将打分范围控制在0-1之间\n",
    "        if ans>1:  \n",
    "            return 1  \n",
    "        elif ans<0:  \n",
    "            return 0\n",
    "        return ans  \n",
    "\n",
     "    # Return user uid's score list over the items shown to them.\n",
     "    def get_user_score_list(self, uid):\n",
     "        # NOTE(review): returns the scalar 0 for users with no items, but a\n",
     "        # matrix row otherwise; np.mean() accepts both, so callers still work.\n",
     "        if len(self.itemsForUser[uid]) == 0:\n",
     "            return 0\n",
     "        return self.userEventScores[uid, [i for i in self.itemsForUser[uid]]]\n",
     "    \n",
     "    # Return the mean of user uid's item scores.\n",
     "    def get_user_mean_score(self, uid):\n",
     "        return np.mean(self.get_user_score_list(uid))\n",
     "        \n",
     "    def sim_cal_UserCF(self, uid1, uid2 ):\n",
     "        \"\"\"Similarity between users uid1 and uid2 for user-based CF:\n",
     "        the mean-centred (Pearson-style) cosine of their scores over the\n",
     "        events both users were shown.\"\"\"\n",
     "        similarity = 0.0\n",
     "        events_for_uid1 = self.itemsForUser[uid1]\n",
     "        events_for_uid2 = self.itemsForUser[uid2]\n",
     "        \n",
     "        # intersection of the two users' event sets\n",
     "        events_both_shown = events_for_uid1&events_for_uid2\n",
     "        if len(events_both_shown) > 0:\n",
     "            # score lists of uid1 and uid2 over the shared events\n",
     "            scores1 = self.userEventScores[uid1, [i for i in events_both_shown]]\n",
     "            scores2 = self.userEventScores[uid2, [i for i in events_both_shown]]\n",
     "            \n",
     "            # each user's mean score over all of their events\n",
     "            mean_u1 = np.array(self.get_user_mean_score(uid1))\n",
     "            mean_u2 = np.array(self.get_user_mean_score(uid2))\n",
     "            \n",
     "            # mean-centred scores (the bracketed terms of the formula)\n",
     "            adjust_scores_u1 = np.array(scores1 - mean_u1)\n",
     "            adjust_scores_u2 = np.array(scores2 - mean_u2)\n",
     "            \n",
     "            # numerator of the similarity\n",
     "            numerator = np.sum(adjust_scores_u1 * adjust_scores_u2)\n",
     "            \n",
     "            # denominator ('dominator' is the original's naming, kept as-is)\n",
     "            sq_u1 = adjust_scores_u1 ** 2\n",
     "            sq_u2 = adjust_scores_u2 ** 2\n",
     "            dominator = math.sqrt(np.sum(sq_u1)) * math.sqrt(np.sum(sq_u2))\n",
     "            if dominator == 0: # no variance on either side: similarity 0\n",
     "                return 0\n",
     "            \n",
     "            # cosine of the centred score vectors\n",
     "            similarity = numerator / dominator\n",
     "            \n",
     "            return similarity\n",
     "        else :\n",
     "            return 0\n",
     "        \n",
     "\n",
     "    def userCFReco(self, userId, eventId):\n",
     "        \"\"\"\n",
     "        User-based CF: recommendation score of the event for the user.\n",
     "        return:\n",
     "            recommendation score for (userId, eventId)\n",
     "        \"\"\"\n",
     " \n",
     "        ans = 0.0\n",
     "        # map the raw ids to internal indices\n",
     "        u1 = self.userIndex[userId]\n",
     "        eventIndex = self.eventIndex[eventId]\n",
     "        \n",
     "        # user A's mean score over their own events\n",
     "        mean_u1 = self.get_user_mean_score(u1)\n",
     "        # all other users associated with this event\n",
     "        related_users = self.usersForItem[eventIndex]\n",
     "        \n",
     "        # no related users: fall back to the user's mean score\n",
     "        if len(related_users) == 0 :\n",
     "            ans = mean_u1 + 0\n",
     "            return ans\n",
     "        \n",
     "        # numerator / denominator of the prediction formula; the tiny\n",
     "        # epsilon avoids division by zero when no neighbour passes the\n",
     "        # similarity threshold below\n",
     "        numerator = 0.000000001\n",
     "        dominator = 0.000000001\n",
     "        similarities = []\n",
     "        scores_for_event = []\n",
     "        for u2 in related_users:\n",
     "            if u2 == u1:\n",
     "                # skip the user themselves\n",
     "                continue\n",
     "            \n",
     "            similarity = self.sim_cal_UserCF(u1, u2)\n",
     "            mean_u2 = self.get_user_mean_score(u2)# user B's mean score\n",
     "            if similarity > 0.8 :  # similarity threshold for neighbours\n",
     "                score_by_u2 = self.userEventScores[u2, eventIndex] # B's score for this event\n",
     "                scores_for_event.append(score_by_u2)\n",
     "                numerator += similarity * (score_by_u2 - mean_u2)\n",
     "                dominator += similarity\n",
     "            similarities.append(similarity)\n",
     "        \n",
     "        # final score: weighted deviation of neighbours plus A's mean\n",
     "        ans = numerator / dominator + mean_u1\n",
     "        return ans\n",
     "\n",
     "    def sim_cal_ItemCF(self, i_id1, i_id2):\n",
     "        \"\"\"Similarity between items i_id1 and i_id2: mean-centred cosine\n",
     "        over the users who scored both items.\"\"\"\n",
     "        similarity = 0.0\n",
     "        # users associated with each of the two items\n",
     "        users_for_i1 = self.usersForItem[i_id1]\n",
     "        users_for_i2 = self.usersForItem[i_id2]\n",
     "        common_users = users_for_i1 & users_for_i2\n",
     "        # nobody scored both items: similarity is 0\n",
     "        if len(common_users) == 0 :\n",
     "            return similarity\n",
     "\n",
     "        # numerator / denominator, seeded with a tiny epsilon so the\n",
     "        # division below can never be by exactly zero\n",
     "        numerator = 0.0000001\n",
     "        dominator = 0.0000001\n",
     "        \n",
     "        # score lists of the common users for i1 and i2\n",
     "        scores_1 = self.userEventScores[[u for u in common_users],i_id1]\n",
     "        scores_2 = self.userEventScores[[u for u in common_users],i_id2]\n",
     "\n",
     "        # transpose: the matrix slices above come back as column vectors\n",
     "        scores_1 = np.transpose(scores_1)\n",
     "        scores_2 = np.transpose(scores_2)\n",
     "\n",
     "        # mean score of each common user\n",
     "        mean_scores = []\n",
     "        for u in common_users:\n",
     "            mean_scores.append(self.get_user_mean_score(u))\n",
     "        # centre both score lists by the per-user means\n",
     "        scores_1_adjust = np.array(scores_1 - mean_scores)\n",
     "        scores_2_adjust = np.array(scores_2 - mean_scores)\n",
     "        \n",
     "        numerator += np.sum(scores_1_adjust * scores_2_adjust)\n",
     "        dominator += math.sqrt(np.sum(scores_1_adjust **2) * np.sum(scores_2_adjust ** 2))\n",
     "        \n",
     "        similarity = numerator / dominator\n",
     "\n",
     "        return similarity\n",
     "\n",
     "    def eventCFReco(self, userId, eventId):    \n",
     "        \"\"\"\n",
     "        Item-based CF: recommendation score of the event for the user.\n",
     "        return:\n",
     "            recommendation score for (userId, eventId)\n",
     "        \"\"\"\n",
     "        ans = 0.0\n",
     "        # map the raw ids to internal indices\n",
     "        u = self.userIndex[userId]\n",
     "        e = self.eventIndex[eventId]\n",
     "        \n",
     "        # numerator / denominator, seeded with a tiny epsilon so the\n",
     "        # division below can never be by exactly zero\n",
     "        numerator = 0.0000001\n",
     "        dominator = 0.0000001\n",
     "        \n",
     "        # the set of items user u has scored\n",
     "        events = self.itemsForUser[u]\n",
     "        if len(events) == 0:\n",
     "            return ans\n",
     "        \n",
     "        for i in events:\n",
     "            if i == e: # skip the event itself\n",
     "                continue\n",
     "            similarity = self.sim_cal_ItemCF(e, i) # similarity of e and i\n",
     "            numerator += similarity * self.userEventScores[u, i]\n",
     "            dominator += similarity\n",
     "        ans = numerator / dominator\n",
     "        return ans\n",
     "    \n",
     "    def svdCFReco(self, userId, eventId):\n",
     "        # Model-based CF (SVD/LFM): predict with the trained factor model.\n",
     "        u = self.userIndex[userId]\n",
     "        i = self.eventIndex[eventId]\n",
     "\n",
     "        return self.pred_SVD(u,i)\n",
     "\n",
     "    def userReco(self, userId, eventId):\n",
     "        \"\"\"\n",
     "        Like user-based CF, except the user-user similarity comes from the\n",
     "        users' own attributes. Pseudo-code of the idea:\n",
     "        for item i\n",
     "          for every other user v that has a preference for i\n",
     "            compute similarity s between u and v\n",
     "            incorporate v's preference for i weighted by s into running average\n",
     "        return top items ranked by weighted average\n",
     "        \"\"\"\n",
     "        i = self.userIndex[userId]\n",
     "        j = self.eventIndex[eventId]\n",
     "\n",
     "        # everyone's score for event j, weighted by similarity to user i\n",
     "        vs = self.userEventScores[:, j]\n",
     "        sims = self.userSimMatrix[i, :]\n",
     "\n",
     "        prod = sims * vs\n",
     "\n",
     "        try:\n",
     "            # subtract the user's own score so only others' opinions count\n",
     "            return prod[0, 0] - self.userEventScores[i, j]\n",
     "        except IndexError:\n",
     "            return 0\n",
     "\n",
     "    def eventReco(self, userId, eventId):\n",
     "        \"\"\"Content-style recommendation: user i's scores weighted by the\n",
     "        event-event similarity to event j, once for the property-based and\n",
     "        once for the content-based similarity matrix.\n",
     "        Returns the pair (pscore, cscore).\"\"\"\n",
     "        i = self.userIndex[userId]\n",
     "        j = self.eventIndex[eventId]\n",
     "        js = self.userEventScores[i, :]\n",
     "        psim = self.eventPropSim[:, j]\n",
     "        csim = self.eventContSim[:, j]\n",
     "        pprod = js * psim\n",
     "        cprod = js * csim\n",
     "\n",
     "        pscore = 0\n",
     "        cscore = 0\n",
     "        try:\n",
     "            # subtract the user's own score for j so it does not count itself\n",
     "            pscore = pprod[0, 0] - self.userEventScores[i, j]\n",
     "        except IndexError:\n",
     "            pass\n",
     "        try:\n",
     "            cscore = cprod[0, 0] - self.userEventScores[i, j]\n",
     "        except IndexError:\n",
     "            pass\n",
     "        return pscore, cscore\n",
     "\n",
    "    def userPop(self, userId):\n",
    "        \"\"\"\n",
    "        基于用户的朋友个数来推断用户的社交程度\n",
    "        主要的考量是如果用户的朋友非常多，可能会更倾向于参加各种社交活动\n",
    "        \"\"\"\n",
    "        if self.userIndex.has_key(userId):\n",
    "            i = self.userIndex[userId]\n",
    "            try:\n",
    "                return self.numFriends[0, i]\n",
    "            except IndexError:\n",
    "                return 0\n",
    "        else:\n",
    "            return 0\n",
    "\n",
     "    def friendInfluence(self, userId):\n",
     "        \"\"\"\n",
     "        Influence of friends on the user: how many of the user's friends\n",
     "        are keen participants of social events. An active circle of\n",
     "        friends may pull the user towards attending events too.\n",
     "        \"\"\"\n",
     "        nusers = np.shape(self.userFriends)[1]\n",
     "        i = self.userIndex[userId]\n",
     "        # mean of the user's friend-influence row over all users\n",
     "        return (self.userFriends[i, :].sum(axis=0) / nusers)[0,0]\n",
     "\n",
    "    def eventPop(self, eventId):\n",
    "        \"\"\"\n",
    "        本活动本身的热度\n",
    "        主要是通过参与的人数来界定的\n",
    "        \"\"\"\n",
    "        i = self.eventIndex[eventId]\n",
    "        return self.eventPopularity[i, 0]\n",
    "    \n",
    "\n",
    "    #         self.userCFReco('3044012','1918771225')\n",
    "#         self.userCFReco('3044012','2529072432')\n",
    "#         self.userCFReco('4236494','152418051')\n",
    "#         self.userCFReco('4236494','2352676247')\n",
    "        self.sim_cal_ItemCF(10342, 7065)\n",
    "#         self.sim_cal_ItemCF(0, 1)\n",
    "#         self.sim_cal_ItemCF(7857, 2875)\n",
    "#         self.eventCFReco('4236494', '4203627753')\n",
    "#         self.sim_cal_UserCF(1280, 1645)\n",
    "#         self.sim_cal_UserCF(1280, 211)\n",
    "#         self.sim_cal_UserCF(1280, 1818)\n",
    "#         self.sim_cal_ItemCF(170, 1635)\n",
    "#         self.sim_cal_ItemCF(45, 1635)\n",
    "#         self.eventCFReco(1025, 1635)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def generateRSData(RS, train=True, header=True):\n",
    "    \"\"\"\n",
    "    把前面user-based协同过滤 和 item-based协同过滤，以及各种热度和影响度作为特征组合在一起\n",
    "    生成新的训练数据，用于分类器分类使用\n",
    "    \"\"\"\n",
    "    fn = \"train.csv\" if train else \"test.csv\"\n",
    "    fin = open(fn, 'rb')\n",
    "    fout = open(\"RS_\" + fn, 'wb')\n",
    "    \n",
    "    #忽略第一行（列名字）\n",
    "    fin.readline().strip().split(\",\")\n",
    "    \n",
    "    # write output header\n",
    "    if header:\n",
    "        ocolnames = [\"invited\", \"userCF_reco\", \"evtCF_reco\",\"svdCF_reco\",\"user_reco\", \"evt_p_reco\",\n",
    "        \"evt_c_reco\", \"user_pop\", \"frnd_infl\", \"evt_pop\"]\n",
    "        if train:\n",
    "            ocolnames.append(\"interested\")\n",
    "            ocolnames.append(\"not_interested\")\n",
    "        fout.write(\",\".join(ocolnames) + \"\\n\")\n",
    "    \n",
    "    ln = 0\n",
    "    count = 10\n",
    "    for line in fin:\n",
    "#         count = count - 1;\n",
    "#         if count <= 0:\n",
    "# #             break;\n",
    "        ln += 1\n",
    "        if ln%500 == 0:\n",
    "            print \"%s:%d (userId, eventId)=(%s, %s)\" % (fn, ln, userId, eventId)\n",
    "            #break;\n",
    "      \n",
    "        cols = line.strip().split(\",\")\n",
    "        userId = cols[0]\n",
    "        eventId = cols[1]\n",
    "        invited = cols[2]\n",
    "      \n",
    "        userCF_reco = RS.userCFReco(userId, eventId)\n",
    "        itemCF_reco = RS.eventCFReco(userId, eventId)\n",
    "#         if userCF_reco >= 0:\n",
    "#         #print('userCF_reco= {} for user {} and event {}'.format(userCF_reco, userId, eventId))\n",
    "        svdCF_reco = RS.svdCFReco(userId, eventId)\n",
    "        #print('itemCF_reco= {} for user {} and event {}'.format(itemCF_reco, userId, eventId))\n",
    "      \n",
    "        user_reco = RS.userReco(userId, eventId)\n",
    "        evt_p_reco, evt_c_reco = RS.eventReco(userId, eventId)\n",
    "        user_pop = RS.userPop(userId)\n",
    "     \n",
    "        frnd_infl = RS.friendInfluence(userId)\n",
    "        evt_pop = RS.eventPop(eventId)\n",
    "        ocols = [invited, userCF_reco, itemCF_reco, svdCF_reco,user_reco, evt_p_reco,\n",
    "        evt_c_reco, user_pop, frnd_infl, evt_pop]\n",
    "      \n",
    "        if train:\n",
    "            ocols.append(cols[4]) # interested\n",
    "            ocols.append(cols[5]) # not_interested\n",
    "        fout.write(\",\".join(map(lambda x: str(x), ocols)) + \"\\n\")\n",
    "    \n",
    "    fin.close()\n",
    "    fout.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "in step 0, rmse: 0.850656524165\n",
      "in step 1000, rmse: 0.801344018032\n",
      "in step 2000, rmse: 0.764632734027\n",
      "in step 3000, rmse: 0.733875877533\n",
      "in step 4000, rmse: 0.702627403056\n",
      "in step 5000, rmse: 0.675771020008\n",
      "in step 6000, rmse: 0.650242733\n",
      "in step 7000, rmse: 0.627263316393\n",
      "in step 8000, rmse: 0.605747122168\n",
      "in step 9000, rmse: 0.586259016222\n",
      "in step 10000, rmse: 0.568328631975\n",
      "in step 11000, rmse: 0.552418684316\n",
      "in step 12000, rmse: 0.538047568597\n",
      "in step 13000, rmse: 0.525002291396\n",
      "in step 14000, rmse: 0.513349261369\n",
      "in step 15000, rmse: 0.5021979592\n",
      "SVD trained\n",
      "生成训练数据...\n",
      "\n",
      "train.csv:500 (userId, eventId)=(123290209, 1887085024)\n",
      "train.csv:1000 (userId, eventId)=(272886293, 199858305)\n",
      "train.csv:1500 (userId, eventId)=(395305791, 1582270949)\n",
      "train.csv:2000 (userId, eventId)=(527523423, 3272728211)\n",
      "train.csv:2500 (userId, eventId)=(651258472, 792632006)\n",
      "train.csv:3000 (userId, eventId)=(811791433, 524756826)\n",
      "train.csv:3500 (userId, eventId)=(985547042, 1269035551)\n",
      "train.csv:4000 (userId, eventId)=(1107615001, 173949238)\n",
      "train.csv:4500 (userId, eventId)=(1236336671, 3849306291)\n",
      "train.csv:5000 (userId, eventId)=(1414301782, 2652356640)\n",
      "train.csv:5500 (userId, eventId)=(1595465532, 955398943)\n",
      "train.csv:6000 (userId, eventId)=(1747091728, 2131379889)\n",
      "train.csv:6500 (userId, eventId)=(1914182220, 955398943)\n",
      "train.csv:7000 (userId, eventId)=(2071842684, 1076364848)\n",
      "train.csv:7500 (userId, eventId)=(2217853337, 3051438735)\n",
      "train.csv:8000 (userId, eventId)=(2338481531, 2525447278)\n",
      "train.csv:8500 (userId, eventId)=(2489551967, 520657921)\n",
      "train.csv:9000 (userId, eventId)=(2650493630, 87962584)\n",
      "train.csv:9500 (userId, eventId)=(2791418962, 4223848259)\n",
      "train.csv:10000 (userId, eventId)=(2903662804, 2791462807)\n",
      "train.csv:10500 (userId, eventId)=(3036141956, 3929507420)\n",
      "train.csv:11000 (userId, eventId)=(3176074542, 3459485614)\n",
      "train.csv:11500 (userId, eventId)=(3285425249, 2271782630)\n",
      "train.csv:12000 (userId, eventId)=(3410667855, 1063772489)\n",
      "train.csv:12500 (userId, eventId)=(3531604778, 2584839423)\n",
      "train.csv:13000 (userId, eventId)=(3686871863, 53495098)\n",
      "train.csv:13500 (userId, eventId)=(3833637800, 2415873572)\n",
      "train.csv:14000 (userId, eventId)=(3944021305, 2096772901)\n",
      "train.csv:14500 (userId, eventId)=(4075466480, 3567240505)\n",
      "train.csv:15000 (userId, eventId)=(4197193550, 1628057176)\n",
      "生成预测数据...\n",
      "\n",
      "test.csv:500 (userId, eventId)=(182290053, 2529072432)\n",
      "test.csv:1000 (userId, eventId)=(433510318, 4244463632)\n",
      "test.csv:1500 (userId, eventId)=(632808865, 2845303452)\n",
      "test.csv:2000 (userId, eventId)=(813611885, 2036538169)\n",
      "test.csv:2500 (userId, eventId)=(1010701404, 303459881)\n",
      "test.csv:3000 (userId, eventId)=(1210932037, 2529072432)\n",
      "test.csv:3500 (userId, eventId)=(1452921099, 2705317682)\n",
      "test.csv:4000 (userId, eventId)=(1623287180, 1626678328)\n",
      "test.csv:4500 (userId, eventId)=(1855201342, 2603032829)\n",
      "test.csv:5000 (userId, eventId)=(2083900381, 2529072432)\n",
      "test.csv:5500 (userId, eventId)=(2318415276, 2509151803)\n",
      "test.csv:6000 (userId, eventId)=(2528161539, 4025975316)\n",
      "test.csv:6500 (userId, eventId)=(2749110768, 4244406355)\n",
      "test.csv:7000 (userId, eventId)=(2927772127, 1532377761)\n",
      "test.csv:7500 (userId, eventId)=(3199685636, 1776393554)\n",
      "test.csv:8000 (userId, eventId)=(3393388475, 680270887)\n",
      "test.csv:8500 (userId, eventId)=(3601169721, 154434302)\n",
      "test.csv:9000 (userId, eventId)=(3828963415, 3067222491)\n",
      "test.csv:9500 (userId, eventId)=(4018723397, 2522610844)\n",
      "test.csv:10000 (userId, eventId)=(4180064266, 2658555390)\n"
     ]
    }
   ],
   "source": [
     "RS = RecommonderSystem()\n",
     "print \"生成训练数据...\\n\"\n",
     "generateRSData(RS,train=True,  header=True) #TODO: remember to keep this un-commented for the real run\n",
     "\n",
     "print \"生成预测数据...\\n\"\n",
     "generateRSData(RS, train=False, header=True) #TODO: remember to keep this un-commented for the real run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "时间、地点等特征都没有处理了，可以考虑用户看到event的时间与event开始时间的差、用户地点和event地点的差异。。。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
