{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 基于SVD的协同过滤"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 导入工具包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "# pickle/json: load the user/item index mappings and inverted tables,\n",
    "# and save / restore the trained model parameters\n",
    "import pickle\n",
    "import json  \n",
    "\n",
    "from numpy.random import random\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 读入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# User and item index mappings (raw id -> matrix row index)\n",
    "users_index = pickle.load(open(\"users_index.pkl\", 'rb'))\n",
    "items_index = pickle.load(open(\"items_index.pkl\", 'rb'))\n",
    "\n",
    "n_users = len(users_index)\n",
    "n_items = len(items_index)\n",
    "    \n",
    "# User-item rating matrix R (unused here, kept for reference)\n",
    "#scores = sio.mmread(\"scores\").todense()\n",
    "    \n",
    "# Inverted tables\n",
    "## movies each user has rated\n",
    "user_items = pickle.load(open(\"user_items.pkl\", 'rb'))\n",
    "## users who have rated each movie\n",
    "item_users = pickle.load(open(\"item_users.pkl\", 'rb'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>user_id</th>\n",
       "      <th>item_id</th>\n",
       "      <th>rating</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>3</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1</td>\n",
       "      <td>3</td>\n",
       "      <td>4</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1</td>\n",
       "      <td>4</td>\n",
       "      <td>3</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1</td>\n",
       "      <td>5</td>\n",
       "      <td>3</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   user_id  item_id  rating\n",
       "0        1        1       5\n",
       "1        1        2       3\n",
       "2        1        3       4\n",
       "3        1        4       3\n",
       "4        1        5       3"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load training data (MovieLens u1.base, tab-separated triplets);\n",
    "# the timestamp column is not needed for rating prediction\n",
    "triplet_cols = ['user_id','item_id', 'rating', 'timestamp'] \n",
    "\n",
    "dpath = './data/'\n",
    "df_triplet = pd.read_csv(dpath +'u1.base', sep='\\t', names=triplet_cols, encoding='latin-1')\n",
    "df_triplet = df_triplet.drop(['timestamp'], axis=1)\n",
    "\n",
    "df_triplet.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 初始化模型参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dimension of the latent factors\n",
    "K = 40\n",
    "\n",
    "# Item and user bias terms, shape (n, 1)\n",
    "bi = np.zeros((n_items,1))    \n",
    "bu = np.zeros((n_users,1))   \n",
    "\n",
    "# Item and user latent vectors, initialized with small uniform noise\n",
    "# scaled by sqrt(K)/10.  Same distribution as the original per-row\n",
    "# loops, but drawn in one vectorized call (no Python loop needed).\n",
    "qi = random((n_items, K)) / 10 * np.sqrt(K)\n",
    "pu = random((n_users, K)) / 10 * np.sqrt(K)\n",
    "\n",
    "# Global mean rating over all training records\n",
    "mu = df_triplet['rating'].mean()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 根据当前参数，预测用户uid对Item（i_id）的打分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def svd_pred(uid, iid):\n",
    "    \"\"\"Predict the rating of user `uid` for item `iid` under the current model.\n",
    "\n",
    "    Prediction = global mean + item bias + user bias + dot(qi[iid], pu[uid]).\n",
    "    Indexing the bias rows with [.., 0] returns a scalar; previously the\n",
    "    shape-(1,) slices made this return a length-1 array, which leaked into\n",
    "    printed RMSE values like [0.8086].\n",
    "    \"\"\"\n",
    "    score = mu + bi[iid, 0] + bu[uid, 0] + np.sum(qi[iid] * pu[uid])\n",
    "\n",
    "    # Optionally clamp the score into the valid rating range [1, 5]\n",
    "    #if score > 5:\n",
    "    #    score = 5\n",
    "    #elif score < 1:\n",
    "    #    score = 1\n",
    "\n",
    "    return score"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The 0-th  step is running\n",
      "the rmse of this step on train data is  [1.18444237]\n",
      "The 1-th  step is running\n",
      "the rmse of this step on train data is  [0.92634387]\n",
      "The 2-th  step is running\n",
      "the rmse of this step on train data is  [0.90740364]\n",
      "The 3-th  step is running\n",
      "the rmse of this step on train data is  [0.8962733]\n",
      "The 4-th  step is running\n",
      "the rmse of this step on train data is  [0.88714431]\n",
      "The 5-th  step is running\n",
      "the rmse of this step on train data is  [0.87823571]\n",
      "The 6-th  step is running\n",
      "the rmse of this step on train data is  [0.87074741]\n",
      "The 7-th  step is running\n",
      "the rmse of this step on train data is  [0.86408798]\n",
      "The 8-th  step is running\n",
      "the rmse of this step on train data is  [0.85839558]\n",
      "The 9-th  step is running\n",
      "the rmse of this step on train data is  [0.8535175]\n",
      "The 10-th  step is running\n",
      "the rmse of this step on train data is  [0.8492288]\n",
      "The 11-th  step is running\n",
      "the rmse of this step on train data is  [0.84508639]\n",
      "The 12-th  step is running\n",
      "the rmse of this step on train data is  [0.84189526]\n",
      "The 13-th  step is running\n",
      "the rmse of this step on train data is  [0.83907103]\n",
      "The 14-th  step is running\n",
      "the rmse of this step on train data is  [0.83647907]\n",
      "The 15-th  step is running\n",
      "the rmse of this step on train data is  [0.83376577]\n",
      "The 16-th  step is running\n",
      "the rmse of this step on train data is  [0.83184384]\n",
      "The 17-th  step is running\n",
      "the rmse of this step on train data is  [0.82950018]\n",
      "The 18-th  step is running\n",
      "the rmse of this step on train data is  [0.82770702]\n",
      "The 19-th  step is running\n",
      "the rmse of this step on train data is  [0.82642347]\n",
      "The 20-th  step is running\n",
      "the rmse of this step on train data is  [0.82466848]\n",
      "The 21-th  step is running\n",
      "the rmse of this step on train data is  [0.82336214]\n",
      "The 22-th  step is running\n",
      "the rmse of this step on train data is  [0.82210381]\n",
      "The 23-th  step is running\n",
      "the rmse of this step on train data is  [0.82089965]\n",
      "The 24-th  step is running\n",
      "the rmse of this step on train data is  [0.81994497]\n",
      "The 25-th  step is running\n",
      "the rmse of this step on train data is  [0.81893137]\n",
      "The 26-th  step is running\n",
      "the rmse of this step on train data is  [0.81798663]\n",
      "The 27-th  step is running\n",
      "the rmse of this step on train data is  [0.81714323]\n",
      "The 28-th  step is running\n",
      "the rmse of this step on train data is  [0.81629625]\n",
      "The 29-th  step is running\n",
      "the rmse of this step on train data is  [0.81549095]\n",
      "The 30-th  step is running\n",
      "the rmse of this step on train data is  [0.81509741]\n",
      "The 31-th  step is running\n",
      "the rmse of this step on train data is  [0.81453418]\n",
      "The 32-th  step is running\n",
      "the rmse of this step on train data is  [0.81378628]\n",
      "The 33-th  step is running\n",
      "the rmse of this step on train data is  [0.81340754]\n",
      "The 34-th  step is running\n",
      "the rmse of this step on train data is  [0.81280163]\n",
      "The 35-th  step is running\n",
      "the rmse of this step on train data is  [0.8123759]\n",
      "The 36-th  step is running\n",
      "the rmse of this step on train data is  [0.81196806]\n",
      "The 37-th  step is running\n",
      "the rmse of this step on train data is  [0.81171496]\n",
      "The 38-th  step is running\n",
      "the rmse of this step on train data is  [0.81127062]\n",
      "The 39-th  step is running\n",
      "the rmse of this step on train data is  [0.810821]\n",
      "The 40-th  step is running\n",
      "the rmse of this step on train data is  [0.81061506]\n",
      "The 41-th  step is running\n",
      "the rmse of this step on train data is  [0.81023499]\n",
      "The 42-th  step is running\n",
      "the rmse of this step on train data is  [0.81000311]\n",
      "The 43-th  step is running\n",
      "the rmse of this step on train data is  [0.80976359]\n",
      "The 44-th  step is running\n",
      "the rmse of this step on train data is  [0.80949836]\n",
      "The 45-th  step is running\n",
      "the rmse of this step on train data is  [0.80930498]\n",
      "The 46-th  step is running\n",
      "the rmse of this step on train data is  [0.80908433]\n",
      "The 47-th  step is running\n",
      "the rmse of this step on train data is  [0.8089213]\n",
      "The 48-th  step is running\n",
      "the rmse of this step on train data is  [0.80875705]\n",
      "The 49-th  step is running\n",
      "the rmse of this step on train data is  [0.80858781]\n"
     ]
    }
   ],
   "source": [
    "# gamma: learning rate\n",
    "# Lambda: regularization strength\n",
    "# steps: number of SGD epochs\n",
    "\n",
    "steps=50\n",
    "gamma=0.04\n",
    "Lambda=0.15\n",
    "\n",
    "# Total number of rating records\n",
    "n_records = df_triplet.shape[0]\n",
    "\n",
    "for step in range(steps):  \n",
    "    print ('The ' + str(step) + '-th  step is running' )\n",
    "    rmse_sum=0.0 \n",
    "            \n",
    "    # Shuffle the order of the training samples\n",
    "    kk = np.random.permutation(n_records)  \n",
    "    for j in range(n_records):  \n",
    "        # One training sample per update\n",
    "        line = kk[j]  \n",
    "        \n",
    "        uid = users_index [df_triplet.iloc[line]['user_id']]\n",
    "        iid = items_index [df_triplet.iloc[line]['item_id']]\n",
    "    \n",
    "        rating  = df_triplet.iloc[line]['rating']\n",
    "                \n",
    "        # Prediction residual\n",
    "        eui = rating - svd_pred(uid, iid)  \n",
    "        # Accumulate squared residuals for RMSE\n",
    "        rmse_sum += eui**2  \n",
    "                \n",
    "        # Stochastic gradient descent updates\n",
    "        bu[uid] += gamma * (eui - Lambda * bu[uid])  \n",
    "        bi[iid] += gamma * (eui - Lambda * bi[iid]) \n",
    "                \n",
    "        # BUG FIX: qi[iid] is a numpy VIEW, so the in-place update below\n",
    "        # would also mutate `temp`; .copy() preserves the pre-update item\n",
    "        # vector for the pu update, as intended.\n",
    "        temp = qi[iid].copy()\n",
    "        qi[iid] += gamma * (eui* pu[uid]- Lambda*qi[iid] )  \n",
    "        pu[uid] += gamma * (eui* temp - Lambda*pu[uid])  \n",
    "            \n",
    "    # Learning-rate decay\n",
    "    gamma=gamma*0.93  \n",
    "    print (\"the rmse of this step on train data is \",np.sqrt(rmse_sum/n_records))  "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 保存模型参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def save_json(filepath):\n",
    "    \"\"\"Serialize the trained model parameters (mu, K, biases, latent\n",
    "    factors) from the module-level globals to a JSON file.\"\"\"\n",
    "    params = {\n",
    "        'mu': mu,\n",
    "        'K': K,\n",
    "        'bi': bi.tolist(),\n",
    "        'bu': bu.tolist(),\n",
    "        'qi': qi.tolist(),\n",
    "        'pu': pu.tolist(),\n",
    "    }\n",
    "\n",
    "    # Write the parameter dictionary directly to the file as JSON\n",
    "    with open(filepath, 'w') as file:\n",
    "        json.dump(params, file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_json(filepath):\n",
    "    \"\"\"Load model parameters from a JSON file produced by save_json.\n",
    "\n",
    "    BUG FIX: the previous version assigned to function-local names, so the\n",
    "    loaded parameters were silently discarded.  The parameters live as\n",
    "    module-level globals, so declare them global before assigning.\n",
    "    \"\"\"\n",
    "    global mu, K, bi, bu, qi, pu\n",
    "\n",
    "    with open(filepath, 'r') as file:\n",
    "        dict_ = json.load(file)\n",
    "\n",
    "    mu = dict_['mu']\n",
    "    K = dict_['K']\n",
    "\n",
    "    bi = np.asarray(dict_['bi'])\n",
    "    bu = np.asarray(dict_['bu'])\n",
    "\n",
    "    qi = np.asarray(dict_['qi'])\n",
    "    pu = np.asarray(dict_['pu'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "save_json('svd_model.json')\n",
    "load_json('svd_model.json')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 测试"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 对给定用户，推荐物品/计算打分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# user: raw user id\n",
    "# Returns a DataFrame of recommended items and their predicted scores,\n",
    "# sorted by score in descending order (items already rated in training\n",
    "# are excluded).\n",
    "def svd_CF_recommend(user):\n",
    "    cur_user_id = users_index[user]\n",
    "\n",
    "    # Items this user already rated in the training set\n",
    "    cur_user_items = user_items[cur_user_id]\n",
    "\n",
    "    # Predicted score for every unrated item (rated items stay 0 and are\n",
    "    # filtered out below)\n",
    "    user_items_scores = np.zeros(n_items)\n",
    "    for i in range(n_items):\n",
    "        if i not in cur_user_items:\n",
    "            user_items_scores[i] = svd_pred(cur_user_id, i)\n",
    "\n",
    "    # Sort (score, item_index) pairs by descending predicted score\n",
    "    sort_index = sorted(((e, i) for i, e in enumerate(list(user_items_scores))), reverse=True)\n",
    "\n",
    "    # Build the reverse index once (O(n)); previously items_index was\n",
    "    # rescanned with list(...).index(...) inside the loop (O(n^2) total)\n",
    "    index_to_item = {v: k for k, v in items_index.items()}\n",
    "\n",
    "    # Collect rows first, then build the DataFrame in one shot\n",
    "    # (appending with df.loc[len(df)] is quadratic)\n",
    "    records = []\n",
    "    for score, cur_item_index in sort_index:\n",
    "        if not np.isnan(score) and cur_item_index not in cur_user_items:\n",
    "            records.append([index_to_item[cur_item_index], score])\n",
    "\n",
    "    return pd.DataFrame(records, columns=['item_id', 'score'])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 读取测试数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>user_id</th>\n",
       "      <th>item_id</th>\n",
       "      <th>rating</th>\n",
       "      <th>timestamp</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>6</td>\n",
       "      <td>5</td>\n",
       "      <td>887431973</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>10</td>\n",
       "      <td>3</td>\n",
       "      <td>875693118</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1</td>\n",
       "      <td>12</td>\n",
       "      <td>5</td>\n",
       "      <td>878542960</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1</td>\n",
       "      <td>14</td>\n",
       "      <td>5</td>\n",
       "      <td>874965706</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1</td>\n",
       "      <td>17</td>\n",
       "      <td>3</td>\n",
       "      <td>875073198</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   user_id  item_id  rating  timestamp\n",
       "0        1        6       5  887431973\n",
       "1        1       10       3  875693118\n",
       "2        1       12       5  878542960\n",
       "3        1       14       5  874965706\n",
       "4        1       17       3  875073198"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load test data (MovieLens u1.test; timestamp column kept here)\n",
    "triplet_cols = ['user_id','item_id', 'rating', 'timestamp'] \n",
    "\n",
    "dpath = './data/'\n",
    "df_triplet_test = pd.read_csv(dpath +'u1.test', sep='\\t', names=triplet_cols, encoding='latin-1')\n",
    "df_triplet_test.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 测试，并计算评价指标\n",
    "PR、覆盖度、RMSE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "599 is a new item or  user 7 already rated it.\n",
      "\n",
      "711 is a new item or  user 10 already rated it.\n",
      "\n",
      "814 is a new item or  user 13 already rated it.\n",
      "\n",
      "830 is a new item or  user 13 already rated it.\n",
      "\n",
      "852 is a new item or  user 13 already rated it.\n",
      "\n",
      "857 is a new item or  user 13 already rated it.\n",
      "\n",
      "1156 is a new item or  user 76 already rated it.\n",
      "\n",
      "1236 is a new item or  user 100 already rated it.\n",
      "\n",
      "1309 is a new item or  user 167 already rated it.\n",
      "\n",
      "1310 is a new item or  user 167 already rated it.\n",
      "\n",
      "1320 is a new item or  user 181 already rated it.\n",
      "\n",
      "1343 is a new item or  user 181 already rated it.\n",
      "\n",
      "1348 is a new item or  user 181 already rated it.\n",
      "\n",
      "1364 is a new item or  user 181 already rated it.\n",
      "\n",
      "1373 is a new item or  user 181 already rated it.\n",
      "\n",
      "1457 is a new item or  user 234 already rated it.\n",
      "\n",
      "1458 is a new item or  user 234 already rated it.\n",
      "\n",
      "1492 is a new item or  user 279 already rated it.\n",
      "\n",
      "1493 is a new item or  user 279 already rated it.\n",
      "\n",
      "1498 is a new item or  user 279 already rated it.\n",
      "\n",
      "1505 is a new item or  user 291 already rated it.\n",
      "\n",
      "1520 is a new item or  user 314 already rated it.\n",
      "\n",
      "1533 is a new item or  user 381 already rated it.\n",
      "\n",
      "1536 is a new item or  user 385 already rated it.\n",
      "\n",
      "1543 is a new item or  user 399 already rated it.\n",
      "\n",
      "1557 is a new item or  user 405 already rated it.\n",
      "\n",
      "1561 is a new item or  user 405 already rated it.\n",
      "\n",
      "1562 is a new item or  user 405 already rated it.\n",
      "\n",
      "1563 is a new item or  user 405 already rated it.\n",
      "\n",
      "1565 is a new item or  user 405 already rated it.\n",
      "\n",
      "1582 is a new item or  user 405 already rated it.\n",
      "\n",
      "1586 is a new item or  user 405 already rated it.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# All unique users in the test set\n",
    "unique_users_test = df_triplet_test['user_id'].unique()\n",
    "\n",
    "# Number of items recommended to each user\n",
    "n_rec_items = 20\n",
    "\n",
    "# Counters for Precision and Recall\n",
    "n_hits = 0\n",
    "n_total_rec_items = 0\n",
    "n_test_items = 0\n",
    "\n",
    "# Set of all recommended items (across users), for coverage\n",
    "all_rec_items = set()\n",
    "\n",
    "# Residual sum of squares, for RMSE\n",
    "rss_test = 0.0\n",
    "\n",
    "# For every test user\n",
    "for user in unique_users_test:\n",
    "    # Users unseen in training cannot be handled by collaborative filtering\n",
    "    if user not in users_index:\n",
    "        print(str(user) + ' is a new user.\\n')\n",
    "        continue\n",
    "   \n",
    "    user_records_test= df_triplet_test[df_triplet_test.user_id == user]\n",
    "    \n",
    "    # Predict scores for items this user did not rate in training and take\n",
    "    # the top n_rec_items as recommendations; returns a DataFrame\n",
    "    rec_items = svd_CF_recommend(user)\n",
    "    for i in range(n_rec_items):\n",
    "        item = rec_items.iloc[i]['item_id']\n",
    "        \n",
    "        if item in user_records_test['item_id'].values:\n",
    "            n_hits += 1\n",
    "        all_rec_items.add(item)\n",
    "    \n",
    "    # Accumulate squared residuals for RMSE\n",
    "    for i in range(user_records_test.shape[0]):\n",
    "        item = user_records_test.iloc[i]['item_id']\n",
    "        score = user_records_test.iloc[i]['rating']\n",
    "        \n",
    "        df1 = rec_items[rec_items.item_id == item]\n",
    "        if(df1.shape[0] == 0): # item not in the recommendation list: either a new item unseen in training, or the user already rated it so it cannot be recommended\n",
    "            print(str(item) + ' is a new item or  user ' + str(user) +' already rated it.\\n')\n",
    "            continue\n",
    "        pred_score = df1['score'].values[0]\n",
    "        rss_test += (pred_score - score)**2     # residual sum of squares\n",
    "    \n",
    "    # Total number of recommended items\n",
    "    n_total_rec_items += n_rec_items\n",
    "    \n",
    "    # Total number of ground-truth test items\n",
    "    n_test_items += user_records_test.shape[0]\n",
    "\n",
    "#Precision & Recall\n",
    "precision = n_hits / (1.0*n_total_rec_items)\n",
    "recall = n_hits / (1.0*n_test_items)\n",
    "\n",
    "# Coverage: fraction of all items that were ever recommended\n",
    "coverage = len(all_rec_items) / (1.0* n_items)\n",
    "\n",
    "# RMSE of predicted ratings on the test set\n",
    "rmse=np.sqrt(rss_test / df_triplet_test.shape[0])  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.07930283224400872"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "precision"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.0364"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "recall"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.14484848484848484"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "coverage"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.9251871398976897"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "rmse"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
