{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 基于SVD的协同过滤 (SVD-based Collaborative Filtering)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "#load数据（用户和物品索引，以及倒排表）\n",
    "import pickle as pk\n",
    "import json  \n",
    "\n",
    "from numpy.random import random\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# directory holding the intermediate / preprocessed data files\n",
    "temp_data = './temp_data/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the precomputed lookups: user/item -> index maps and the\n",
    "# inverted lists (user -> items, item -> users).\n",
    "# Use context managers so the file handles are closed (the original\n",
    "# pk.load(open(...)) calls leaked them).\n",
    "with open(temp_data + \"users_index.pkl\", 'rb') as f:\n",
    "    users_index = pk.load(f)\n",
    "with open(temp_data + \"items_index.pkl\", 'rb') as f:\n",
    "    items_index = pk.load(f)\n",
    "with open(temp_data + \"user_2_items.pkl\", 'rb') as f:\n",
    "    user_2_items = pk.load(f)\n",
    "with open(temp_data + \"item_2_users.pkl\", 'rb') as f:\n",
    "    item_2_users = pk.load(f)\n",
    "\n",
    "n_users = len(users_index)\n",
    "n_items = len(items_index)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>user</th>\n",
       "      <th>item</th>\n",
       "      <th>score</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>7bdfc45af7e15511d150e2acb798cd5e4788abf5</td>\n",
       "      <td>SOXBCZH12A67ADAD77</td>\n",
       "      <td>8</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>c405c586f6d7aadbbadfcba5393b543fd99372ff</td>\n",
       "      <td>SOXFYTY127E9433E7D</td>\n",
       "      <td>3</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>625d0167edbc5df88e9fbebe3fcdd6b121a316bb</td>\n",
       "      <td>SONOYIB12A81C1F88C</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>20ad98ab543da9ec41c6ac3b6354c5ab3ca6bc5e</td>\n",
       "      <td>SOIMCDE12A6D4F8383</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>d331a8bf7d0ca9cb37e375496e6075603f6fb44a</td>\n",
       "      <td>SONYKOW12AB01849C9</td>\n",
       "      <td>40</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                       user                item  score\n",
       "0  7bdfc45af7e15511d150e2acb798cd5e4788abf5  SOXBCZH12A67ADAD77      8\n",
       "1  c405c586f6d7aadbbadfcba5393b543fd99372ff  SOXFYTY127E9433E7D      3\n",
       "2  625d0167edbc5df88e9fbebe3fcdd6b121a316bb  SONOYIB12A81C1F88C      0\n",
       "3  20ad98ab543da9ec41c6ac3b6354c5ab3ca6bc5e  SOIMCDE12A6D4F8383      0\n",
       "4  d331a8bf7d0ca9cb37e375496e6075603f6fb44a  SONYKOW12AB01849C9     40"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the TRAINING rating triplets (the original comment said 'test',\n",
    "# but this reads triplet_dataset_train.csv)\n",
    "triplet_cols = ['user','item', 'score'] \n",
    "\n",
    "df_triplet = pd.read_csv(temp_data +'triplet_dataset_train.csv', sep=',', names=triplet_cols, encoding='latin-1', header=0)\n",
    "df_triplet.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(800, 40)"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# latent dimensionality\n",
    "K = 40\n",
    "\n",
    "# item and user bias terms\n",
    "bi = np.zeros((n_items,1))    \n",
    "bu = np.zeros((n_users,1))   \n",
    "\n",
    "# item and user latent vectors, uniform in [0, sqrt(K)/10)\n",
    "# (vectorized single draw; the original filled one row at a time in\n",
    "# Python loops with random((K,1)) + reshape — same distribution)\n",
    "qi = random((n_items, K)) / 10 * np.sqrt(K)\n",
    "pu = random((n_users, K)) / 10 * np.sqrt(K)\n",
    "\n",
    "# global mean score over all training records\n",
    "mu = df_triplet['score'].mean()  #average rating\n",
    "\n",
    "qi.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "7.522072297184741"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# display the global mean rating computed above\n",
    "mu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def svd_pred(user_index, item_index):\n",
    "    \"\"\"Predicted score = global mean + item bias + user bias + latent interaction.\"\"\"\n",
    "    interaction = np.dot(qi[item_index], pu[user_index])\n",
    "    return mu + bi[item_index] + bu[user_index] + interaction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "start\n",
      "The 0-th  step is running\n"
     ]
    },
    {
     "ename": "AttributeError",
     "evalue": "'numpy.ndarray' object has no attribute 'clone'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-8-6b659a1cd854>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     35\u001b[0m         \u001b[0mbi\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mitem_index\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mgamma\u001b[0m \u001b[1;33m*\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0meui\u001b[0m \u001b[1;33m-\u001b[0m \u001b[0mLambda\u001b[0m \u001b[1;33m*\u001b[0m \u001b[0mbi\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mitem_index\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     36\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 37\u001b[1;33m         \u001b[0mtemp\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mqi\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mitem_index\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mclone\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     38\u001b[0m         \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mitem_index\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"temp1\"\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtemp\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     39\u001b[0m         \u001b[0mqi\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mitem_index\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mgamma\u001b[0m \u001b[1;33m*\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0meui\u001b[0m\u001b[1;33m*\u001b[0m \u001b[0mpu\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0muser_index\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m-\u001b[0m \u001b[0mLambda\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mqi\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mitem_index\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mAttributeError\u001b[0m: 'numpy.ndarray' object has no attribute 'clone'"
     ]
    }
   ],
   "source": [
    "#gamma：为学习率\n",
    "#Lambda：正则参数\n",
    "#steps：迭代次数\n",
    "\n",
    "steps=50\n",
    "gamma=0.04\n",
    "Lambda=0.15\n",
    "\n",
    "#总的打分记录数目\n",
    "n_records = df_triplet.shape[0]\n",
    "print(\"start\")\n",
    "\n",
    "for step in range(steps):  \n",
    "    print ('The ' + str(step) + '-th  step is running' )\n",
    "    rmse_sum=0.0 \n",
    "            \n",
    "    #将训练样本打散顺序\n",
    "    kk = np.random.permutation(n_records)  \n",
    "    for j in range(n_records):  \n",
    "        #每次一个训练样本\n",
    "        line = kk[j]  \n",
    "        \n",
    "        user_index = users_index [df_triplet.iloc[line]['user']]\n",
    "        item_index = items_index [df_triplet.iloc[line]['item']]\n",
    "    \n",
    "        score  = df_triplet.iloc[line]['score']\n",
    "                \n",
    "        #预测残差\n",
    "        eui = score - svd_pred(user_index, item_index)  \n",
    "        #残差平方和\n",
    "        rmse_sum += eui**2  \n",
    "                \n",
    "        #随机梯度下降，更新\n",
    "        bu[user_index] += gamma * (eui - Lambda * bu[user_index])  \n",
    "        bi[item_index] += gamma * (eui - Lambda * bi[item_index]) \n",
    "                \n",
    "        temp = qi[item_index].copy()\n",
    "        print(item_index, \"temp1\",temp)\n",
    "        qi[item_index] += gamma * (eui* pu[user_index]- Lambda*qi[item_index] )  \n",
    "        print(\"temp2\", temp)\n",
    "        pu[user_index] += gamma * (eui* temp - Lambda*pu[user_index])  \n",
    "            \n",
    "    #学习率递减\n",
    "    gamma=gamma*0.93  \n",
    "    print (\"the rmse of this step on train data is \",np.sqrt(rmse_sum/n_records))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def save_json(filepath):\n",
    "    \"\"\"Serialize the trained model (mu, K, biases, latent factors) to a JSON file.\"\"\"\n",
    "    model = {\n",
    "        'mu': mu,\n",
    "        'K': K,\n",
    "        'bi': bi.tolist(),\n",
    "        'bu': bu.tolist(),\n",
    "        'qi': qi.tolist(),\n",
    "        'pu': pu.tolist(),\n",
    "    }\n",
    "\n",
    "    # dump and write in one step\n",
    "    with open(filepath, 'w') as file:\n",
    "        file.write(json.dumps(model))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_json(filepath):\n",
    "    with open(filepath, 'r') as file:\n",
    "        dict_ = json.load(file)\n",
    "\n",
    "        mu = dict_['mu']\n",
    "        K = dict_['K']\n",
    "\n",
    "        bi = np.asarray(dict_['bi'])\n",
    "        bu = np.asarray(dict_['bu'])\n",
    "    \n",
    "        qi = np.asarray(dict_['qi'])\n",
    "        pu = np.asarray(dict_['pu'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "save_json(temp_data + 'svd_model.json')\n",
    "load_json(temp_data + 'svd_model.json')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def recommend(tar_user):\n",
    "    \"\"\"Rank unseen items for tar_user by predicted score.\n",
    "\n",
    "    Returns a DataFrame with columns ['item', 'score'], best first,\n",
    "    excluding items the user already rated in the training set.\n",
    "    \"\"\"\n",
    "    tar_user_index = users_index[tar_user]\n",
    "    # items this user rated in the training set\n",
    "    tar_user_items = user_2_items[tar_user_index]\n",
    "    # predicted score for every item (stays 0 for already-rated ones)\n",
    "    tar_user_items_pred_scores = np.zeros(n_items)\n",
    "    \n",
    "    # predict\n",
    "    for i in range(n_items):\n",
    "        if i not in tar_user_items:\n",
    "            tar_user_items_pred_scores[i] = svd_pred(tar_user_index, i)\n",
    "    \n",
    "    # sort (score, item_index) pairs by score, descending\n",
    "    sort_index = sorted(((e, i) for i, e in enumerate(tar_user_items_pred_scores)), reverse=True)\n",
    "    \n",
    "    # reverse lookup built once; the original rebuilt two full lists per\n",
    "    # recommended item, which was O(n^2)\n",
    "    index_2_item = {v: k for k, v in items_index.items()}\n",
    "    \n",
    "    # collect rows first, then build the frame once (row-wise .loc\n",
    "    # appends are quadratic)\n",
    "    rows = []\n",
    "    for pred_score, cur_item_index in sort_index:\n",
    "        if not np.isnan(pred_score) and cur_item_index not in tar_user_items:\n",
    "            rows.append([index_2_item[cur_item_index], pred_score])\n",
    "    \n",
    "    return pd.DataFrame(rows, columns=['item', 'score'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the held-out TEST rating triplets\n",
    "triplet_cols = ['user','item', 'score'] \n",
    "\n",
    "df_triplet_test = pd.read_csv(temp_data +'triplet_dataset_test.csv', sep=',', names=triplet_cols, encoding='latin-1', header=0)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "RECOMMEND_ITEM_SIZE = 10\n",
    "\n",
    "n_hits = 0            # recommended items that appear in the user's test records\n",
    "n_total_rec_items = 0 # total number of recommended items\n",
    "n_test_items = 0      # total number of real test records\n",
    "\n",
    "# union of recommended items over all users, used for coverage\n",
    "all_rec_items = set()\n",
    "\n",
    "# users appearing in the test split\n",
    "unique_users_test = df_triplet_test['user'].unique()\n",
    "\n",
    "# users processed so far\n",
    "complete_user_count = 0\n",
    "\n",
    "for user in unique_users_test:\n",
    "    if user not in users_index:\n",
    "        # cold-start user: unseen in training, nothing to predict\n",
    "        print(str(user) + ' is a new user\\n') \n",
    "        continue\n",
    "    # this user's records in the test split\n",
    "    user_records_test= df_triplet_test[df_triplet_test.user == user]\n",
    "    # ranked recommendations\n",
    "    rec_items = recommend(user)\n",
    "    \n",
    "    # guard against fewer recommendations than requested (the original\n",
    "    # raised IndexError when recommend() returned a short frame)\n",
    "    n_rec = min(RECOMMEND_ITEM_SIZE, len(rec_items))\n",
    "    for i in range(n_rec):\n",
    "        item = rec_items.iloc[i]['item']\n",
    "        \n",
    "        if item in user_records_test['item'].values:\n",
    "            n_hits += 1\n",
    "        all_rec_items.add(item)\n",
    "        \n",
    "    # count only items actually recommended\n",
    "    n_total_rec_items += n_rec\n",
    "    \n",
    "    # total real test records\n",
    "    n_test_items += user_records_test.shape[0]   \n",
    "    \n",
    "    complete_user_count += 1\n",
    "    print('complete:' + str(user) + \" progress:\" + str(complete_user_count/len(unique_users_test)))\n",
    "\n",
    "#Precision & Recall\n",
    "precision = n_hits / (1.0*n_total_rec_items)\n",
    "recall = n_hits / (1.0*n_test_items)\n",
    "\n",
    "# coverage: fraction of the catalog that was ever recommended\n",
    "coverage = len(all_rec_items) / (1.0* n_items)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
