{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import scipy.spatial.distance as ssd\n",
    "from collections import defaultdict"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 导入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>user_id</th>\n",
       "      <th>item_id</th>\n",
       "      <th>rating</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>3</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1</td>\n",
       "      <td>3</td>\n",
       "      <td>4</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1</td>\n",
       "      <td>4</td>\n",
       "      <td>3</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1</td>\n",
       "      <td>5</td>\n",
       "      <td>3</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   user_id  item_id  rating\n",
       "0        1        1       5\n",
       "1        1        2       3\n",
       "2        1        3       4\n",
       "3        1        4       3\n",
       "4        1        5       3"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the training set; only the user_id, item_id and rating columns are used.\n",
    "df_training_data = pd.read_csv(\n",
    "    './data/movielen_rating_training.base',\n",
    "    names=['user_id','item_id','rating'],\n",
    "    usecols=[0,1,2],\n",
    "    sep='\\t')\n",
    "\n",
    "# Load the test set with the same column layout.\n",
    "df_test_data = pd.read_csv('./data/movielen_rating_test.base',\n",
    "                           sep='\\t',\n",
    "                           names=['user_id','item_id','rating'],\n",
    "                           usecols=[0,1,2])\n",
    "\n",
    "\n",
    "df_training_data.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 根据训练集，构建用户列表、商品列表、分数矩阵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 不重复的id值列表 和 id值及其在列表中的索引的对应关系字典\n",
    "def Construct_ID_map(df, colname):\n",
    "    id_list = df[colname].unique()\n",
    "    index_map_dict = {}\n",
    "    \n",
    "    for index in range(len(id_list)):\n",
    "        id_name = id_list[index]\n",
    "        index_map_dict[id_name] = index\n",
    "    \n",
    "    return id_list, index_map_dict\n",
    "\n",
    "# Id lists and id->index maps for users and items, built from the training set.\n",
    "user_id_s, user_index_map = Construct_ID_map(df_training_data, 'user_id')\n",
    "item_id_s, item_index_map = Construct_ID_map(df_training_data, 'item_id')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[5., 3., 4., ..., 0., 0., 0.],\n",
       "       [4., 0., 0., ..., 0., 0., 0.],\n",
       "       [0., 0., 0., ..., 0., 0., 0.],\n",
       "       ...,\n",
       "       [5., 0., 0., ..., 0., 0., 0.],\n",
       "       [0., 0., 0., ..., 0., 0., 0.],\n",
       "       [0., 5., 0., ..., 0., 0., 0.]])"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# User x item rating matrix; 0 means \"not rated\".\n",
    "user_item_rating_array = np.zeros(shape=(len(user_id_s),len(item_id_s)))\n",
    "\n",
    "user_rating_map = defaultdict(set)  # user_index -> set of item indices this user rated\n",
    "item_rating_map = defaultdict(set)  # item_index -> set of user indices that rated this item\n",
    "\n",
    "# itertuples is both faster than per-row .iloc and, unlike the original\n",
    "# `.iloc[row_index]` over index LABELS, stays correct even if the\n",
    "# DataFrame index is not the default 0..n-1 range.\n",
    "for row in df_training_data.itertuples(index=False):\n",
    "    # Positional indices of the rating user and the rated item\n",
    "    user_index = user_index_map[row.user_id]\n",
    "    item_index = item_index_map[row.item_id]\n",
    "    # Record who rated what, in both directions\n",
    "    user_rating_map[user_index].add(item_index)\n",
    "    item_rating_map[item_index].add(user_index)\n",
    "    \n",
    "    # Store the score at (user_index, item_index)\n",
    "    user_item_rating_array[user_index,item_index] = row.rating\n",
    "user_item_rating_array"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 用户活跃度、商品热度"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "655    685\n",
       "405    582\n",
       "450    494\n",
       "537    490\n",
       "416    417\n",
       "      ... \n",
       "93       9\n",
       "172      9\n",
       "258      8\n",
       "228      7\n",
       "310      4\n",
       "Name: user_id, Length: 943, dtype: int64"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_training_data['user_id'].value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 计算热度\n",
    "def calculate_activity(df, colname, id_index_map):\n",
    "    activity_s = df[colname].value_counts()    \n",
    "    activity_v = np.zeros(shape = (len(id_index_map),))\n",
    "    \n",
    "    #dmax = activity_s.iloc[0]\n",
    "    #activity_s = activity_s/dmax\n",
    "    \n",
    "    for ii in activity_s.index:\n",
    "        id_index = id_index_map[ii]\n",
    "        activity_v[id_index] = round(1/np.log(1+activity_s.loc[ii]),4)\n",
    "        \n",
    "    return activity_v\n",
    "\n",
    "# Inverse-activity weight per user and inverse-popularity weight per item.\n",
    "user_activity_v = calculate_activity(df_training_data, 'user_id', user_index_map)\n",
    "item_activity_v = calculate_activity(df_training_data, 'item_id', item_index_map)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 用户的平均打分向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Per-user mean rating, computed only over items the user actually rated.\n",
    "def calculate_user_rating_mu():\n",
    "    \"\"\"Return an array of mean ratings indexed by user_index, 2-decimal rounded.\"\"\"\n",
    "    user_rating_mu_s = np.zeros_like(user_id_s, dtype=float)\n",
    "    \n",
    "    for user_index in range(len(user_id_s)):        \n",
    "        # Ratings this user actually gave (zeros in the matrix mean unrated)\n",
    "        item_rating_v = np.take(\n",
    "            user_item_rating_array[user_index], # the user's rating row\n",
    "            list(user_rating_map[user_index])   # indices of items the user rated\n",
    "        )\n",
    "        # Mean of the user's ratings (the original also built an unused\n",
    "        # user_activity_s array here - dead code, removed)\n",
    "        user_rating_mu_s[user_index] = round(item_rating_v.mean(),2)\n",
    "            \n",
    "    return user_rating_mu_s\n",
    "\n",
    "# Mean rating per user; array position corresponds to user_index\n",
    "user_rating_mu_s = calculate_user_rating_mu()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 标准化用户物品打分矩阵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Mean-centered copy of the rating matrix: subtract each user's mean\n",
    "# rating from every cell that user actually rated; unrated cells stay 0.\n",
    "user_item_rating_array_norm = np.array(user_item_rating_array)\n",
    "for user_index in range(len(user_id_s)):\n",
    "    rated_items = list(user_rating_map[user_index])\n",
    "    user_item_rating_array_norm[user_index, rated_items] -= user_rating_mu_s[user_index]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 相似度矩阵 (常规算法及变种算法)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Similarity between two users (axis=0) or two items (axis=1).\n",
    "# modified_method = False: plain cosine on mean-centered ratings.\n",
    "# modified_method = True : numerator additionally weighted by index_activity\n",
    "#                          (inverse popularity of the shared ratings).\n",
    "def calculate_sim(index1,index2, axis = 0, modified_method = False, index_activity = None):\n",
    "    \"\"\"Return the similarity of two rows/columns, rounded to 2 decimals.\n",
    "\n",
    "    Returns 0.0 when the pair has no common ratings or when either\n",
    "    restricted vector has zero norm.\n",
    "    \"\"\"\n",
    "    if axis ==0:\n",
    "        data1 = user_item_rating_array_norm[index1]\n",
    "        data2 = user_item_rating_array_norm[index2]\n",
    "        \n",
    "        # Items rated by BOTH users\n",
    "        intersection_index_s = list(user_rating_map[index1] & user_rating_map[index2])\n",
    "        \n",
    "    else:\n",
    "        data1 = user_item_rating_array_norm[:,index1]\n",
    "        data2 = user_item_rating_array_norm[:,index2]\n",
    "         \n",
    "        # Users who rated BOTH items\n",
    "        intersection_index_s = list(item_rating_map[index1] & item_rating_map[index2])\n",
    "    \n",
    "    # No common ratings -> similarity 0\n",
    "    if not intersection_index_s:\n",
    "        return 0.0\n",
    "    \n",
    "    # Mean-centered rating vectors restricted to the common indices\n",
    "    v1 = np.take(data1, intersection_index_s)\n",
    "    v2 = np.take(data2, intersection_index_s)\n",
    "    \n",
    "    if modified_method and index_activity is not None:\n",
    "        # Weight each shared rating by its inverse popularity\n",
    "        vheat = np.take(index_activity, intersection_index_s)\n",
    "        vdot = v1 * v2 * vheat\n",
    "    else:\n",
    "        vdot = v1 * v2        \n",
    "    \n",
    "    # Guard the zero-norm case explicitly instead of dividing by zero and\n",
    "    # relying on the resulting nan (the original emitted RuntimeWarnings,\n",
    "    # visible in the cell outputs, before mapping nan back to 0.0).\n",
    "    denom = np.sqrt(np.sum(v1*v1)) * np.sqrt(np.sum(v2*v2))\n",
    "    if denom == 0:\n",
    "        return 0.0\n",
    "    \n",
    "    return round(vdot.sum()/denom, 2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a full symmetric similarity matrix.\n",
    "# axis = 0: user-user similarity; axis = 1: item-item similarity.\n",
    "# modified_method / activity_array are forwarded to calculate_sim.\n",
    "def calculate_similarity_array(axis = 0, modified_method = False, activity_array = None):\n",
    "    \"\"\"Return the (n x n) symmetric similarity matrix (zero diagonal).\"\"\"\n",
    "    n = len(user_id_s) if axis == 0 else len(item_id_s)\n",
    "    similarity_array = np.zeros(shape=(n, n))\n",
    "    \n",
    "    # Only the upper triangle is computed; each value is mirrored below.\n",
    "    for i in range(n):\n",
    "        for j in range(i + 1, n):\n",
    "            sim = calculate_sim(i, j, axis, modified_method, activity_array)\n",
    "            similarity_array[i, j] = sim\n",
    "            similarity_array[j, i] = sim\n",
    "            \n",
    "    return similarity_array"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 计算用户相似度，常规算法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\python\\python37\\lib\\site-packages\\ipykernel_launcher.py:38: RuntimeWarning: invalid value encountered in double_scalars\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "array([[ 0.  ,  0.52,  0.53, ...,  0.81, -0.18,  0.3 ],\n",
       "       [ 0.52,  0.  ,  0.14, ...,  0.04,  0.25,  0.71],\n",
       "       [ 0.53,  0.14,  0.  , ...,  0.74,  0.51,  1.  ],\n",
       "       ...,\n",
       "       [ 0.81,  0.04,  0.74, ...,  0.  , -0.46,  0.01],\n",
       "       [-0.18,  0.25,  0.51, ..., -0.46,  0.  ,  0.42],\n",
       "       [ 0.3 ,  0.71,  1.  , ...,  0.01,  0.42,  0.  ]])"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# User-user similarity matrix (plain cosine method)\n",
    "user_similarity_array = calculate_similarity_array(axis = 0, modified_method = False)\n",
    "user_similarity_array"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 计算商品相似度 (常规算法)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\python\\python37\\lib\\site-packages\\ipykernel_launcher.py:38: RuntimeWarning: invalid value encountered in double_scalars\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "array([[ 0.  , -0.21, -0.1 , ...,  0.  ,  1.  , -1.  ],\n",
       "       [-0.21,  0.  , -0.14, ...,  0.  ,  1.  ,  1.  ],\n",
       "       [-0.1 , -0.14,  0.  , ...,  0.  ,  0.  ,  1.  ],\n",
       "       ...,\n",
       "       [ 0.  ,  0.  ,  0.  , ...,  0.  ,  0.  ,  0.  ],\n",
       "       [ 1.  ,  1.  ,  0.  , ...,  0.  ,  0.  ,  0.  ],\n",
       "       [-1.  ,  1.  ,  1.  , ...,  0.  ,  0.  ,  0.  ]])"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Item-item similarity matrix (plain cosine method)\n",
    "item_similarity_array = calculate_similarity_array(axis = 1, modified_method = False)\n",
    "item_similarity_array"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 通过相似用户的打分，对商品评分的预测矩阵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prediction matrix from user-based CF; already-rated items keep score 0.\n",
    "user_item_predict_rating_array = np.zeros_like(user_item_rating_array)\n",
    "\n",
    "def predict_by_user_cf():\n",
    "    \"\"\"Fill user_item_predict_rating_array with user-based CF predictions.\n",
    "\n",
    "    For each unrated (user, item) pair: weight the mean-centered ratings\n",
    "    of every user who rated the item by their similarity to the target\n",
    "    user, normalize by the sum of absolute similarities, then add back\n",
    "    the target user's mean rating.\n",
    "    \"\"\"\n",
    "    for user_index in range(len(user_id_s)):\n",
    "        \n",
    "        for item_index in range(len(item_id_s)):            \n",
    "            # Skip items this user has already rated (guard clause)\n",
    "            if item_index in user_rating_map[user_index]:\n",
    "                continue\n",
    "            \n",
    "            # Users who rated this item\n",
    "            user_rating_index_v = list(item_rating_map[item_index])\n",
    "            # Nobody rated the item: no basis for a prediction\n",
    "            if not user_rating_index_v:\n",
    "                continue\n",
    "            \n",
    "            # Similarities between the target user and those raters\n",
    "            user_sim_v = np.take(user_similarity_array[user_index], user_rating_index_v)\n",
    "            \n",
    "            # np.abs instead of the original __abs__() dunder call\n",
    "            user_sim_abs_sum = np.abs(user_sim_v).sum()\n",
    "            # All raters are uncorrelated with the target user\n",
    "            if user_sim_abs_sum == 0:\n",
    "                continue\n",
    "            \n",
    "            # Those users' mean-centered ratings for this item\n",
    "            user_rating_v = np.take(user_item_rating_array_norm[:,item_index], user_rating_index_v)                \n",
    "            predict_rating = round(np.dot(user_rating_v,user_sim_v)/user_sim_abs_sum,2)+ user_rating_mu_s[user_index]\n",
    "            \n",
    "            # Store the prediction\n",
    "            user_item_predict_rating_array[user_index,item_index] = predict_rating"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[0.  , 0.  , 0.  , ..., 3.18, 3.7 , 3.31],\n",
       "       [0.  , 3.53, 3.48, ..., 3.3 , 3.82, 3.43],\n",
       "       [3.08, 2.96, 2.64, ..., 3.5 , 3.02, 0.  ],\n",
       "       ...,\n",
       "       [0.  , 3.81, 3.72, ..., 4.55, 4.07, 3.68],\n",
       "       [4.5 , 4.13, 3.96, ..., 3.77, 4.29, 3.9 ],\n",
       "       [3.67, 0.  , 2.88, ..., 2.91, 3.43, 3.04]])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Run user-based CF and display the prediction matrix\n",
    "predict_by_user_cf()\n",
    "user_item_predict_rating_array "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 通过商品的相似性，对商品评分的预测矩阵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prediction matrix from item-based CF; already-rated items keep score 0.\n",
    "user_item_predict_rating_array2 = np.zeros_like(user_item_rating_array)\n",
    "\n",
    "def predict_by_item_cf():\n",
    "    \"\"\"Fill user_item_predict_rating_array2 with item-based CF predictions.\n",
    "\n",
    "    For each unrated (user, item) pair: weight the user's mean-centered\n",
    "    ratings of other items by those items' similarity to the target\n",
    "    item, normalize by the sum of absolute similarities, then add back\n",
    "    the user's mean rating.\n",
    "    \"\"\"\n",
    "    for user_index in range(len(user_id_s)):\n",
    "        \n",
    "        # Items this user rated: set for O(1) membership tests (the\n",
    "        # original tested membership on a list, O(n) per inner iteration),\n",
    "        # list for np.take\n",
    "        rated_items_set = user_rating_map[user_index]\n",
    "        item_rating_index_v = list(rated_items_set)\n",
    "        \n",
    "        # Hoisted out of the inner loop: a user with no ratings at all\n",
    "        # can never receive an item-based prediction\n",
    "        if not item_rating_index_v:\n",
    "            continue\n",
    "        \n",
    "        for item_index in range(len(item_id_s)): \n",
    "            # Skip items the user already rated\n",
    "            if item_index in rated_items_set:\n",
    "                continue\n",
    "            \n",
    "            # Similarities between the target item and the user's rated items\n",
    "            item_sim_v = np.take(item_similarity_array[item_index], item_rating_index_v)\n",
    "            \n",
    "            # np.abs instead of the original __abs__() dunder call\n",
    "            item_sim_abs_sum = np.abs(item_sim_v).sum()\n",
    "            # No correlated items to predict from\n",
    "            if item_sim_abs_sum == 0:\n",
    "                continue\n",
    "            \n",
    "            # The user's mean-centered ratings for those items\n",
    "            item_rating_v = np.take(user_item_rating_array_norm[user_index,:], item_rating_index_v) \n",
    "            \n",
    "            predict_rating = round(np.dot(item_rating_v,item_sim_v)/item_sim_abs_sum,2) + user_rating_mu_s[user_index]\n",
    "            \n",
    "            # Store the prediction\n",
    "            user_item_predict_rating_array2[user_index,item_index] = predict_rating"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[0.  , 0.  , 0.  , ..., 3.32, 4.17, 3.37],\n",
       "       [0.  , 3.32, 3.22, ..., 3.86, 3.9 , 3.66],\n",
       "       [3.43, 2.98, 2.5 , ..., 3.1 , 3.22, 3.  ],\n",
       "       ...,\n",
       "       [0.  , 3.56, 3.56, ..., 4.32, 4.16, 3.63],\n",
       "       [4.45, 4.13, 4.21, ..., 4.04, 4.43, 4.27],\n",
       "       [4.04, 0.  , 2.83, ..., 2.92, 3.78, 3.18]])"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "predict_by_item_cf()\n",
    "\n",
    "# Display the item-based prediction matrix\n",
    "user_item_predict_rating_array2"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 对比测试集，计算均方误差"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Unique user ids that appear in the test set\n",
    "user_test_unique_s = df_test_data['user_id'].unique()\n",
    "\n",
    "# user_index of every user present in BOTH the training and test sets.\n",
    "# Membership is tested on the dict directly (the original's .keys() call\n",
    "# was redundant); the comprehension replaces the manual append loop.\n",
    "user_index_test_s = [user_index_map[user_id]\n",
    "                     for user_id in user_test_unique_s\n",
    "                     if user_id in user_index_map]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>0</th>\n",
       "      <th>1</th>\n",
       "      <th>2</th>\n",
       "      <th>3</th>\n",
       "      <th>4</th>\n",
       "      <th>5</th>\n",
       "      <th>6</th>\n",
       "      <th>7</th>\n",
       "      <th>8</th>\n",
       "      <th>9</th>\n",
       "      <th>...</th>\n",
       "      <th>1640</th>\n",
       "      <th>1641</th>\n",
       "      <th>1642</th>\n",
       "      <th>1643</th>\n",
       "      <th>1644</th>\n",
       "      <th>1645</th>\n",
       "      <th>1646</th>\n",
       "      <th>1647</th>\n",
       "      <th>1648</th>\n",
       "      <th>1649</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>4.0</td>\n",
       "      <td>3.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>456</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>457</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>458</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>459</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>461</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>459 rows × 1650 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "     0     1     2     3     4     5     6     7     8     9     ...  1640  \\\n",
       "0     0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   0.0   \n",
       "1     0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   4.0  ...   0.0   \n",
       "2     0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   0.0   \n",
       "3     0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   0.0   \n",
       "4     4.0   3.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   0.0   \n",
       "..    ...   ...   ...   ...   ...   ...   ...   ...   ...   ...  ...   ...   \n",
       "456   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   0.0   \n",
       "457   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   0.0   \n",
       "458   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   0.0   \n",
       "459   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   0.0   \n",
       "461   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   0.0   \n",
       "\n",
       "     1641  1642  1643  1644  1645  1646  1647  1648  1649  \n",
       "0     0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "1     0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "2     0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "3     0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "4     0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "..    ...   ...   ...   ...   ...   ...   ...   ...   ...  \n",
       "456   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "457   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "458   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "459   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "461   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "\n",
       "[459 rows x 1650 columns]"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Rating matrix for the test-set users (rows labeled by user_index).\n",
    "df_user_item_rating_test = pd.DataFrame(np.zeros(shape=(len(user_index_test_s),len(item_id_s))),index = user_index_test_s)\n",
    "\n",
    "# itertuples: faster than per-row .loc and independent of the index labels\n",
    "for row in df_test_data.itertuples(index=False):\n",
    "    # BUGFIX: the original tested `user_id in user_rating_map`, but that\n",
    "    # dict is keyed by user INDEX, not user id - a test-set user absent\n",
    "    # from training could slip through and raise a KeyError below.\n",
    "    # Membership must be checked against user_index_map.\n",
    "    if row.user_id in user_index_map and row.item_id in item_index_map:\n",
    "        # .loc[row_label, col_label] instead of chained indexing\n",
    "        # (df[col][row]) to avoid SettingWithCopy pitfalls\n",
    "        df_user_item_rating_test.loc[user_index_map[row.user_id], item_index_map[row.item_id]] = row.rating\n",
    "\n",
    "# Display the test-set rating matrix\n",
    "df_user_item_rating_test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# 计算均方误差\n",
    "def calculate_RMSE(user_item_predict_rating_array):\n",
    "    # acc_locc为分子，二次损失\n",
    "    acc_loss = 0\n",
    "    # acc_num为分母，一共计算了多少项\n",
    "    acc_num = 0\n",
    "    # 对测试集的index进行遍历\n",
    "    for user_index in df_user_item_rating_test.index:\n",
    "        \n",
    "        test_row_data = np.array(df_user_item_rating_test.loc[user_index])  # 测试集中user_index对应的分数用户用户该用户分数行向量\n",
    "        test_index_v =  np.where(test_row_data > 0)  # 测试集中对应用户用户该用户分数打过分的商品索引\n",
    "        \n",
    "        predict_row_data = user_item_predict_rating_array[user_index]  # 预测矩阵中该用户分数的分数行向量       \n",
    "        predict_index_v = np.where(predict_row_data > 0)  # 预测矩阵中对应用户该用户分数打过分的商品索引\n",
    "        \n",
    "        # 取test_index_v和predict_index_v的交集，即预测过打分，而且也在测试集中出现实际打分\n",
    "        intersection_index_s = list(\n",
    "            set(test_index_v[0]) & set(predict_index_v[0])\n",
    "        )\n",
    "        # 如果交集为空，continue\n",
    "        if not intersection_index_s:\n",
    "            continue\n",
    "        \n",
    "        # 根据上述的交集索引，取得测试集中的打分向量和预测矩阵中的打分向量\n",
    "        test_rating_v = np.take(test_row_data,intersection_index_s)\n",
    "        predict_rating_v = np.clip(\n",
    "            np.take(predict_row_data,intersection_index_s),0,5\n",
    "        )\n",
    "        \n",
    "        # 计算二次损失\n",
    "        acc_loss += np.square(test_rating_v - predict_rating_v).sum()\n",
    "        # 分母叠加个数\n",
    "        acc_num += len(intersection_index_s)\n",
    "    \n",
    "    # 得出均方误差\n",
    "    return np.sqrt(acc_loss/acc_num)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 根据User-CF预测分数的均方差"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.9663649394886585"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 计算均方误差并打印\n",
    "RMSE = calculate_RMSE(user_item_predict_rating_array)\n",
    "RMSE"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 根据Item-CF预测分数的均方差"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.97871981586394"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 计算均方误差并打印\n",
    "RMSE = calculate_RMSE(user_item_predict_rating_array2)\n",
    "RMSE"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 推荐商品, 计算推荐商品的准确率、召回率和覆盖率"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* 推荐商品"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 推荐商品，predict_quantity是推荐的商品的个数\n",
    "def predict(user_item_predict_rating_array,predict_quantity):\n",
    "    \n",
    "    # 建立一个商品推荐字典,保存对user_index推荐的商品索引\n",
    "    # 例如：predict_item_index_map[user_index] = [4,3,2,5] \n",
    "    predict_item_index_map = {}\n",
    "    \n",
    "    # 对训练集中所有的user_index进行遍历\n",
    "    for user_index in range(len(user_id_s)):\n",
    "        \n",
    "        # 预测矩阵中对应user_index的向量，进行倒序排列\n",
    "        predict_item_index_v = list(np.argsort(-user_item_predict_rating_array[user_index]))\n",
    "        \n",
    "        # 取min(推荐个数，商品个数)个预测分数最高的商品索引\n",
    "        predict_item_index_v = predict_item_index_v[0:min(predict_quantity,len(predict_item_index_v))]\n",
    "        \n",
    "        # 添加到商品推荐字典中\n",
    "        predict_item_index_map[user_index] = predict_item_index_v\n",
    "    \n",
    "    # 返回商品推荐字典\n",
    "    return predict_item_index_map"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* 准确率与召回率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 计算准确率与召回率\n",
    "def calculate_precision_and_recall(predict_item_index_map):\n",
    "    \n",
    "    union_num = 0     # 推荐的商品也在测试集中出现的总数    \n",
    "    predict_num = 0   # 推荐的商品的总数    \n",
    "    test_num = 0      # 测试集中出现的商品总数\n",
    "    \n",
    "    # 对测试集的user_index进行遍历\n",
    "    for user_index in df_user_item_rating_test.index:\n",
    "        # 对测试集中的user_index打过分的商品，进行倒排序，得到索引\n",
    "        #（这里其实没有使用到倒序排列功能，比如可以取测试集中倒序的前100个所以test_item_v）\n",
    "        test_item_v = np.where(df_user_item_rating_test[user_index]>=3)[0].tolist()\n",
    "        \n",
    "        # 推荐的商品也在测试集中出现的总数做叠加\n",
    "        union_num += len(\n",
    "            set(predict_item_index_map[user_index]) & set(test_item_v)\n",
    "        )\n",
    "        # 推荐的商品的总数做叠加\n",
    "        predict_num += len(predict_item_index_map[user_index])\n",
    "        \n",
    "        # 测试集中出现的商品总数做叠加\n",
    "        test_num += len(test_item_v)\n",
    "        \n",
    "    # 返回准确率与召回率\n",
    "    return union_num / predict_num,union_num/test_num"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* 覆盖率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义计算覆盖率\n",
    "def calculate_coverage(predict_item_index_map):\n",
    "    # 推荐的物品索引集合\n",
    "    predict_item_index_set = set()\n",
    "    # 把所有用户推荐过的商品id都添加到predict_item_index_set里，然后根据predict_item_index_set的数量，计算覆盖度\n",
    "    for user_index in predict_item_index_map.keys():\n",
    "        for item_index in predict_item_index_map[user_index]:\n",
    "            predict_item_index_set.add(item_index)\n",
    "    return len(predict_item_index_set) / len(item_id_s)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 根据 User-CF 推荐"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precision= 0.0024400871459694987\n",
      "recall= 0.0055232271427162445\n",
      "coverage= 0.39636363636363636\n"
     ]
    }
   ],
   "source": [
    "# 推荐50个商品\n",
    "predict_item_index_map = predict(user_item_predict_rating_array,50)\n",
    "\n",
    "# 计算准确率与召回率\n",
    "precision,recall = calculate_precision_and_recall(predict_item_index_map)\n",
    "print('precision=',precision)\n",
    "print('recall=',recall)\n",
    "\n",
    "# 计算并打印覆盖度\n",
    "coverage = calculate_coverage(predict_item_index_map)\n",
    "print('coverage=',coverage)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 根据 Item-CF 推荐"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precision= 0.021742919389978212\n",
      "recall= 0.049215899003846535\n",
      "coverage= 0.9557575757575758\n"
     ]
    }
   ],
   "source": [
    "# 推荐50个商品\n",
    "predict_item_index_map2 = predict(user_item_predict_rating_array2,50)\n",
    "\n",
    "# 计算准确率与召回率\n",
    "precision,recall = calculate_precision_and_recall(predict_item_index_map2)\n",
    "print('precision=',precision)\n",
    "print('recall=',recall)\n",
    "\n",
    "# 计算并打印覆盖度\n",
    "coverage = calculate_coverage(predict_item_index_map2)\n",
    "print('coverage=',coverage)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 结论：根据 Item-CF 预测分数、推荐商品，具有更高的准确度"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 变种 User-CF、变种Item-CF 计算商品相似度，推荐商品"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\python\\python37\\lib\\site-packages\\ipykernel_launcher.py:38: RuntimeWarning: invalid value encountered in double_scalars\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "根据变种 User-CF 推荐商品 ...\n",
      "RMSE =  0.9649885287287755\n",
      "precision =  0.002570806100217865\n",
      "recall =  0.005819114311076043\n",
      "coverage =  0.3981818181818182\n",
      "\n",
      "根据变种 Item-CF 推荐商品 ...\n",
      "RMSE =  0.9784426298807453\n",
      "precision =  0.02122004357298475\n",
      "recall =  0.048032350330407335\n",
      "coverage =  0.9527272727272728\n"
     ]
    }
   ],
   "source": [
    "user_similarity_array = calculate_similarity_array(axis = 0, modified_method = True, activity_array = item_activity_v)\n",
    "item_similarity_array = calculate_similarity_array(axis = 1, modified_method = True, activity_array = user_activity_v)\n",
    "\n",
    "print('\\n根据变种 User-CF 推荐商品 ...')\n",
    "\n",
    "predict_by_user_cf()\n",
    "RMSE = calculate_RMSE(user_item_predict_rating_array)\n",
    "print('RMSE = ', RMSE)\n",
    "\n",
    "predict_item_index_map = predict(user_item_predict_rating_array,50)\n",
    "precision,recall = calculate_precision_and_recall(predict_item_index_map)\n",
    "print('precision = ',precision)\n",
    "print('recall = ',recall)\n",
    "coverage = calculate_coverage(predict_item_index_map)\n",
    "print('coverage = ',coverage)\n",
    "\n",
    "print('\\n根据变种 Item-CF 推荐商品 ...')\n",
    "\n",
    "predict_by_item_cf()\n",
    "RMSE = calculate_RMSE(user_item_predict_rating_array2)\n",
    "print('RMSE = ', RMSE)\n",
    "\n",
    "predict_item_index_map2 = predict(user_item_predict_rating_array2,50)\n",
    "precision,recall = calculate_precision_and_recall(predict_item_index_map2)\n",
    "print('precision = ',precision)\n",
    "print('recall = ',recall)\n",
    "coverage = calculate_coverage(predict_item_index_map2)\n",
    "print('coverage = ',coverage)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 本案例中，通过变种公式计算相似度矩阵，对分数预测、推荐商品作用不大"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
