{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "e098d6ad",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import random\n",
    "import time\n",
    "import math"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5badb0b9",
   "metadata": {},
   "source": [
    "读取数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "861b8b45",
   "metadata": {},
   "outputs": [],
   "source": [
     "# NOTE(review): hard-coded absolute Windows paths - consider a configurable DATA_DIR\n",
     "test=pd.read_csv('E:/推荐系统/数据集/MovieLens/ml-latest/中间量/评分预测问题_1千用户_test.csv')\n",
     "train=pd.read_csv('E:/推荐系统/数据集/MovieLens/ml-latest/中间量/评分预测问题_1千用户_train.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "358b713c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集中的电影部数：8529\n",
      "训练集中的用户数：977\n"
     ]
    }
   ],
   "source": [
     "# Quick sanity check of training-set coverage (movie and user counts)\n",
     "print(\"训练集中的电影部数：%d\"%len(train['movieId'].unique()))\n",
     "print(\"训练集中的用户数：%d\"%len(train['userId'].unique()))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "af31f532",
   "metadata": {},
   "source": [
    "函数定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "de1b0555",
   "metadata": {},
   "outputs": [],
   "source": [
     "def UserSimilarity(train):\n",
     "    '''\n",
     "    Pearson-style user-user similarity from the training ratings.\n",
     "    user_rating_ave: each user's mean rating in the training set\n",
     "    Cov: numerator - covariance of two users' ratings over their co-rated movies\n",
     "    Var: denominator part - a user's squared deviations accumulated over ALL of\n",
     "         that user's ratings (not only co-rated ones)\n",
     "    If a user's ratings are all identical, Var is 0 and relevance becomes NaN.\n",
     "    Returns a dict W keyed by (userId1, userId2), stored symmetrically.\n",
     "    '''\n",
     "    # Mean rating per user\n",
     "    user_rating_ave={}\n",
     "    for userId,result in train.groupby('userId'):\n",
     "        result=result.reset_index(drop=True)\n",
     "        user_rating_ave[userId]=sum(result['rating'])/result.shape[0]\n",
     "        \n",
     "    Var={}\n",
     "    Cov={}\n",
     "    # Accumulate, movie by movie, contributions for every pair of users that co-rated it\n",
     "    for movieId,result in train.groupby('movieId'):\n",
     "        result=result.reset_index(drop=True)\n",
     "        for user_index1 in range(0,result.shape[0]):\n",
     "            userId1=result.iloc[user_index1]['userId']\n",
     "            if userId1 not in Var:\n",
     "                Var[userId1]=0\n",
     "            Var[userId1]+=(result.iloc[user_index1]['rating']-user_rating_ave[userId1])**2\n",
     "            for user_index2 in range(user_index1+1,result.shape[0]):\n",
     "                userId2=result.iloc[user_index2]['userId']\n",
     "                if (userId1,userId2) not in Cov:\n",
     "                    Cov[userId1,userId2]=0\n",
     "                    Cov[userId2,userId1]=0\n",
     "                Cov[userId1,userId2]+=(result.iloc[user_index1]['rating']-user_rating_ave[userId1])*(result.iloc[user_index2]['rating']-user_rating_ave[userId2])\n",
     "                Cov[userId2,userId1]+=(result.iloc[user_index2]['rating']-user_rating_ave[userId2])*(result.iloc[user_index1]['rating']-user_rating_ave[userId1])\n",
     "    # Normalize: W[u,v] = Cov[u,v] / sqrt(Var[u]*Var[v])\n",
     "    W={x:(y/math.sqrt(Var[x[0]]*Var[x[1]])) for x,y in Cov.items()}\n",
     "    return W\n",
     "            "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "id": "f83ef84f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def GetKNeighbors(df_W,userId,K):\n",
    "    temp=df_W[df_W['user1']==userId]\n",
    "    temp=temp.reset_index(drop=True)\n",
    "    index_list=np.argsort(list(temp['relevance']))[-K:]\n",
    "    KNeighbors=[]\n",
    "    for i in index_list:\n",
    "        KNeighbors.append(temp.loc[i,'user2'])\n",
    "        \n",
    "    return set(KNeighbors)\n",
    "def rmse(rating1,rating2):\n",
    "    #rating2为实际评分，rating1为预测\n",
    "    return (np.sum((rating1 - rating2) ** 2) / len(rating2))**0.5\n",
    "def mae(rating1,rating2):\n",
    "    return abs(np.sum((rating1 - rating2)) / len(rating2))    "
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8ea94a29",
   "metadata": {},
   "source": [
    "预测部分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "53b9bebd",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Build the user-user similarity dict (slow: pairwise loop over every co-rated movie)\n",
     "W=UserSimilarity(train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 125,
   "id": "f6f63ad9",
   "metadata": {},
   "outputs": [],
   "source": [
    "def Predict(train,test,K,W):\n",
    "    user1_lst=[]\n",
    "    user2_lst=[]\n",
    "    for key in W.keys():\n",
    "        user1_lst.append(key[0])\n",
    "        user2_lst.append(key[1])\n",
    "    df_W=pd.DataFrame({'user1':user1_lst,\n",
    "                       'user2':user2_lst,\n",
    "                       'relevance':list(W.values())})\n",
    "    df_W.columns=('user1','user2','relevance')\n",
    "    '''\n",
    "    userId:待预测的测试集用户\n",
    "    movieId：待预测的电影\n",
    "    userKneighbors：测试集用户的K近邻\n",
    "    intersection：userId用户的K近邻与评价过movieId的用户的交集\n",
    "    r_v_movieId：intersection中用户对movieId的评分\n",
    "    r_v_ave：intersection中用户在训练集中的评分均值\n",
    "    norm：预测的修正部分的分母，标准化项\n",
    "    '''\n",
    "    test['pre']=-1  \n",
    "    for index in range(test.shape[0]):\n",
    "        predict=0\n",
    "        userId=test.iloc[index]['userId']\n",
    "        movieId=test.iloc[index]['movieId']\n",
    "        if train[train['userId']==userId].shape[0]==0:                                          #训练集中不含有该用户的信息\n",
    "            print(\"无法对用户%d关于电影%d做出预测\"%(int(userId),int(movieId)))\n",
    "            continue\n",
    "\n",
    "        userKneighbors=GetKNeighbors(df_W,userId,K)\n",
    "        intersection=userKneighbors.intersection(set(train[train['movieId']==movieId]['userId']))\n",
    "        if len(intersection)==0:                                                                #无法基于邻域进行修正，故返回用户在训练集中的评分均值\n",
    "            predict=np.mean(train[train['userId']==userId]['rating'])\n",
    "            test.loc[index,'pre']=predict\n",
    "            continue\n",
    "\n",
    "        norm=0\n",
    "        for v in intersection:                                                                  #基于用户的邻域用户信息，修正预测\n",
    "            temp=train[train['userId']==v]\n",
    "            r_v_movieId=temp[temp['movieId']==movieId]['rating'].values\n",
    "            r_v_ave=np.mean(temp['rating'])\n",
    "            predict+=W[userId,v]*(r_v_movieId-r_v_ave)\n",
    "            norm+=abs(W[userId,v])\n",
    "        test.loc[index,'pre']=predict/norm\n",
    "    return test[test['pre']>0]['pre'],test[test['pre']>0]['rating']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 130,
   "id": "0c08eff5",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Predict with K=10 neighbors; returns (predictions, true ratings) for predicted rows\n",
     "rating1,rating2=Predict(train,test,10,W)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 132,
   "id": "284f5377",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "RMSE为：1.12193\n",
      "MAE为：0.03236\n"
     ]
    }
   ],
   "source": [
     "# Report test-set error metrics for the K=10 run\n",
     "print(\"RMSE为：%.5f\"%rmse(rating1,rating2))\n",
     "print(\"MAE为：%.5f\"%mae(rating1,rating2))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8386807b",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
