{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "启动训练前，先复用前面章节的数据处理和神经网络模型代码，如已阅读过可直接跳过。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import paddle \n",
    "import paddle.fluid as fluid\n",
    "import paddle.fluid.dygraph as dygraph\n",
    "from paddle.fluid.dygraph import FC, Conv2D, Embedding, Pool2D\n",
    "import numpy as np\n",
    "import random\n",
    "\n",
    "\n",
    "class MovieLen(object):\n",
    "    \"\"\"Loader for the ml-1m MovieLens data: parses users, movies and\n",
    "    ratings, and builds batched train/valid data generators.\"\"\"\n",
    "    def __init__(self, use_poster):\n",
    "        # use_poster selects which rating file to read and enables\n",
    "        # poster-image reading in load_data.\n",
    "        self.use_poster = use_poster\n",
    "        # Path of each raw data file.\n",
    "        usr_info_path = \"./work/ml-1m/users.dat\"\n",
    "        if not use_poster:\n",
    "            rating_path = \"./work/ml-1m/ratings.dat\"\n",
    "        else:\n",
    "            rating_path = \"./work/ml-1m/new_rating.txt\"\n",
    "\n",
    "        movie_info_path = \"./work/ml-1m/movies.dat\"\n",
    "        self.poster_path = \"./work/ml-1m/posters/\"\n",
    "        # Parse the movie data.\n",
    "        self.movie_info, self.movie_cat, self.movie_title = self.get_movie_info(movie_info_path)\n",
    "        # Record the max indices of categories / title words / movie IDs,\n",
    "        # used later to size the embedding tables.\n",
    "        self.max_mov_cat = np.max([self.movie_cat[k] for k in self.movie_cat])\n",
    "        self.max_mov_tit = np.max([self.movie_title[k] for k in self.movie_title])\n",
    "        self.max_mov_id = np.max(list(map(int, self.movie_info.keys())))\n",
    "        # Max IDs of the user data, filled in by get_usr_info below.\n",
    "        self.max_usr_id = 0\n",
    "        self.max_usr_age = 0\n",
    "        self.max_usr_job = 0\n",
    "        # Parse the user data.\n",
    "        self.usr_info = self.get_usr_info(usr_info_path)\n",
    "        # Parse the rating data.\n",
    "        self.rating_info = self.get_rating_info(rating_path)\n",
    "        # Assemble one sample per (user, movie, score) rating record.\n",
    "        self.dataset = self.get_dataset(usr_info=self.usr_info,\n",
    "                                        rating_info=self.rating_info,\n",
    "                                        movie_info=self.movie_info)\n",
    "        # Split the dataset 90%/10% into train and valid parts.\n",
    "        self.train_dataset = self.dataset[:int(len(self.dataset)*0.9)]\n",
    "        self.valid_dataset = self.dataset[int(len(self.dataset)*0.9):]\n",
    "        print(\"##Total dataset instances: \", len(self.dataset))\n",
    "        print(\"##MovieLens dataset information: \\nusr num: {}\\n\"\n",
    "              \"movies num: {}\".format(len(self.usr_info),len(self.movie_info)))\n",
    "    # Parse movies.dat into per-movie feature dicts plus two vocabularies.\n",
    "    def get_movie_info(self, path):\n",
    "        # Open the file with ISO-8859-1 encoding and read all lines.\n",
    "        with open(path, 'r', encoding=\"ISO-8859-1\") as f:\n",
    "            data = f.readlines()\n",
    "        # Three dicts: full movie info, title-word vocab, category vocab.\n",
    "        movie_info, movie_titles, movie_cat = {}, {}, {}\n",
    "        # Running word indices for titles and categories; index 0 is\n",
    "        # reserved for padding, so counting starts at 1.\n",
    "        t_count, c_count = 1, 1\n",
    "\n",
    "        # Process the file line by line.\n",
    "        for item in data:\n",
    "            item = item.strip().split(\"::\")\n",
    "            v_id = item[0]\n",
    "            # Title field looks like \"Name (1995)\": strip the year suffix.\n",
    "            v_title = item[1][:-7]\n",
    "            cats = item[2].split('|')\n",
    "            v_year = item[1][-5:-1]\n",
    "\n",
    "            titles = v_title.split()\n",
    "            # Assign each unseen title word the next index in movie_titles.\n",
    "            for t in titles:\n",
    "                if t not in movie_titles:\n",
    "                    movie_titles[t] = t_count\n",
    "                    t_count += 1\n",
    "            # Assign each unseen category the next index in movie_cat.\n",
    "            for cat in cats:\n",
    "                if cat not in movie_cat:\n",
    "                    movie_cat[cat] = c_count\n",
    "                    c_count += 1\n",
    "            # Zero-pad the title word-id list to a fixed length of 15.\n",
    "            v_tit = [movie_titles[k] for k in titles]\n",
    "            while len(v_tit)<15:\n",
    "                v_tit.append(0)\n",
    "            # Zero-pad the category id list to a fixed length of 6.\n",
    "            v_cat = [movie_cat[k] for k in cats]\n",
    "            while len(v_cat)<6:\n",
    "                v_cat.append(0)\n",
    "            # Store this movie's features keyed by its ID string.\n",
    "            movie_info[v_id] = {'mov_id': int(v_id),\n",
    "                                'title': v_tit,\n",
    "                                'category': v_cat,\n",
    "                                'years': int(v_year)}\n",
    "        return movie_info, movie_cat, movie_titles\n",
    "\n",
    "    # Parse users.dat into a dict of per-user feature dicts.\n",
    "    def get_usr_info(self, path):\n",
    "        # Map gender to a number: M->0, F->1.\n",
    "        def gender2num(gender):\n",
    "            return 1 if gender == 'F' else 0\n",
    "\n",
    "        # Read all lines of users.dat.\n",
    "        with open(path, 'r') as f:\n",
    "            data = f.readlines()\n",
    "        # Per-user feature dicts keyed by user ID string.\n",
    "        use_info = {}\n",
    "\n",
    "        # Process the file line by line.\n",
    "        for item in data:\n",
    "            # Split the '::'-separated fields.\n",
    "            item = item.strip().split(\"::\")\n",
    "            usr_id = item[0]\n",
    "            # Convert the string fields to numbers and store them.\n",
    "            use_info[usr_id] = {'usr_id': int(usr_id),\n",
    "                                'gender': gender2num(item[1]),\n",
    "                                'age': int(item[2]),\n",
    "                                'job': int(item[3])}\n",
    "            # Track the maxima for embedding-table sizing.\n",
    "            self.max_usr_id = max(self.max_usr_id, int(usr_id))\n",
    "            self.max_usr_age = max(self.max_usr_age, int(item[2]))\n",
    "            self.max_usr_job = max(self.max_usr_job, int(item[3]))\n",
    "        return use_info\n",
    "    # Parse the rating file into {usr_id: {movie_id: score}}.\n",
    "    def get_rating_info(self, path):\n",
    "        # Read all lines of the rating file.\n",
    "        with open(path, 'r') as f:\n",
    "            data = f.readlines()\n",
    "        # Collect the scores into a nested dict and return it.\n",
    "        rating_info = {}\n",
    "        for item in data:\n",
    "            item = item.strip().split(\"::\")\n",
    "            usr_id,movie_id,score = item[0],item[1],item[2]\n",
    "            if usr_id not in rating_info.keys():\n",
    "                rating_info[usr_id] = {movie_id:float(score)}\n",
    "            else:\n",
    "                rating_info[usr_id][movie_id] = float(score)\n",
    "        return rating_info\n",
    "    # Join users, movies and ratings into a flat list of samples.\n",
    "    def get_dataset(self, usr_info, rating_info, movie_info):\n",
    "        trainset = []\n",
    "        for usr_id in rating_info.keys():\n",
    "            usr_ratings = rating_info[usr_id]\n",
    "            for movie_id in usr_ratings:\n",
    "                trainset.append({'usr_info': usr_info[usr_id],\n",
    "                                 'mov_info': movie_info[movie_id],\n",
    "                                 'scores': usr_ratings[movie_id]})\n",
    "        return trainset\n",
    "    \n",
    "    # Build a batched generator over `dataset`; shuffles when mode=='train'.\n",
    "    def load_data(self, dataset=None, mode='train'):\n",
    "        # Honor the flag chosen at construction time (this used to be\n",
    "        # hard-coded to False, silently disabling poster features).\n",
    "        use_poster = self.use_poster\n",
    "\n",
    "        # Number of samples per yielded batch.\n",
    "        BATCHSIZE = 256\n",
    "\n",
    "        data_length = len(dataset)\n",
    "        index_list = list(range(data_length))\n",
    "        # Generator that yields feature batches; note that a trailing\n",
    "        # partial batch smaller than BATCHSIZE is dropped.\n",
    "        def data_generator():\n",
    "            # Shuffle the sample order in training mode.\n",
    "            if mode == 'train':\n",
    "                random.shuffle(index_list)\n",
    "            # One accumulator list per feature.\n",
    "            usr_id_list,usr_gender_list,usr_age_list,usr_job_list = [], [], [], []\n",
    "            mov_id_list,mov_tit_list,mov_cat_list,mov_poster_list = [], [], [], []\n",
    "            score_list = []\n",
    "            # Walk the (possibly shuffled) sample indices.\n",
    "            for idx, i in enumerate(index_list):\n",
    "                # Append each feature of the sample to its list.\n",
    "                usr_id_list.append(dataset[i]['usr_info']['usr_id'])\n",
    "                usr_gender_list.append(dataset[i]['usr_info']['gender'])\n",
    "                usr_age_list.append(dataset[i]['usr_info']['age'])\n",
    "                usr_job_list.append(dataset[i]['usr_info']['job'])\n",
    "\n",
    "                mov_id_list.append(dataset[i]['mov_info']['mov_id'])\n",
    "                mov_tit_list.append(dataset[i]['mov_info']['title'])\n",
    "                mov_cat_list.append(dataset[i]['mov_info']['category'])\n",
    "                mov_id = dataset[i]['mov_info']['mov_id']\n",
    "\n",
    "                if use_poster:\n",
    "                    # Only read poster images when poster features are\n",
    "                    # enabled; skipping them keeps data loading fast.\n",
    "                    # (Fixed: mov_id is an int, so the old str(mov_id[0])\n",
    "                    # raised a TypeError.)\n",
    "                    poster = Image.open(self.poster_path+'mov_id{}.jpg'.format(str(mov_id)))\n",
    "                    poster = poster.resize([64, 64])\n",
    "                    # Force 3 channels. (Fixed: the old len(poster.size)<=2\n",
    "                    # test was always true, since size is a (w, h) pair.)\n",
    "                    if poster.mode != \"RGB\":\n",
    "                        poster = poster.convert(\"RGB\")\n",
    "\n",
    "                    mov_poster_list.append(np.array(poster))\n",
    "\n",
    "                score_list.append(int(dataset[i]['scores']))\n",
    "                # Once a full batch is collected, yield it.\n",
    "                if len(usr_id_list)==BATCHSIZE:\n",
    "                    # Convert the lists to arrays of the expected shapes.\n",
    "                    usr_id_arr = np.expand_dims(np.array(usr_id_list), axis=-1)\n",
    "                    usr_gender_arr = np.expand_dims(np.array(usr_gender_list), axis=-1)\n",
    "                    usr_age_arr = np.expand_dims(np.array(usr_age_list), axis=-1)\n",
    "                    usr_job_arr = np.expand_dims(np.array(usr_job_list), axis=-1)\n",
    "\n",
    "                    mov_id_arr = np.expand_dims(np.array(mov_id_list), axis=-1)\n",
    "                    mov_cat_arr = np.reshape(np.array(mov_cat_list), [BATCHSIZE, 1, 6, 1]).astype(np.int64)\n",
    "                    mov_tit_arr = np.reshape(np.array(mov_tit_list), [BATCHSIZE, 1, 15, 1]).astype(np.int64)\n",
    "\n",
    "\n",
    "                    if use_poster:\n",
    "                        # HWC -> CHW before normalizing pixels to [-1, 1].\n",
    "                        # (Fixed: the old reshape scrambled the channel axis.)\n",
    "                        mov_poster_arr = (np.transpose(np.array(mov_poster_list), [0, 3, 1, 2])/127.5 - 1).astype(np.float32)\n",
    "                    else:\n",
    "                        mov_poster_arr = np.array([0.])\n",
    "\n",
    "                    scores_arr = np.reshape(np.array(score_list), [-1, 1]).astype(np.float32)\n",
    "\n",
    "                    # Yield the current batch.\n",
    "                    yield [usr_id_arr, usr_gender_arr, usr_age_arr, usr_job_arr], \\\n",
    "                           [mov_id_arr, mov_cat_arr, mov_tit_arr, mov_poster_arr], scores_arr\n",
    "\n",
    "                    # Reset the accumulators for the next batch.\n",
    "                    usr_id_list, usr_gender_list, usr_age_list, usr_job_list = [], [], [], []\n",
    "                    mov_id_list, mov_tit_list, mov_cat_list, score_list = [], [], [], []\n",
    "                    mov_poster_list = []\n",
    "        return data_generator\n",
    "\n",
    "class Model(dygraph.layers.Layer):\n",
    "    # Recommender network: embeds user and movie features into two 200-d\n",
    "    # vectors and scores a (user, movie) pair by scaled cosine similarity.\n",
    "    def __init__(self, name_scope, use_poster, use_mov_title, use_mov_cat, use_age_job):\n",
    "        super(Model, self).__init__(name_scope)\n",
    "        name = self.full_name()\n",
    "        \n",
    "        # Keep the feature-selection flags on the model instance.\n",
    "        self.use_mov_poster = use_poster\n",
    "        self.use_mov_title = use_mov_title\n",
    "        self.use_usr_age_job = use_age_job\n",
    "        self.use_mov_cat = use_mov_cat\n",
    "        \n",
    "        # Build the dataset and the train/valid batch iterators.\n",
    "        Dataset = MovieLen(self.use_mov_poster)\n",
    "        self.Dataset = Dataset\n",
    "        self.trainset = self.Dataset.train_dataset\n",
    "        self.valset = self.Dataset.valid_dataset\n",
    "        self.train_loader = self.Dataset.load_data(dataset=self.trainset, mode='train')\n",
    "        self.valid_loader = self.Dataset.load_data(dataset=self.valset, mode='valid')\n",
    "\n",
    "        \"\"\" define network layer for embedding usr info \"\"\"\n",
    "        USR_ID_NUM = Dataset.max_usr_id + 1\n",
    "        # Embed the user ID, followed by an FC layer.\n",
    "        self.usr_emb = Embedding(name, [USR_ID_NUM, 32], is_sparse=False)\n",
    "        self.usr_fc = FC(name, size=32)\n",
    "        \n",
    "        # Embed the user gender, followed by an FC layer.\n",
    "        USR_GENDER_DICT_SIZE = 2\n",
    "        self.usr_gender_emb = Embedding(name, [USR_GENDER_DICT_SIZE, 16])\n",
    "        self.usr_gender_fc = FC(name, 16)\n",
    "        \n",
    "        # Embed the user age, followed by an FC layer.\n",
    "        USR_AGE_DICT_SIZE = Dataset.max_usr_age + 1\n",
    "        self.usr_age_emb = Embedding(name, [USR_AGE_DICT_SIZE, 16])\n",
    "        self.usr_age_fc = FC(name, 16)\n",
    "        \n",
    "        # Embed the user job, followed by an FC layer.\n",
    "        USR_JOB_DICT_SIZE = Dataset.max_usr_job + 1\n",
    "        self.usr_job_emb = Embedding(name, [USR_JOB_DICT_SIZE, 16])\n",
    "        self.usr_job_fc = FC(name, 16)\n",
    "        \n",
    "        # FC layer that fuses all user features into one vector.\n",
    "        self.usr_combined = FC(name, 200, act='tanh')\n",
    "        \n",
    "        \"\"\" define network layer for embedding usr info \"\"\"\n",
    "        # Embed the movie ID, followed by an FC layer.\n",
    "        MOV_DICT_SIZE = Dataset.max_mov_id + 1\n",
    "        self.mov_emb = Embedding(name, [MOV_DICT_SIZE, 32])\n",
    "        self.mov_fc = FC(name, 32)\n",
    "        \n",
    "        # Embed the movie categories.\n",
    "        CATEGORY_DICT_SIZE = len(Dataset.movie_cat) + 1\n",
    "        self.mov_cat_emb = Embedding(name, [CATEGORY_DICT_SIZE, 32], is_sparse=False)\n",
    "        self.mov_cat_fc = FC(name, 32)\n",
    "        \n",
    "        # Embed the movie title words; two conv layers pool them.\n",
    "        MOV_TITLE_DICT_SIZE = len(Dataset.movie_title) + 1\n",
    "        self.mov_title_emb = Embedding(name, [MOV_TITLE_DICT_SIZE, 32], is_sparse=False)\n",
    "        self.mov_title_conv = Conv2D(name, 1, filter_size=(3, 1), stride=(2,1), padding=0, act='relu')\n",
    "        self.mov_title_conv2 = Conv2D(name, 1, filter_size=(3, 1), stride=1, padding=0, act='relu')\n",
    "        \n",
    "        # FC layer that fuses all movie features into one vector.\n",
    "        self.mov_concat_embed = FC(name, size=200, act='tanh')\n",
    "        \n",
    "    # Forward pass that computes the user feature vector.\n",
    "    def get_usr_feat(self, usr_var):\n",
    "        \"\"\" get usr features\"\"\"\n",
    "        # Unpack the user inputs.\n",
    "        usr_id, usr_gender, usr_age, usr_job = usr_var\n",
    "        # Run the user ID through embedding + FC; every per-field feature\n",
    "        # is collected in feats_collect.\n",
    "        feats_collect = []\n",
    "        usr_id = self.usr_emb(usr_id)\n",
    "        usr_id = self.usr_fc(usr_id)\n",
    "        usr_id = fluid.layers.relu(usr_id)\n",
    "        feats_collect.append(usr_id)\n",
    "        \n",
    "        # Gender feature via embedding + FC.\n",
    "        usr_gender = self.usr_gender_emb(usr_gender)\n",
    "        usr_gender = self.usr_gender_fc(usr_gender)\n",
    "        usr_gender = fluid.layers.relu(usr_gender)\n",
    "        feats_collect.append(usr_gender)\n",
    "        # Optionally include the age and job features.\n",
    "        if self.use_usr_age_job:\n",
    "            # Age feature via embedding + FC.\n",
    "            usr_age = self.usr_age_emb(usr_age)\n",
    "            usr_age = self.usr_age_fc(usr_age)\n",
    "            usr_age = fluid.layers.relu(usr_age)\n",
    "            feats_collect.append(usr_age)\n",
    "            # Job feature via embedding + FC.\n",
    "            usr_job = self.usr_job_emb(usr_job)\n",
    "            usr_job = self.usr_job_fc(usr_job)\n",
    "            usr_job = fluid.layers.relu(usr_job)\n",
    "            feats_collect.append(usr_job)\n",
    "        \n",
    "        # Concatenate all user features and fuse them with an FC layer.\n",
    "        usr_feat = fluid.layers.concat(feats_collect, axis=1)\n",
    "        usr_feat = self.usr_combined(usr_feat)\n",
    "        return usr_feat\n",
    "\n",
    "        # Forward pass that computes the movie feature vector.\n",
    "    def get_mov_feat(self, mov_var):\n",
    "        \"\"\" get movie features\"\"\"\n",
    "        # Unpack the movie inputs.\n",
    "        mov_id, mov_cat, mov_title, mov_poster = mov_var\n",
    "        feats_collect = []\n",
    "        # Batch size of the incoming data.\n",
    "        batch_size = mov_id.shape[0]\n",
    "        # Movie-ID feature via embedding + FC.\n",
    "        mov_id = self.mov_emb(mov_id)\n",
    "        mov_id = self.mov_fc(mov_id)\n",
    "        mov_id = fluid.layers.relu(mov_id)\n",
    "        feats_collect.append(mov_id)\n",
    "        \n",
    "        # Optionally include the category feature.\n",
    "        if self.use_mov_cat:\n",
    "            # Embed each category and sum the embeddings into one vector.\n",
    "            mov_cat = self.mov_cat_emb(mov_cat)\n",
    "            mov_cat = fluid.layers.reduce_sum(mov_cat, dim=1, keep_dim=False)\n",
    "\n",
    "            mov_cat = self.mov_cat_fc(mov_cat)\n",
    "            feats_collect.append(mov_cat)\n",
    "\n",
    "        if self.use_mov_title:\n",
    "            # Title feature: embed the words, apply two convolutions,\n",
    "            # then sum over the word axis.\n",
    "            mov_title = self.mov_title_emb(mov_title)\n",
    "            mov_title = self.mov_title_conv2(self.mov_title_conv(mov_title))\n",
    "            mov_title = fluid.layers.reduce_sum(mov_title, dim=2, keep_dim=False)\n",
    "            mov_title = fluid.layers.relu(mov_title)\n",
    "            mov_title = fluid.layers.reshape(mov_title, [batch_size, -1])\n",
    "            feats_collect.append(mov_title)\n",
    "            \n",
    "        # Concatenate all movie features and fuse into a 200-d vector.\n",
    "        mov_feat = fluid.layers.concat(feats_collect, axis=1)\n",
    "        mov_feat = self.mov_concat_embed(mov_feat)\n",
    "        return mov_feat\n",
    "    \n",
    "    # Forward pass of the recommender.\n",
    "    def forward(self, usr_var, mov_var):\n",
    "        # Compute the user and movie feature vectors.\n",
    "        usr_feat = self.get_usr_feat(usr_var)\n",
    "        mov_feat = self.get_mov_feat(mov_var)\n",
    "        # Cosine similarity between the two feature vectors.\n",
    "        res = fluid.layers.cos_sim(usr_feat, mov_feat)\n",
    "        # Scale the similarity up to the movie-rating range.\n",
    "        res = fluid.layers.scale(res, scale=5)\n",
    "        return usr_feat, mov_feat, res\n",
    "   "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 模型训练\n",
    "\n",
    "首先需要定义好训练的配置，包括是否使用GPU、设置损失函数、选择优化器以及学习率等。\n",
    "在本次实验中，由于数据较为简单，我们选择在CPU上训练，优化器使用Adam，学习率设置为0.01，一共训练10个epoch。\n",
    "\n",
    "然而，针对推荐算法的网络，如何设计损失函数呢？在CV和NLP章节中我们了解，分类可以用交叉熵损失函数，损失函数的大小可以衡量出算法当前分类的准确性。在推荐算法中，没有一个准确的度量既能衡量推荐的好坏，并具备可导性质，又能监督神经网络的训练。在电影推荐中，可以作为标签的只有评分数据，因此，我们可以用评分数据作为监督信息，神经网络的输出作为预测值，使用均方差（Mean Square Error）损失函数去训练网络模型。\n",
    "\n",
    "注：使用均方差损失函数即是使用回归的方法完成模型训练，观察到，电影的评分数据只有5个，是否可以使用分类损失函数完成训练？事实上，评分数据应该是一个连续数据，比如，评分3和评分4是接近的，如果使用分类的方法，评分3和评分4是两个类别，容易割裂评分间的连续性。\n",
    "\n",
    "整个训练过程和一般的模型训练大同小异，不再赘述。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "def train(model):\n",
    "    # Train `model` with Adam + MSE loss on the rating labels, saving a\n",
    "    # checkpoint after each epoch under ./checkpoint/.\n",
    "    # Training configuration.\n",
    "    use_gpu = False\n",
    "    lr = 0.01\n",
    "    Epoches = 10\n",
    "\n",
    "    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n",
    "    with fluid.dygraph.guard(place):\n",
    "        # Switch the model to training mode.\n",
    "        model.train()\n",
    "        # Batched data reader built by the model.\n",
    "        data_loader = model.train_loader\n",
    "        # Adam optimizer with learning rate 0.01.\n",
    "        opt = fluid.optimizer.Adam(learning_rate=lr)\n",
    "        \n",
    "        for epoch in range(0, Epoches):\n",
    "            for idx, data in enumerate(data_loader()):\n",
    "                # Unpack the batch and convert to dygraph variables.\n",
    "                usr, mov, score = data\n",
    "                usr_v = [dygraph.to_variable(var) for var in usr]\n",
    "                mov_v = [dygraph.to_variable(var) for var in mov]\n",
    "                scores_label = dygraph.to_variable(score)\n",
    "                # Forward pass of the recommender.\n",
    "                _, _, scores_predict = model(usr_v, mov_v)\n",
    "                # Mean squared error against the true scores.\n",
    "                loss = fluid.layers.square_error_cost(scores_predict, scores_label)\n",
    "                avg_loss = fluid.layers.mean(loss)\n",
    "                if idx % 500 == 0:\n",
    "                    print(\"epoch: {}, batch_id: {}, loss is: {}\".format(epoch, idx, avg_loss.numpy()))\n",
    "                    \n",
    "                # Backprop, apply the update, then clear the gradients.\n",
    "                avg_loss.backward()\n",
    "                opt.minimize(avg_loss)\n",
    "                model.clear_gradients()\n",
    "            # Save a checkpoint once per epoch.\n",
    "            fluid.save_dygraph(model.state_dict(), './checkpoint/epoch'+str(epoch))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "##Total dataset instances:  1000209\n",
      "##MovieLens dataset information: \n",
      "usr num: 6040\n",
      "movies num: 3883\n",
      "epoch: 0, batch_id: 0, loss is: [13.394577]\n",
      "epoch: 0, batch_id: 500, loss is: [1.1303637]\n",
      "epoch: 0, batch_id: 1000, loss is: [1.2052544]\n",
      "epoch: 0, batch_id: 1500, loss is: [1.0797948]\n",
      "epoch: 0, batch_id: 2000, loss is: [1.0760863]\n",
      "epoch: 0, batch_id: 2500, loss is: [1.1280993]\n",
      "epoch: 0, batch_id: 3000, loss is: [1.0753525]\n",
      "epoch: 0, batch_id: 3500, loss is: [0.97122294]\n",
      "epoch: 1, batch_id: 0, loss is: [1.0810933]\n",
      "epoch: 1, batch_id: 500, loss is: [0.9668919]\n",
      "epoch: 1, batch_id: 1000, loss is: [0.9860354]\n",
      "epoch: 1, batch_id: 1500, loss is: [0.90936303]\n",
      "epoch: 1, batch_id: 2000, loss is: [0.85691184]\n",
      "epoch: 1, batch_id: 2500, loss is: [0.9519359]\n",
      "epoch: 1, batch_id: 3000, loss is: [1.0360328]\n",
      "epoch: 1, batch_id: 3500, loss is: [0.9829871]\n",
      "epoch: 2, batch_id: 0, loss is: [0.9479978]\n",
      "epoch: 2, batch_id: 500, loss is: [0.89850855]\n",
      "epoch: 2, batch_id: 1000, loss is: [1.0360272]\n",
      "epoch: 2, batch_id: 1500, loss is: [0.8445386]\n",
      "epoch: 2, batch_id: 2000, loss is: [0.9593616]\n",
      "epoch: 2, batch_id: 2500, loss is: [0.8558115]\n",
      "epoch: 2, batch_id: 3000, loss is: [0.9096966]\n",
      "epoch: 2, batch_id: 3500, loss is: [1.1765003]\n",
      "epoch: 3, batch_id: 0, loss is: [0.8820313]\n",
      "epoch: 3, batch_id: 500, loss is: [0.9926759]\n",
      "epoch: 3, batch_id: 1000, loss is: [0.8467046]\n",
      "epoch: 3, batch_id: 1500, loss is: [0.8749664]\n",
      "epoch: 3, batch_id: 2000, loss is: [0.90999544]\n",
      "epoch: 3, batch_id: 2500, loss is: [0.8204515]\n",
      "epoch: 3, batch_id: 3000, loss is: [1.0266018]\n",
      "epoch: 3, batch_id: 3500, loss is: [0.78474224]\n",
      "epoch: 4, batch_id: 0, loss is: [0.8930086]\n",
      "epoch: 4, batch_id: 500, loss is: [0.948531]\n",
      "epoch: 4, batch_id: 1000, loss is: [0.9328145]\n",
      "epoch: 4, batch_id: 1500, loss is: [0.93602]\n",
      "epoch: 4, batch_id: 2000, loss is: [0.87151456]\n",
      "epoch: 4, batch_id: 2500, loss is: [0.89294636]\n",
      "epoch: 4, batch_id: 3000, loss is: [0.8004328]\n",
      "epoch: 4, batch_id: 3500, loss is: [0.9841825]\n",
      "epoch: 5, batch_id: 0, loss is: [0.78890634]\n",
      "epoch: 5, batch_id: 500, loss is: [0.7486305]\n",
      "epoch: 5, batch_id: 1000, loss is: [0.915802]\n",
      "epoch: 5, batch_id: 1500, loss is: [0.8518279]\n",
      "epoch: 5, batch_id: 2000, loss is: [1.0337944]\n",
      "epoch: 5, batch_id: 2500, loss is: [0.8014319]\n",
      "epoch: 5, batch_id: 3000, loss is: [0.9516671]\n",
      "epoch: 5, batch_id: 3500, loss is: [0.8598837]\n",
      "epoch: 6, batch_id: 0, loss is: [0.8316132]\n",
      "epoch: 6, batch_id: 500, loss is: [0.8653033]\n",
      "epoch: 6, batch_id: 1000, loss is: [0.903239]\n",
      "epoch: 6, batch_id: 1500, loss is: [0.81412923]\n",
      "epoch: 6, batch_id: 2000, loss is: [0.9996882]\n",
      "epoch: 6, batch_id: 2500, loss is: [0.8493054]\n",
      "epoch: 6, batch_id: 3000, loss is: [0.91363597]\n",
      "epoch: 6, batch_id: 3500, loss is: [0.86852384]\n",
      "epoch: 7, batch_id: 0, loss is: [1.0169337]\n",
      "epoch: 7, batch_id: 500, loss is: [0.7756157]\n",
      "epoch: 7, batch_id: 1000, loss is: [0.78189886]\n",
      "epoch: 7, batch_id: 1500, loss is: [0.6705141]\n",
      "epoch: 7, batch_id: 2000, loss is: [0.7814908]\n",
      "epoch: 7, batch_id: 2500, loss is: [0.98269534]\n",
      "epoch: 7, batch_id: 3000, loss is: [0.7897848]\n",
      "epoch: 7, batch_id: 3500, loss is: [0.8517717]\n",
      "epoch: 8, batch_id: 0, loss is: [0.7971121]\n",
      "epoch: 8, batch_id: 500, loss is: [0.92838955]\n",
      "epoch: 8, batch_id: 1000, loss is: [0.9234103]\n",
      "epoch: 8, batch_id: 1500, loss is: [0.8324571]\n",
      "epoch: 8, batch_id: 2000, loss is: [0.9747402]\n",
      "epoch: 8, batch_id: 2500, loss is: [0.8739619]\n",
      "epoch: 8, batch_id: 3000, loss is: [0.92065]\n",
      "epoch: 8, batch_id: 3500, loss is: [0.90772855]\n",
      "epoch: 9, batch_id: 0, loss is: [0.859686]\n",
      "epoch: 9, batch_id: 500, loss is: [0.8991824]\n",
      "epoch: 9, batch_id: 1000, loss is: [0.93134415]\n",
      "epoch: 9, batch_id: 1500, loss is: [0.78966516]\n",
      "epoch: 9, batch_id: 2000, loss is: [0.86657584]\n",
      "epoch: 9, batch_id: 2500, loss is: [0.87925625]\n",
      "epoch: 9, batch_id: 3000, loss is: [0.8004453]\n",
      "epoch: 9, batch_id: 3500, loss is: [0.7015592]\n"
     ]
    }
   ],
   "source": [
    "# Launch training: build the model (posters disabled, title/category/\n",
    "# age-job features enabled) and train it.\n",
    "with dygraph.guard():\n",
    "    use_poster, use_mov_title, use_mov_cat, use_age_job = False, True, True, True\n",
    "    model = Model('Recommend', use_poster, use_mov_title, use_mov_cat, use_age_job)\n",
    "    train(model)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "从训练结果来看，loss保持在0.9左右就难以下降了，主要是因为使用的均方差loss，计算得到预测评分和真实评分的均方差，真实评分的数据是1-5之间的整数，评分数据较大导致计算出来的loss也偏大。\n",
    "\n",
    "不过不用担心，我们只是通过训练神经网络提取特征向量，loss只要收敛即可。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "对训练的模型在验证集上做评估，除了训练所使用的Loss之外，还有两个选择：\n",
    "1. 评分预测精度ACC(Accuracy)：比较预测评分与真实评分，评分误差在0.5分以内的算正确，否则算错误。\n",
    "2. 评分预测误差MAE（Mean Absolute Error）：计算预测评分和真实评分之间的平均绝对误差。\n",
    "\n",
    "下面是使用验证集评估这两个指标的代码实现。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "\n",
    "def evaluation(model, params_file_path):\n",
    "    # Evaluate a saved checkpoint on the validation set.\n",
    "    # Returns (ACC, MAE): ACC is the fraction of predictions within 0.5\n",
    "    # of the true score; MAE is the mean absolute prediction error.\n",
    "    use_gpu = False\n",
    "    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n",
    "\n",
    "    with fluid.dygraph.guard(place):\n",
    "\n",
    "        # Load the checkpoint and switch to inference mode.\n",
    "        model_state_dict, _ = fluid.load_dygraph(params_file_path)\n",
    "        model.load_dict(model_state_dict)\n",
    "        model.eval()\n",
    "\n",
    "        acc_set = []\n",
    "        avg_loss_set = []\n",
    "        for idx, data in enumerate(model.valid_loader()):\n",
    "            usr, mov, score_label = data\n",
    "            usr_v = [dygraph.to_variable(var) for var in usr]\n",
    "            mov_v = [dygraph.to_variable(var) for var in mov]\n",
    "\n",
    "            _, _, scores_predict = model(usr_v, mov_v)\n",
    "\n",
    "            pred_scores = scores_predict.numpy()\n",
    "            \n",
    "            # Per-batch mean absolute error.\n",
    "            avg_loss_set.append(np.mean(np.abs(pred_scores - score_label)))\n",
    "\n",
    "            # A prediction counts as correct when it lies within 0.5 of\n",
    "            # the label. (Fixed: the old code clipped the error with\n",
    "            # diff[diff>0.5]=1 and returned 1-mean(diff), which is not\n",
    "            # the accuracy described above.)\n",
    "            acc = np.mean(np.abs(pred_scores - score_label) <= 0.5)\n",
    "            acc_set.append(acc)\n",
    "        return np.mean(acc_set), np.mean(avg_loss_set)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ACC: 0.26903684704731673 MAE: 0.8477496\n",
      "ACC: 0.27701370142973386 MAE: 0.83688134\n",
      "ACC: 0.26803226455664025 MAE: 0.83833\n",
      "ACC: 0.27506765631529 MAE: 0.82973194\n",
      "ACC: 0.27239453150675846 MAE: 0.832326\n",
      "ACC: 0.2729191278799986 MAE: 0.8304551\n",
      "ACC: 0.2757129890796466 MAE: 0.82654566\n",
      "ACC: 0.2674353031011728 MAE: 0.837164\n",
      "ACC: 0.2698536791862586 MAE: 0.83152574\n",
      "ACC: 0.274142069541491 MAE: 0.82742715\n"
     ]
    }
   ],
   "source": [
    "# Evaluate every per-epoch checkpoint on the validation set.\n",
    "param_path = \"./checkpoint/epoch\"\n",
    "for i in range(10):\n",
    "    acc, mae = evaluation(model, param_path+str(i))\n",
    "    print(\"ACC:\", acc, \"MAE:\", mae)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "上述结果中，我们采用了ACC和MAE指标测试在验证集上的评分预测的准确性，其中ACC值越大越好，MAE值越小越好。\n",
    "\n",
    "><font size=2>可以看到ACC和MAE的值不是很理想，但是这仅仅是对于评分预测不准确，不能直接衡量推荐结果的准确性。考虑到我们设计的神经网络是为了完成推荐任务而不是评分任务，所以总结一下：\n",
    "<br>1. 只针对预测评分任务来说，我们设计的神经网络结构和损失函数是不合理的，导致评分预测不理想；\n",
    "<br>2. 从损失函数的收敛可以知道网络的训练是有效的。评分预测的好坏不能反应推荐结果的好坏。</font>\n",
    "\n",
    "到这里，我们已经完成了推荐算法的前三步，包括：1. 数据的准备，2. 神经网络的设计，3. 神经网络的训练。\n",
    "\n",
    "目前还需要完成剩余的两个步骤：1. 提取用户、电影数据的特征并保存到本地， 2. 利用保存的特征计算相似度矩阵，利用相似度完成推荐。\n",
    "\n",
    "下面，我们利用训练的神经网络提取数据的特征，进而完成电影推荐，并观察推荐结果是否令人满意。\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 保存特征\n",
    "\n",
    "训练完模型后，我们得到每个用户、电影对应的特征向量，接下来将这些特征向量保存到本地，这样在进行推荐时，不需要使用神经网络重新提取特征，节省时间成本。\n",
    "\n",
    "保存特征的流程是：\n",
    "- 加载预训练好的模型参数。\n",
    "- 输入数据集的数据，提取整个数据集的用户特征和电影特征。注意数据输入到模型前，要先转成内置variable类型并保证尺寸正确。\n",
    "- 分别得到用户特征向量和电影特征向量，以使用pickle库保存字典形式的特征向量。\n",
    "\n",
    "使用用户和电影ID为索引，以字典格式存储数据，可以通过用户或者电影的ID索引到用户特征和电影特征。\n",
    "\n",
    "下面代码中，我们使用了一个pickle库。pickle库为python提供了一个简单的持久化功能，可以很容易的将Python对象保存到本地，但是缺点是，保存的文件对人来说可读性很差。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "3706\n",
      "usr / mov features saved!!!\n"
     ]
    }
   ],
   "source": [
    "from PIL import Image\n",
    "# pickle persists Python objects to local files.\n",
    "import pickle\n",
    "# Extract and save the user / movie feature vectors.\n",
    "def get_usr_mov_features(model, params_file_path, poster_path):\n",
    "    use_gpu = False\n",
    "    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n",
    "    usr_pkl = {}\n",
    "    mov_pkl = {}\n",
    "    \n",
    "    # Helper: reshape a scalar/list to `shape` and wrap it as an int64\n",
    "    # dygraph variable.\n",
    "    def list2variable(inputs, shape):\n",
    "        inputs = np.reshape(np.array(inputs).astype(np.int64), shape)\n",
    "        return fluid.dygraph.to_variable(inputs)\n",
    "    \n",
    "    with fluid.dygraph.guard(place):\n",
    "        # Load the checkpoint and switch to inference mode with eval().\n",
    "        model_state_dict, _ = fluid.load_dygraph(params_file_path)\n",
    "        model.load_dict(model_state_dict)\n",
    "        model.eval()\n",
    "        # Use the full dataset (train + valid samples).\n",
    "        dataset = model.Dataset.dataset\n",
    "\n",
    "        for i in range(len(dataset)):\n",
    "            # Each sample holds user info, movie info and the score.\n",
    "            # Only users/movies that appear in the samples are converted;\n",
    "            # a production system would use its full catalogue instead.\n",
    "            usr_info, mov_info, score = dataset[i]['usr_info'], dataset[i]['mov_info'],dataset[i]['scores']\n",
    "            usrid = str(usr_info['usr_id'])\n",
    "            movid = str(mov_info['mov_id'])\n",
    "\n",
    "            # Compute each user's feature once and cache it in usr_pkl.\n",
    "            if usrid not in usr_pkl.keys():\n",
    "                usr_id_v = list2variable(usr_info['usr_id'], [1, 1])\n",
    "                usr_age_v = list2variable(usr_info['age'], [1, 1])\n",
    "                usr_gender_v = list2variable(usr_info['gender'], [1, 1])\n",
    "                usr_job_v = list2variable(usr_info['job'], [1, 1])\n",
    "\n",
    "                usr_in = [usr_id_v, usr_gender_v, usr_age_v, usr_job_v]\n",
    "                usr_feat = model.get_usr_feat(usr_in)\n",
    "\n",
    "                usr_pkl[usrid] = usr_feat.numpy()\n",
    "            \n",
    "            # Compute each movie's feature once and cache it in mov_pkl.\n",
    "            if movid not in mov_pkl.keys():\n",
    "                mov_id_v = list2variable(mov_info['mov_id'], [1, 1])\n",
    "                mov_tit_v = list2variable(mov_info['title'], [1, 1, 15, 1])\n",
    "                mov_cat_v = list2variable(mov_info['category'], [1, 1, 6, 1])\n",
    "\n",
    "                mov_in = [mov_id_v, mov_cat_v, mov_tit_v, None]\n",
    "                mov_feat = model.get_mov_feat(mov_in)\n",
    "\n",
    "                mov_pkl[movid] = mov_feat.numpy()\n",
    "    \n",
    "    print(len(mov_pkl.keys()))\n",
    "    # Persist both feature dicts to local pickle files.\n",
    "    pickle.dump(usr_pkl, open('./usr_feat.pkl', 'wb'))\n",
    "    pickle.dump(mov_pkl, open('./mov_feat.pkl', 'wb'))\n",
    "    print(\"usr / mov features saved!!!\")\n",
    "\n",
    "        \n",
    "param_path = \"./checkpoint/epoch7\"\n",
    "poster_path = \"./work/ml-1m/posters/\"\n",
    "get_usr_mov_features(model, param_path, poster_path)        "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "保存好有效代表用户和电影的特征向量后，在下一节我们讨论如何基于这两个向量构建推荐系统。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 作业 10-2\n",
    "\n",
    "1. 作业1：以上算法使用了用户与电影的所有特征（除Poster外），可以设计对比实验，验证哪些特征是重要的，把最终的特征挑选出来。\n",
    "为了验证哪些特征起到关键作用， 读者可以启用或弃用其中某些特征，或者加入电影海报特征，观察是否对模型Loss或评价指标有提升。\n",
    "\n",
    "2. 作业2：加入电影海报数据，验证电影海报特征（Poster）对推荐结果的影响，实现并分析推荐结果（有没有效果？为什么？）。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PaddlePaddle 1.6.0 (Python 3.5)",
   "language": "python",
   "name": "py35-paddle1.2.0"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
