{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "模型设计的代码需要用到上一节数据处理的Python类，定义如下："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import random\n",
    "# numpy is used below (np.max etc.); it must be imported in this cell so the\n",
    "# notebook survives Restart Kernel -> Run All.\n",
    "import numpy as np\n",
    "class MovieLen(object):\n",
    "    def __init__(self, use_poster):\n",
    "        # Whether to load movie poster images as an extra feature.\n",
    "        self.use_poster = use_poster\n",
    "        # Paths of the individual data files.\n",
    "        usr_info_path = \"./work/ml-1m/users.dat\"\n",
    "        rating_path = \"./work/ml-1m/new_rating.txt\"\n",
    "\n",
    "        movie_info_path = \"./work/ml-1m/movies.dat\"\n",
    "        self.poster_path = \"./work/ml-1m/posters/\"\n",
    "        # Load the movie data.\n",
    "        self.movie_info, self.movie_cat, self.movie_title = self.get_movie_info(movie_info_path)\n",
    "        # Record the largest category index, the largest title-word index and\n",
    "        # the largest movie ID (used later to size the Embedding layers).\n",
    "        self.max_mov_cat = np.max([self.movie_cat[k] for k in self.movie_cat])\n",
    "        self.max_mov_tit = np.max([self.movie_title[k] for k in self.movie_title])\n",
    "        self.max_mov_id = np.max(list(map(int, self.movie_info.keys())))\n",
    "        # Maximum user ID / age / job; filled in by get_usr_info below.\n",
    "        self.max_usr_id = 0\n",
    "        self.max_usr_age = 0\n",
    "        self.max_usr_job = 0\n",
    "        # Load the user data.\n",
    "        self.usr_info = self.get_usr_info(usr_info_path)\n",
    "        # Load the rating data.\n",
    "        self.rating_info = self.get_rating_info(rating_path)\n",
    "        # Build the flat dataset of (user, movie, score) samples.\n",
    "        self.dataset = self.get_dataset(usr_info=self.usr_info,\n",
    "                                        rating_info=self.rating_info,\n",
    "                                        movie_info=self.movie_info)\n",
    "        # Split the dataset 90%/10% into train/validation parts.\n",
    "        self.train_dataset = self.dataset[:int(len(self.dataset)*0.9)]\n",
    "        self.valid_dataset = self.dataset[int(len(self.dataset)*0.9):]\n",
    "        print(\"##Total dataset instances: \", len(self.dataset))\n",
    "        print(\"##MovieLens dataset information: \\nusr num: {}\\n\"\n",
    "              \"movies num: {}\".format(len(self.usr_info),len(self.movie_info)))\n",
    "    # Parse movies.dat into: {movie_id: info dict}, {category: index}, {title word: index}.\n",
    "    def get_movie_info(self, path):\n",
    "        # Open the file with ISO-8859-1 encoding and read all lines into data.\n",
    "        with open(path, 'r', encoding=\"ISO-8859-1\") as f:\n",
    "            data = f.readlines()\n",
    "        # Three dicts: all movie info, title-word indices, category indices.\n",
    "        movie_info, movie_titles, movie_cat = {}, {}, {}\n",
    "        # Counters that assign an index to each distinct title/category word;\n",
    "        # they start at 1 because 0 is reserved as the padding value.\n",
    "        t_count, c_count = 1, 1\n",
    "\n",
    "        count_tit = {}\n",
    "        # Process the file line by line.\n",
    "        for item in data:\n",
    "            item = item.strip().split(\"::\")\n",
    "            v_id = item[0]\n",
    "            # Drop the trailing ' (YYYY)' year suffix from the title.\n",
    "            v_title = item[1][:-7]\n",
    "            cats = item[2].split('|')\n",
    "            v_year = item[1][-5:-1]\n",
    "\n",
    "            titles = v_title.split()\n",
    "            # Assign each previously unseen title word an index in movie_titles.\n",
    "            for t in titles:\n",
    "                if t not in movie_titles:\n",
    "                    movie_titles[t] = t_count\n",
    "                    t_count += 1\n",
    "            # Assign each previously unseen category word an index in movie_cat.\n",
    "            for cat in cats:\n",
    "                if cat not in movie_cat:\n",
    "                    movie_cat[cat] = c_count\n",
    "                    c_count += 1\n",
    "            # Pad the title index list with zeros to a fixed length of 15.\n",
    "            v_tit = [movie_titles[k] for k in titles]\n",
    "            while len(v_tit)<15:\n",
    "                v_tit.append(0)\n",
    "            # Pad the category index list with zeros to a fixed length of 6.\n",
    "            v_cat = [movie_cat[k] for k in cats]\n",
    "            while len(v_cat)<6:\n",
    "                v_cat.append(0)\n",
    "            # Save this movie's record into movie_info.\n",
    "            movie_info[v_id] = {'mov_id': int(v_id),\n",
    "                                'title': v_tit,\n",
    "                                'category': v_cat,\n",
    "                                'years': int(v_year)}\n",
    "        return movie_info, movie_cat, movie_titles\n",
    "\n",
    "    def get_usr_info(self, path):\n",
    "        # Encode gender as a number: M -> 0, F -> 1.\n",
    "        def gender2num(gender):\n",
    "            return 1 if gender == 'F' else 0\n",
    "\n",
    "        # Open the file and read all lines into data.\n",
    "        with open(path, 'r') as f:\n",
    "            data = f.readlines()\n",
    "        # Dict of user records keyed by the user ID string.\n",
    "        use_info = {}\n",
    "\n",
    "        max_usr_id = 0\n",
    "        # Process the file line by line.\n",
    "        for item in data:\n",
    "            # Split each line into its '::'-separated fields.\n",
    "            item = item.strip().split(\"::\")\n",
    "            usr_id = item[0]\n",
    "            # Convert the string fields to numbers and store them in the dict.\n",
    "            use_info[usr_id] = {'usr_id': int(usr_id),\n",
    "                                'gender': gender2num(item[1]),\n",
    "                                'age': int(item[2]),\n",
    "                                'job': int(item[3])}\n",
    "            # Track the maxima used to size the Embedding layers later.\n",
    "            self.max_usr_id = max(self.max_usr_id, int(usr_id))\n",
    "            self.max_usr_age = max(self.max_usr_age, int(item[2]))\n",
    "            self.max_usr_job = max(self.max_usr_job, int(item[3]))\n",
    "        return use_info\n",
    "    # Parse the rating file into {usr_id: {movie_id: score}}.\n",
    "    def get_rating_info(self, path):\n",
    "        # Read all lines of the file.\n",
    "        with open(path, 'r') as f:\n",
    "            data = f.readlines()\n",
    "        # Store the ratings in a nested dict and return it.\n",
    "        rating_info = {}\n",
    "        for item in data:\n",
    "            item = item.strip().split(\"::\")\n",
    "            usr_id,movie_id,score = item[0],item[1],item[2]\n",
    "            if usr_id not in rating_info.keys():\n",
    "                rating_info[usr_id] = {movie_id:float(score)}\n",
    "            else:\n",
    "                rating_info[usr_id][movie_id] = float(score)\n",
    "        return rating_info\n",
    "    # Join users, movies and ratings into a flat list of training samples.\n",
    "    def get_dataset(self, usr_info, rating_info, movie_info):\n",
    "        trainset = []\n",
    "        for usr_id in rating_info.keys():\n",
    "            usr_ratings = rating_info[usr_id]\n",
    "            for movie_id in usr_ratings:\n",
    "                trainset.append({'usr_info': usr_info[usr_id],\n",
    "                                 'mov_info': movie_info[movie_id],\n",
    "                                 'scores': usr_ratings[movie_id]})\n",
    "        return trainset\n",
    "    \n",
    "    def load_data(self, dataset=None, mode='train'):\n",
    "        # Honor the constructor flag instead of hard-coding False, so poster\n",
    "        # features can actually be enabled via MovieLen(use_poster=True).\n",
    "        use_poster = self.use_poster\n",
    "\n",
    "        # Batch size of the data iterator.\n",
    "        BATCHSIZE = 256\n",
    "\n",
    "        data_length = len(dataset)\n",
    "        index_list = list(range(data_length))\n",
    "        # Define the batch generator.\n",
    "        def data_generator():\n",
    "            # In training mode, shuffle the data.\n",
    "            if mode == 'train':\n",
    "                random.shuffle(index_list)\n",
    "            # One accumulator list per feature.\n",
    "            usr_id_list,usr_gender_list,usr_age_list,usr_job_list = [], [], [], []\n",
    "            mov_id_list,mov_tit_list,mov_cat_list,mov_poster_list = [], [], [], []\n",
    "            score_list = []\n",
    "            # Iterate over the dataset by (possibly shuffled) index.\n",
    "            for idx, i in enumerate(index_list):\n",
    "                # Append each feature value to its list.\n",
    "                usr_id_list.append(dataset[i]['usr_info']['usr_id'])\n",
    "                usr_gender_list.append(dataset[i]['usr_info']['gender'])\n",
    "                usr_age_list.append(dataset[i]['usr_info']['age'])\n",
    "                usr_job_list.append(dataset[i]['usr_info']['job'])\n",
    "\n",
    "                mov_id_list.append(dataset[i]['mov_info']['mov_id'])\n",
    "                mov_tit_list.append(dataset[i]['mov_info']['title'])\n",
    "                mov_cat_list.append(dataset[i]['mov_info']['category'])\n",
    "                mov_id = dataset[i]['mov_info']['mov_id']\n",
    "\n",
    "                if use_poster:\n",
    "                    # Posters are only read when enabled; skipping this branch\n",
    "                    # keeps data loading fast in the default configuration.\n",
    "                    # Lazy import so PIL is only required when posters are used.\n",
    "                    from PIL import Image\n",
    "                    # mov_id is an int, so format it directly (the original\n",
    "                    # mov_id[0] would raise TypeError on an int).\n",
    "                    poster = Image.open(self.poster_path+'mov_id{}.jpg'.format(str(mov_id)))\n",
    "                    poster = poster.resize([64, 64])\n",
    "                    # Ensure 3 channels so the [3, 64, 64] reshape below works\n",
    "                    # (the original len(poster.size) <= 2 test was always true).\n",
    "                    if poster.mode != 'RGB':\n",
    "                        poster = poster.convert(\"RGB\")\n",
    "\n",
    "                    mov_poster_list.append(np.array(poster))\n",
    "\n",
    "                score_list.append(int(dataset[i]['scores']))\n",
    "                # Once a full batch has been collected, emit it.\n",
    "                if len(usr_id_list)==BATCHSIZE:\n",
    "                    # Convert the lists to arrays with fixed shapes.\n",
    "                    usr_id_arr = np.expand_dims(np.array(usr_id_list), axis=-1)\n",
    "                    usr_gender_arr = np.expand_dims(np.array(usr_gender_list), axis=-1)\n",
    "                    usr_age_arr = np.expand_dims(np.array(usr_age_list), axis=-1)\n",
    "                    usr_job_arr = np.expand_dims(np.array(usr_job_list), axis=-1)\n",
    "\n",
    "                    mov_id_arr = np.expand_dims(np.array(mov_id_list), axis=-1)\n",
    "                    mov_cat_arr = np.reshape(np.array(mov_cat_list), [BATCHSIZE, 6, 1]).astype(np.int64)\n",
    "                    mov_tit_arr = np.reshape(np.array(mov_tit_list), [BATCHSIZE, 1, 15, 1]).astype(np.int64)\n",
    "\n",
    "\n",
    "                    if use_poster:\n",
    "                        # Normalize pixel values from [0, 255] to [-1, 1].\n",
    "                        mov_poster_arr = np.reshape(np.array(mov_poster_list)/127.5 - 1, [BATCHSIZE, 3, 64, 64]).astype(np.float32)\n",
    "                    else:\n",
    "                        mov_poster_arr = np.array([0.])\n",
    "\n",
    "                    scores_arr = np.reshape(np.array(score_list), [-1, 1]).astype(np.float32)\n",
    "\n",
    "                    # Yield the current batch.\n",
    "                    yield [usr_id_arr, usr_gender_arr, usr_age_arr, usr_job_arr], \\\n",
    "                           [mov_id_arr, mov_cat_arr, mov_tit_arr, mov_poster_arr], scores_arr\n",
    "\n",
    "                    # Reset the per-batch accumulator lists.\n",
    "                    usr_id_list, usr_gender_list, usr_age_list, usr_job_list = [], [], [], []\n",
    "                    mov_id_list, mov_tit_list, mov_cat_list, score_list = [], [], [], []\n",
    "                    mov_poster_list = []\n",
    "        return data_generator"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 模型设计介绍"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "神经网络模型设计是电影推荐任务中重要的一环。它的作用是提取图像、文本或者语音的特征，利用这些特征完成分类、检测、文本分析等任务。在电影推荐任务中，我们将设计一个神经网络模型，提取用户数据、电影数据的特征向量，然后计算这些向量的相似度，利用相似度的大小去完成推荐。\n",
    "\n",
    "根据第一章中对建模思路的分析，神经网络模型的设计包含如下步骤：\n",
    "1. 分别将用户、电影的多个特征数据转换成特征向量。\n",
    "2. 对这些特征向量，使用全连接层或者卷积层进一步提取特征。\n",
    "3. 将用户、电影多个数据的特征向量融合成一个向量表示，方便进行相似度计算。\n",
    "4. 计算特征之间的相似度。\n",
    "\n",
    "依据这个思路，我们设计一个简单的电影推荐神经网络模型：\n",
    "\n",
    "<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/1f9c0405613446a78481d2ed1443b04ed1c64be060534c9a9877676f969b9e3f\" width=\"700\" ></center>\n",
    "\n",
    "\n",
    "<center><br>图1：网络结构的设计 </br></center>\n",
    "<br></br>\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "该网络结构包含如下内容：\n",
    "\n",
    "1. 首先，提取用户特征和电影特征作为神经网络的输入，其中：\n",
    "\t* 用户特征包含四个属性信息，分别是用户ID、性别、职业和年龄。\n",
    "\t* 电影特征包含三个属性信息，分别是电影ID、电影类型和电影名称。\n",
    "\n",
    "2. 提取用户特征。使用Embedding层将用户ID映射为向量表示，输入全连接层，并对其他三个属性也做类似的处理。然后将四个属性的特征分别全连接并相加。\n",
    "\n",
    "3. 提取电影特征。将电影ID和电影类型映射为向量表示，输入全连接层，电影名字用文本卷积神经网络得到其定长向量表示。然后将三个属性的特征表示分别全连接并相加。\n",
    "\n",
    "4. 得到用户和电影的向量表示后，计算二者的余弦相似度作为个性化推荐系统的打分。最后，用该相似度打分和用户真实打分的均方差作为该回归模型的损失函数。\n",
    "><font size=2>衡量相似度的计算有多种方式，比如计算余弦相似度、皮尔森相关系数、Jaccard相似系数等等，或者通过计算欧几里得距离、曼哈顿距离、明可夫斯基距离等方式计算相似度。余弦相似度是一种简单好用的向量相似度计算方式，通过计算向量之间的夹角余弦值来评估他们的相似度，本节我们使用余弦相似度计算特征之间的相似度。</font>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "### 为何如此设计网络呢？\n",
    "\n",
    "网络的主体框架已经在第一章中做出了分析，但还有一些细节点没有确定。\n",
    "\n",
    "1. 如何将“数字”转变成“向量”？\n",
    "\n",
    "\t如NLP章节的介绍，使用词嵌入（Embedding）的方式可完成数字转变成向量。\n",
    "\n",
    "2. 如何合并多个向量的信息？例如：如何将用户四个特征（ID、性别、年龄、职业）的向量合并成一个向量？\n",
    "\n",
    "\t最简单的方式是先将不同特征向量（ID 32维、性别 16维、年龄 16维、职业 16维）通过4个全连接层映射到4个等长的向量（200维度），再将4个等长的向量按位相加即可得到1个包含全部信息的向量。\n",
    "\n",
    "\t电影类型的特征是多个数字转变成的多个向量（6个），也可以通过该方式合并成1个向量。\n",
    "\n",
    "3. 如何处理文本信息？\n",
    "\n",
    "\t如NLP章节的介绍，处理文本信息使用卷积神经网络(CNN)和长短记忆神经网络（LSTM）会有较好的效果。因为电影标题是相对简单的短文本，所以我们使用卷积网络结构来处理电影标题。\n",
    "\n",
    "4. 尺寸大小应该如何设计？\n",
    "\t这涉及到信息熵的理念：越丰富的信息，维度越高。所以，信息量较少的原始特征可以用更短的向量表示，例如性别、年龄和职业这三个特征向量均设置成16维，而用户ID和电影ID这样较多信息量的特征设置成32维。综合了4个原始用户特征的向量和综合了3个电影特征的向量均设计成200维度，使得它们可以蕴含更丰富的信息。当然，尺寸大小并没有一贯的最优规律，需要我们根据问题的复杂程度，训练样本量，特征的信息量等多方面信息探索出最有效的设计。\n",
    "\n",
    "第一章的设计思想结合上面几个细节方案，即可得出上图展示的网络结构。\n",
    "\n",
    "接下来我们进入代码实现环节，首先看看如何将数据映射为向量。在自然语言处理中，我们常使用词嵌入（Embedding）的方式完成向量变换。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# Embedding介绍\n",
    "\n",
    "Embedding是一个嵌入层，将输入的非负整数矩阵中的每个数值，转换为具有固定长度的向量。\n",
    "\n",
    "在NLP任务中，更希望把输入文本映射成向量表示，以便神经网络的处理。在数据处理章节，我们已经将用户和电影的特征用数字表示。嵌入层Embedding可以完成数字到向量的映射。\n",
    "\n",
    "飞桨已经支持Embedding的API，该接口根据输入从Embedding矩阵中查询对应Embedding信息，并会根据输入参数size (vocab_size, emb_size)自动构造一个二维embedding矩阵。该API重要参数如下所示，详细介绍可参见[Embedding API接口文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/Embedding_cn.html#embedding)。\n",
    "\n",
    "><font size=2>函数形式：\n",
    "fluid.dygraph.Embedding(size, param_attr)<ul><li>size (tuple|list)：Embedding矩阵的维度。必须包含两个元素，第一个元素是用来表示输入单词的最大数值， 第二个元素是输出embedding的维度。</li><li>param_attr (ParamAttr)：指定Embedding权重参数属性。</li></ul></font>\n",
    "\n",
    "另外，Embedding要求input的最后一维必须等于1，输出Tensor的shape是将输入Tensor shape最后一维的1替换为emb_size。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[1]]\n",
      "name _generated_var_0, dtype: VarType.INT64 shape: [1, 1] \tlod: {}\n",
      "\tdim: 1, 1\n",
      "\tlayout: NCHW\n",
      "\tdtype: int64_t\n",
      "\tdata: [1]\n",
      "\n",
      "数字 1 的embedding结果是：  [[-1.2574270e-02 -1.7813936e-02  2.4048444e-02 -9.5336046e-03\n",
      "  -3.0041732e-02  8.9416020e-03 -6.0916562e-03  2.8836165e-02\n",
      "   9.4242208e-03  3.0505095e-02 -2.1623908e-02 -8.9776330e-03\n",
      "   2.7068481e-02 -1.1491105e-02  2.7349539e-02 -2.3191534e-02\n",
      "   8.7333433e-03 -7.4426085e-04 -3.0769784e-02  6.3138232e-03\n",
      "   8.5166916e-03 -2.3271695e-02 -2.6047286e-02  2.0109061e-02\n",
      "  -2.8243922e-03  1.6461056e-02 -1.3039071e-02 -1.1673998e-02\n",
      "  -9.9338591e-05 -1.7489646e-02  2.4660956e-02  1.0923121e-02]] \n",
      "形状是： [1, 32]\n"
     ]
    }
   ],
   "source": [
    "import paddle.fluid as fluid\n",
    "import paddle.fluid.dygraph as dygraph\n",
    "from paddle.fluid.dygraph import FC, Embedding, Conv2D\n",
    "import numpy as np\n",
    "\n",
    "# 创建飞桨动态图的工作空间\n",
    "with dygraph.guard():\n",
    "    # 声明用户的最大ID，在此基础上加1（算上数字0）\n",
    "    USR_ID_NUM = 6040 + 1\n",
    "    # 声明Embedding 层，将ID映射为32长度的向量\n",
    "    usr_emb = Embedding(\"Embedding\", size=[USR_ID_NUM, 32], is_sparse=False)\n",
    "    # 声明输入数据，将其转成variable, 输入数据的最后一维必须是1\n",
    "    arr_1 = np.array([1], dtype=\"int64\").reshape((-1, 1))\n",
    "    print(arr_1)\n",
    "    arr_pd1 = dygraph.to_variable(arr_1)\n",
    "    print(arr_pd1)\n",
    "    # 计算结果\n",
    "    emb_res = usr_emb(arr_pd1)\n",
    "    # 打印结果\n",
    "    print(\"数字 1 的embedding结果是： \", emb_res.numpy(), \"\\n形状是：\", emb_res.shape)\n",
    "    \n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "使用Embedding时，需要注意size这个参数：\n",
    "- size。size是包含两个整数元素的列表或者元组。第一个元素为vocab_size(词表大小), 第二个为emb_size（embedding层维度）。使用的ml-1m数据集的用户ID最大为6040，考虑到0的存在，所以这里我们需要将Embedding的输入size的第一个维度设置为6041（=6040+1）。emb_size表示将数据映射为emb_size维度的向量。这里将用户ID数据1转换成了维度为32的向量表示。32是设置的超参数，读者可以自行调整大小。\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "通过上面的代码，我们简单了解了Embedding的工作方式，但是Embedding层是如何将数字映射为高维度的向量的呢？\n",
    "\n",
    "实际上，Embedding层和Conv2D, FC层一样，Embedding层也有可学习的权重，通过矩阵相乘的方法对输入数据进行映射。Embedding中将输入映射成向量的实际步骤是：\n",
    "\n",
    "1. 将输入数据转换成one-hot格式的向量； \n",
    "\n",
    "2. one-hot向量和Embedding层的权重进行矩阵相乘得到Embedding的结果。\n",
    "\n",
    "实现方法如下：\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "输入数据是： [[4]\n",
      " [2]\n",
      " [5]]\n",
      "默认权重初始化embedding层的映射结果是： [[ 0.2686137  -0.2859717   0.18778294 -0.3862218   0.40939522 -0.14037177\n",
      "   0.41838425  0.4629854   0.3223775  -0.089948   -0.16525975  0.01131973\n",
      "  -0.26370063  0.13407785  0.33191448  0.06181371]\n",
      " [-0.1110054  -0.13311398  0.07831687 -0.19230428  0.22932577 -0.19600236\n",
      "   0.4224186   0.16963917  0.01972485  0.01735717  0.05857074 -0.2742863\n",
      "   0.28297132  0.05678505 -0.003939    0.09295917]\n",
      " [-0.3407694   0.04457599 -0.39618546 -0.16263786 -0.25028026  0.3916446\n",
      "   0.02759862 -0.37292507  0.46122503 -0.10431403  0.04657298 -0.07587296\n",
      "  -0.01250583 -0.3061524   0.26596946 -0.14381829]]\n",
      "odict_keys(['Embedding/Embedding_0.w_0'])\n",
      "\n",
      "查看embedding层的权重形状： [10, 16]\n",
      "\n",
      "MSRA初始化权重embedding层的映射结果是： [[-0.4782553  -0.07947789 -0.02561191  0.65712583 -0.7473525   0.0722407\n",
      "   0.5860997   0.27215305  0.03854775 -0.713399   -0.6831846   0.02962136\n",
      "  -0.4899123   0.37784985  0.6100905   1.0570152 ]\n",
      " [-0.37664026  0.91477454 -0.23703295  0.8247893   0.09259942 -0.02810699\n",
      "   0.33700806  0.34389547  0.18288542 -0.04683336 -0.12996046  0.08489241\n",
      "   0.79804283  0.22338013 -0.24743217  0.40757594]\n",
      " [-0.22099511 -0.5780853   0.04733199 -0.20997956 -0.19534603  0.5043875\n",
      "   0.13780253  0.07640146  0.24842748  0.36171657  1.0304726   0.43458623\n",
      "   0.4372708   0.6968391   0.5147774   0.49982354]]\n"
     ]
    }
   ],
   "source": [
    "# Create the PaddlePaddle dynamic-graph (dygraph) workspace.\n",
    "with dygraph.guard():\n",
    "    # Vocabulary size for this demo: the inputs below are integers in [0, 10).\n",
    "    USR_ID_NUM = 10\n",
    "    # Declare an Embedding layer that maps an ID to a vector of length 16.\n",
    "    usr_emb = Embedding(\"Embedding\", size=[USR_ID_NUM, 16], is_sparse=False)\n",
    "    # Define the input data (integers smaller than 10) and convert it to a\n",
    "    # variable; the last dimension of the input must be 1.\n",
    "    arr = np.random.randint(0, 10, (3)).reshape((-1, 1)).astype('int64')\n",
    "    print(\"输入数据是：\", arr)\n",
    "    arr_pd = dygraph.to_variable(arr)\n",
    "    emb_res = usr_emb(arr_pd)\n",
    "    print(\"默认权重初始化embedding层的映射结果是：\", emb_res.numpy())\n",
    "    \n",
    "    # Inspect the Embedding layer's learnable weights.\n",
    "    emb_weights = usr_emb.state_dict()\n",
    "    print(emb_weights.keys())\n",
    "    \n",
    "    print(\"\\n查看embedding层的权重形状：\", emb_weights['Embedding/Embedding_0.w_0'].shape)\n",
    "    \n",
    "    # Declare another Embedding layer mapping IDs to 16-dim vectors, this\n",
    "    # time with a custom weight initializer.\n",
    "    # Define the MSRA initialization.\n",
    "    init = fluid.initializer.MSRAInitializer(uniform=False)\n",
    "    param_attr = fluid.ParamAttr(initializer=init)\n",
    "    \n",
    "    usr_emb2 = Embedding(\"Embedding2\", size=[USR_ID_NUM, 16], param_attr=param_attr)\n",
    "    emb_res = usr_emb2(arr_pd)\n",
    "    print(\"\\nMSRA初始化权重embedding层的映射结果是：\", emb_res.numpy())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "上面代码中，我们在[0, 10]范围内随机产生了3个整数，因此数据的最大值为整数9，最小为0。因此，输入数据映射为每个one-hot向量的维度是10，定义Embedding权重的第一个维度USR_ID_NUM为10。\n",
    "\n",
    "这里输入的数据shape是[3, 1]，Embedding层的权重形状则是[10, 16]，Embedding在计算时，首先将输入数据转换成one-hot向量，one-hot向量的长度和Embedding层的输入参数size的第一个维度有关。比如这里我们设置的是10，所以输入数据将被转换成维度为[3, 10]的one-hot向量，参数size决定了Embedding层的权重形状。最终维度为[3, 10]的one-hot向量与维度为[10, 16]的Embedding权重相乘，得到最终维度为[3, 16]的映射向量。\n",
    "\n",
    "我们也可以对Embedding层的权重进行初始化，如果不设置初始化方式，则采用默认的初始化方式。\n",
    "\n",
    "神经网络处理文本数据时，需要用数字代替文本，Embedding层则是将输入数字数据映射成了高维向量，然后就可以使用卷积、全连接、LSTM等网络层处理数据了，接下来我们开始设计用户和电影数据的特征提取网络。\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 用户特征提取网络\n",
    "\n",
    "理解Embedding后，我们就可以开始构建提取用户特征的神经网络了。\n",
    "\n",
    "<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/102cd0385d9c4d37b5fa874228f23af7ea7b90cba6524863af3f524a2e5c7b1f\" width=\"450\" ></center>\n",
    "\n",
    "\n",
    "用户特征网络主要包括：\n",
    "1. 将用户ID数据映射为向量表示，通过全连接层得到ID特征。\n",
    "2. 将用户性别数据映射为向量表示，通过全连接层得到性别特征。\n",
    "3. 将用户职业数据映射为向量表示，通过全连接层得到职业特征。\n",
    "4. 将用户年龄数据映射为向量表示，通过全连接层得到年龄特征。\n",
    "5. 融合ID、性别、职业、年龄特征，得到用户的特征表示。\n",
    "\n",
    "在用户特征计算网络中，我们对每个用户数据做embedding处理，然后经过一个全连接层，激活函数使用ReLU，得到用户所有特征后，将特征整合，经过一个全连接层得到最终的用户数据特征，该特征的维度是200维，用于和电影特征计算相似度。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 1. 用户ID特征提取\n",
    "\n",
    "开始构建用户ID的特征提取网络，ID特征提取包括两个部分，首先，使用Embedding将用户ID映射为向量，然后，使用一层全连接层和relu激活函数进一步提取用户ID特征。\n",
    "相比较于电影类别、电影名称，用户ID只包含一个数字，数据更为简单。这里需要考虑将用户ID映射为多少维度的向量合适，使用维度过大的向量表示用户ID容易造成信息冗余，维度过低又不足以表示该用户的特征。理论上来说，如果使用二进制表示用户ID，用户最大ID是6040，小于2的13次方，因此，理论上使用13维度的向量已经足够了，为了让不同ID的向量更具区分性，我们选择将用户ID映射为维度为32维的向量。\n",
    "\n",
    "\n",
    "下面是用户ID特征提取代码实现：\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "输入的用户ID是: [[917]\n",
      " [729]]\n",
      "用户ID的特征是： [[0.         0.03820676 0.         0.01453298 0.         0.\n",
      "  0.01567029 0.         0.00028126 0.00054324 0.01752087 0.00336994\n",
      "  0.01169302 0.0093234  0.         0.         0.01241549 0.\n",
      "  0.0189792  0.04839938 0.01415811 0.         0.         0.\n",
      "  0.00365109 0.         0.02819325 0.         0.         0.\n",
      "  0.         0.0018838 ]\n",
      " [0.         0.         0.         0.01760999 0.         0.\n",
      "  0.02301502 0.         0.00010492 0.         0.         0.02536356\n",
      "  0.02390866 0.0024154  0.         0.02786927 0.         0.\n",
      "  0.         0.00311649 0.         0.00683793 0.         0.00913818\n",
      "  0.         0.         0.         0.         0.01072721 0.02428497\n",
      "  0.01224527 0.00599579]] \n",
      "其形状是： [2, 32]\n"
     ]
    }
   ],
   "source": [
    "# Make up a small batch of user ID data.\n",
    "usr_id_data = np.random.randint(0, 6040, (2)).reshape((-1, 1)).astype('int64')\n",
    "print(\"输入的用户ID是:\", usr_id_data)\n",
    "# Create the PaddlePaddle dynamic-graph (dygraph) workspace.\n",
    "with dygraph.guard():\n",
    "    # Maximum user ID (6040) plus 1, to account for the index 0.\n",
    "    USR_ID_NUM = 6040 + 1\n",
    "    # Define the embedding layer and the fully-connected layer for user IDs.\n",
    "    usr_emb = Embedding(\"id_emb\", [USR_ID_NUM, 32], is_sparse=False)\n",
    "    usr_fc = FC(\"id_fc\", size=32)\n",
    "    \n",
    "    usr_id_var = dygraph.to_variable(usr_id_data)\n",
    "    usr_id_feat = usr_fc(usr_emb(usr_id_var))\n",
    "    usr_id_feat = fluid.layers.relu(usr_id_feat)\n",
    "    print(\"用户ID的特征是：\", usr_id_feat.numpy(), \"\\n其形状是：\", usr_id_feat.shape)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "注意到，将用户ID映射为one-hot向量时，Embedding层参数size的第一个参数是在用户的最大ID基础上加上1。原因很简单，从上一节数据处理已经发现，用户ID是从1开始计数的，最大的用户ID是6040。并且已经知道通过Embedding映射输入数据时，是先把输入数据转换成one-hot向量。向量中只有一个 1 的向量才被称为one-hot向量，比如，0 用四维的one-hot向量表示是[1, 0, 0, 0]，同时，4维的one-hot向量最大只能表示3。所以，要把数字6040用one-hot向量表示，至少需要用6041维度的向量。\n",
    "\n",
    "\n",
    "接下来我们会看到，类似的Embeding层也适用于处理用户性别、年龄和职业，以及电影ID等特征，实现代码均是类似的。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 用户性别特征提取\n",
    "\n",
    "接下来构建用户性别的特征提取网络，同用户ID特征提取步骤，使用Embedding层和全连接层提取用户性别特征。用户性别不同于用户ID数据具备数千数万种不同数据，性别只有两种可能，不需要使用高维度的向量表示用户性别特征，这里我们将用户性别用16维的向量表示。\n",
    "\n",
    "下面是用户性别特征提取实现："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "输入的用户性别是: [[0]\n",
      " [1]]\n",
      "用户性别特征的数据特征是： [[0.0000000e+00 0.0000000e+00 1.4641075e-01 0.0000000e+00 6.1696693e-02\n",
      "  5.3460824e-01 0.0000000e+00 8.9638315e-02 0.0000000e+00 1.1237986e-01\n",
      "  0.0000000e+00 3.8502523e-01 1.7975052e-01 0.0000000e+00 6.9481003e-01\n",
      "  0.0000000e+00]\n",
      " [0.0000000e+00 5.0918781e-03 0.0000000e+00 3.4076828e-01 3.9827053e-05\n",
      "  5.8201220e-02 3.6740580e-01 0.0000000e+00 3.2990295e-01 1.9802645e-02\n",
      "  1.4360195e-01 3.2810035e-01 8.9142390e-02 0.0000000e+00 0.0000000e+00\n",
      "  1.2751205e-01]] \n",
      "其形状是： [2, 16]\n",
      "\n",
      "性别 0 对应的特征是： [0.         0.         0.14641075 0.         0.06169669 0.53460824\n",
      " 0.         0.08963832 0.         0.11237986 0.         0.38502523\n",
      " 0.17975052 0.         0.69481003 0.        ]\n",
      "性别 1 对应的特征是： [0.0000000e+00 5.0918781e-03 0.0000000e+00 3.4076828e-01 3.9827053e-05\n",
      " 5.8201220e-02 3.6740580e-01 0.0000000e+00 3.2990295e-01 1.9802645e-02\n",
      " 1.4360195e-01 3.2810035e-01 8.9142390e-02 0.0000000e+00 0.0000000e+00\n",
      " 1.2751205e-01]\n"
     ]
    }
   ],
   "source": [
    "# Make up a batch of user gender data.\n",
    "usr_gender_data = np.array((0, 1)).reshape(-1, 1).astype('int64')\n",
    "print(\"输入的用户性别是:\", usr_gender_data)\n",
    "# Create the PaddlePaddle dynamic-graph (dygraph) workspace.\n",
    "with dygraph.guard():\n",
    "    # Gender is encoded as 0 or 1.\n",
    "    # The largest gender ID is 1, so the first element of the Embedding\n",
    "    # size is 1 + 1 = 2.\n",
    "    USR_ID_NUM = 2\n",
    "    # Map the gender to a vector, followed by a fully-connected layer.\n",
    "    USR_GENDER_DICT_SIZE = 2\n",
    "    usr_gender_emb = Embedding(\"gender_emb\", [USR_GENDER_DICT_SIZE, 16])\n",
    "    usr_gender_fc = FC(\"gender_fc\", 16)\n",
    "    \n",
    "    usr_gender_var = dygraph.to_variable(usr_gender_data)\n",
    "    usr_gender_feat = usr_gender_fc(usr_gender_emb(usr_gender_var))\n",
    "    usr_gender_feat = fluid.layers.relu(usr_gender_feat)\n",
    "    print(\"用户性别特征的数据特征是：\", usr_gender_feat.numpy(), \"\\n其形状是：\", usr_gender_feat.shape)\n",
    "    print(\"\\n性别 0 对应的特征是：\", usr_gender_feat.numpy()[0, :])\n",
    "    print(\"性别 1 对应的特征是：\", usr_gender_feat.numpy()[1, :])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 用户年龄特征提取\n",
    "然后构建用户年龄的特征提取网络，同样采用Embedding层和全连接层的方式提取特征。\n",
    "\n",
    "前面我们了解到年龄数据分布是：\n",
    "* 1: \"Under 18\"\n",
    "* 18: \"18-24\"\n",
    "* 25: \"25-34\"\n",
    "* 35: \"35-44\"\n",
    "* 45: \"45-49\"\n",
    "* 50: \"50-55\"\n",
    "* 56: \"56+\"\n",
    "\n",
    "得知用户年龄最大值为56，这里仍将用户年龄用16维的向量表示。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "输入的用户年龄是: [[ 1]\n",
      " [18]]\n",
      "用户年龄特征的数据特征是： [[0.         0.         0.         0.25378123 0.05124863 0.03770515\n",
      "  0.         0.         0.         0.0533719  0.         0.0742717\n",
      "  0.         0.         0.         0.        ]\n",
      " [0.32321012 0.         0.         0.13603781 0.         0.\n",
      "  0.         0.         0.04270222 0.22385077 0.         0.\n",
      "  0.         0.         0.24460353 0.        ]] \n",
      "其形状是： [2, 16]\n",
      "\n",
      "年龄 1 对应的特征是： [0.         0.         0.         0.25378123 0.05124863 0.03770515\n",
      " 0.         0.         0.         0.0533719  0.         0.0742717\n",
      " 0.         0.         0.         0.        ]\n",
      "年龄 18 对应的特征是： [0.32321012 0.         0.         0.13603781 0.         0.\n",
      " 0.         0.         0.04270222 0.22385077 0.         0.\n",
      " 0.         0.         0.24460353 0.        ]\n"
     ]
    }
   ],
   "source": [
    "# Make up a batch of user age data.\n",
    "usr_age_data = np.array((1, 18)).reshape(-1, 1).astype('int64')\n",
    "print(\"输入的用户年龄是:\", usr_age_data)\n",
    "# Create the PaddlePaddle dynamic-graph (dygraph) workspace.\n",
    "with dygraph.guard():\n",
    "    # Map the age to a vector, followed by a fully-connected layer.\n",
    "    # The largest age ID is 56, so the first element of the Embedding\n",
    "    # size is 56 + 1 = 57.\n",
    "    USR_AGE_DICT_SIZE = 56 + 1\n",
    "    \n",
    "    usr_age_emb = Embedding(\"age_emb\", [USR_AGE_DICT_SIZE, 16])\n",
    "    usr_age_fc = FC(\"age_fc\", 16)\n",
    "    \n",
    "    usr_age = dygraph.to_variable(usr_age_data)\n",
    "    usr_age_feat = usr_age_emb(usr_age)\n",
    "    usr_age_feat = usr_age_fc(usr_age_feat)\n",
    "    usr_age_feat = fluid.layers.relu(usr_age_feat)\n",
    "    \n",
    "    print(\"用户年龄特征的数据特征是：\", usr_age_feat.numpy(), \"\\n其形状是：\", usr_age_feat.shape)\n",
    "    print(\"\\n年龄 1 对应的特征是：\", usr_age_feat.numpy()[0, :])\n",
    "    print(\"年龄 18 对应的特征是：\", usr_age_feat.numpy()[1, :])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 用户职业特征提取\n",
    "\n",
    "参考用户年龄的处理方式实现用户职业的特征提取，同样采用Embedding层和全连接层的方式提取特征。由上一节信息可以得知用户职业的最大数字表示是20。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "输入的用户职业是: [[ 0]\n",
      " [20]]\n",
      "用户年龄特征的数据特征是： [[0.16487825 0.04119981 0.21283698 0.         0.         0.23629026\n",
      "  0.16277236 0.5439653  0.         0.1765569  0.         0.07147542\n",
      "  0.13363902 0.28369215 0.16702755 0.1669505 ]\n",
      " [0.01479961 0.25870875 0.28695026 0.         0.         0.33058694\n",
      "  0.7261139  0.3193245  0.02760086 0.         0.14927603 0.23376565\n",
      "  0.18442614 0.         0.05812178 0.        ]] \n",
      "其形状是： [2, 16]\n",
      "\n",
      "职业 0 对应的特征是： [0.16487825 0.04119981 0.21283698 0.         0.         0.23629026\n",
      " 0.16277236 0.5439653  0.         0.1765569  0.         0.07147542\n",
      " 0.13363902 0.28369215 0.16702755 0.1669505 ]\n",
      "职业 20 对应的特征是： [0.01479961 0.25870875 0.28695026 0.         0.         0.33058694\n",
      " 0.7261139  0.3193245  0.02760086 0.         0.14927603 0.23376565\n",
      " 0.18442614 0.         0.05812178 0.        ]\n"
     ]
    }
   ],
   "source": [
    "# Make up a batch of user job data.\n",
    "usr_job_data = np.array((0, 20)).reshape(-1, 1).astype('int64')\n",
    "print(\"输入的用户职业是:\", usr_job_data)\n",
    "# Create the PaddlePaddle dynamic-graph (dygraph) workspace.\n",
    "with dygraph.guard():\n",
    "    # Map the job to a vector, followed by a fully-connected layer.\n",
    "    # The largest job ID is 20, so the first element of the Embedding\n",
    "    # size is 20 + 1 = 21.\n",
    "    USR_JOB_DICT_SIZE = 20 + 1\n",
    "    usr_job_emb = Embedding(\"job_emb\", [USR_JOB_DICT_SIZE, 16])\n",
    "    usr_job_fc = FC(\"job_fc\", 16)\n",
    "    \n",
    "    usr_job = dygraph.to_variable(usr_job_data)\n",
    "    usr_job_feat = usr_job_emb(usr_job)\n",
    "    usr_job_feat = usr_job_fc(usr_job_feat)\n",
    "    usr_job_feat = fluid.layers.relu(usr_job_feat)\n",
    "    \n",
    "    # NOTE(review): the printed label below says 'age' (年龄) although these\n",
    "    # are job features; the string is kept unchanged to match the recorded\n",
    "    # cell output.\n",
    "    print(\"用户年龄特征的数据特征是：\", usr_job_feat.numpy(), \"\\n其形状是：\", usr_job_feat.shape)\n",
    "    print(\"\\n职业 0 对应的特征是：\", usr_job_feat.numpy()[0, :])\n",
    "    print(\"职业 20 对应的特征是：\", usr_job_feat.numpy()[1, :])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 用户特征融合\n",
    "\n",
    "特征融合是一种常用的特征增强手段，通过结合不同特征的长处，达到取长补短的目的。简单的融合方法有：特征（加权）相加、特征级联、特征正交等等。此处使用特征融合是为了将用户的多个特征融合到一起，用单个向量表示每个用户，更方便计算用户与电影的相似度。上文使用Embedding加全连接的方法，分别得到了用户ID、年龄、性别、职业的特征向量，可以使用全连接层将每个特征映射到固定长度，然后进行相加，得到融合特征。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "用户融合后特征的维度是： [2, 200]\n"
     ]
    }
   ],
   "source": [
    "with dygraph.guard():\n",
    "    \n",
    "    # One FC head per user attribute, all projecting to a common 200-d space\n",
    "    FC_ID = FC(\"fc_id\", 200, act='tanh')\n",
    "    FC_GENDER = FC(\"fc_gender\", 200, act='tanh')\n",
    "    FC_AGE = FC(\"fc_age\", 200, act='tanh')\n",
    "    FC_JOB = FC(\"fc_job\", 200, act='tanh')\n",
    "    \n",
    "    # Gather all user features computed in the cells above; the numpy round-trip\n",
    "    # detaches them from the graphs they were produced in\n",
    "    _features = [usr_id_feat, usr_job_feat, usr_age_feat, usr_gender_feat]\n",
    "    _features = [k.numpy() for k in _features]\n",
    "    _features = [dygraph.to_variable(k) for k in _features]\n",
    "    \n",
    "    id_feat = FC_ID(_features[0])\n",
    "    job_feat = FC_JOB(_features[1])\n",
    "    age_feat = FC_AGE(_features[2])\n",
    "    gender_feat = FC_GENDER(_features[3])  # fixed typo 'genger_feat'; explicit index instead of -1\n",
    "    \n",
    "    # Element-wise sum of the projected features\n",
    "    usr_feat = id_feat + job_feat + age_feat + gender_feat\n",
    "    print(\"用户融合后特征的维度是：\", usr_feat.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "这里使用全连接层进一步提取特征，而不是直接相加得到用户特征的原因有两点：\n",
    "* 一是用户每个特征数据维度不一致，无法直接相加；\n",
    "* 二是用户每个特征仅使用了一层全连接层，提取特征不充分，多使用一层全连接层能进一步提取特征。而且，这里用高维度（200维）的向量表示用户特征，能包含更多的信息，每个用户特征之间的区分也更明显。\n",
    "\n",
    "上述实现中需要对每个特征都使用一个全连接层，实现较为复杂，一种简单的替换方式是，先将每个用户特征沿着长度维度进行级联，然后使用一个全连接层获得整个的用户特征向量，两种方式的对比见下图：\n",
    "\n",
    "<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/49f83d35fb92408a8f0e07121089b99cc30ecc0c8583429f9e276796287bdb54\" width=\"400\" ></center>\n",
    "<center> 图1:方式1-特征逐个全连接后相加 </center>\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/4da17402baa54818a6caa42c5a431541e839f7c7de4e4b5d911c7751a4e646ad\" width=\"300\" ></center>\n",
    "<center> 图2:方式2-特征级联后使用全连接 </center><br>\n",
    "\n",
    "\n",
    "两种方式均可实现向量的合并，虽然两者的数学公式不同，但它们的表达能力是类似的。\n",
    "\n",
    "\n",
    "下面是方式2的代码实现。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "用户融合后特征的维度是： [2, 200]\n"
     ]
    }
   ],
   "source": [
    "with dygraph.guard():\n",
    "    usr_combined = FC(\"fusion\", 200, act='tanh')\n",
    "    \n",
    "    # Gather every per-attribute user feature; round-trip each through numpy to\n",
    "    # detach it from the graph it was produced in, then re-wrap as a variable\n",
    "    raw_feats = [usr_id_feat, usr_job_feat, usr_age_feat, usr_gender_feat]\n",
    "    feat_vars = [dygraph.to_variable(feat.numpy()) for feat in raw_feats]\n",
    "    \n",
    "    # Concatenate along the feature (last) axis, then fuse with a single FC layer\n",
    "    usr_feat = usr_combined(fluid.layers.concat(input=feat_vars, axis=1))\n",
    "    print(\"用户融合后特征的维度是：\", usr_feat.shape)\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "上述代码中，我们使用了[fluid.layers.concat()](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/concat_cn.html#cn-api-fluid-layers-concat)这个API，该API有两个参数，一个是列表形式的输入数据，另一个是axis，表示沿着第几个维度将输入数据级联到一起。\n",
    "\n",
    "至此我们已经完成了用户特征提取网络的设计，包括ID特征提取、性别特征提取、年龄特征提取、职业特征提取和特征融合模块，下面我们将所有的模块整合到一起，放到Python类中，完整代码实现如下："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "##Total dataset instances:  382499\n",
      "##MovieLens dataset information: \n",
      "usr num: 6040\n",
      "movies num: 3883\n",
      "输入的用户ID数据：[[3363]]\n",
      "性别数据：[[1]] \n",
      "年龄数据：[[35]] \n",
      "职业数据[[9]]\n",
      "计算得到的用户特征维度是： [1, 200]\n"
     ]
    }
   ],
   "source": [
    "import random\n",
    "class Model(dygraph.layers.Layer):\n",
    "    def __init__(self, name_scope, use_poster, use_mov_title, use_mov_cat, use_age_job):\n",
    "        super(Model, self).__init__(name_scope)\n",
    "        name = self.full_name()\n",
    "        \n",
    "        # Record the boolean feature switches on the model instance\n",
    "        self.use_mov_poster = use_poster\n",
    "        self.use_mov_title = use_mov_title\n",
    "        self.use_usr_age_job = use_age_job\n",
    "        self.use_mov_cat = use_mov_cat\n",
    "        \n",
    "        # Use the MovieLen data class from the previous section to read the\n",
    "        # dataset and build the train / validation data iterators\n",
    "        Dataset = MovieLen(self.use_mov_poster)\n",
    "        self.Dataset = Dataset\n",
    "        self.trainset = self.Dataset.train_dataset\n",
    "        self.valset = self.Dataset.valid_dataset\n",
    "        self.train_loader = self.Dataset.load_data(dataset=self.trainset, mode='train')\n",
    "        self.valid_loader = self.Dataset.load_data(dataset=self.valset, mode='valid')\n",
    "\n",
    "        \"\"\" define network layer for embedding usr info \"\"\"\n",
    "        USR_ID_NUM = Dataset.max_usr_id + 1\n",
    "        # Embed the user ID, followed by an FC layer\n",
    "        self.usr_emb = Embedding(name, [USR_ID_NUM, 32], is_sparse=False)\n",
    "        self.usr_fc = FC(name, size=32)\n",
    "        \n",
    "        # Embed the user gender, followed by an FC layer\n",
    "        USR_GENDER_DICT_SIZE = 2\n",
    "        self.usr_gender_emb = Embedding(name, [USR_GENDER_DICT_SIZE, 16])\n",
    "        self.usr_gender_fc = FC(name, 16)\n",
    "        \n",
    "        # Embed the user age, followed by an FC layer\n",
    "        USR_AGE_DICT_SIZE = Dataset.max_usr_age + 1\n",
    "        self.usr_age_emb = Embedding(name, [USR_AGE_DICT_SIZE, 16])\n",
    "        self.usr_age_fc = FC(name, 16)\n",
    "        \n",
    "        # Embed the user job, followed by an FC layer\n",
    "        USR_JOB_DICT_SIZE = Dataset.max_usr_job + 1\n",
    "        self.usr_job_emb = Embedding(name, [USR_JOB_DICT_SIZE, 16])\n",
    "        self.usr_job_fc = FC(name, 16)\n",
    "        \n",
    "        # A final FC layer that fuses all user features into one vector\n",
    "        self.usr_combined = FC(name, 200, act='tanh')\n",
    "    \n",
    "    # Forward pass producing the fused user feature\n",
    "    def get_usr_feat(self, usr_var):\n",
    "        \"\"\"Compute the fused [batch, 200] user feature from (id, gender, age, job).\"\"\"\n",
    "        # Unpack the user data\n",
    "        usr_id, usr_gender, usr_age, usr_job = usr_var\n",
    "        # Run the user ID through embedding + FC; collect results in feats_collect\n",
    "        feats_collect = []\n",
    "        usr_id = self.usr_emb(usr_id)\n",
    "        usr_id = self.usr_fc(usr_id)\n",
    "        usr_id = fluid.layers.relu(usr_id)\n",
    "        feats_collect.append(usr_id)\n",
    "        \n",
    "        # Compute the gender feature and collect it\n",
    "        usr_gender = self.usr_gender_emb(usr_gender)\n",
    "        usr_gender = self.usr_gender_fc(usr_gender)\n",
    "        usr_gender = fluid.layers.relu(usr_gender)\n",
    "        feats_collect.append(usr_gender)\n",
    "        # Optionally include the age / job features\n",
    "        if self.use_usr_age_job:\n",
    "            # Compute the age feature and collect it\n",
    "            usr_age = self.usr_age_emb(usr_age)\n",
    "            usr_age = self.usr_age_fc(usr_age)\n",
    "            usr_age = fluid.layers.relu(usr_age)\n",
    "            feats_collect.append(usr_age)\n",
    "            # Compute the job feature and collect it\n",
    "            usr_job = self.usr_job_emb(usr_job)\n",
    "            usr_job = self.usr_job_fc(usr_job)\n",
    "            usr_job = fluid.layers.relu(usr_job)\n",
    "            feats_collect.append(usr_job)\n",
    "        \n",
    "        # Concatenate all user features, then fuse them with the final FC layer\n",
    "        usr_feat = fluid.layers.concat(feats_collect, axis=1)\n",
    "        usr_feat = self.usr_combined(usr_feat)\n",
    "        return usr_feat\n",
    "    \n",
    "# Use the data loader defined above to go from raw user data to the user feature:\n",
    "## Test the user feature extraction network\n",
    "with dygraph.guard():\n",
    "    model = Model(\"Usr\", use_poster=False, use_mov_title=True, use_mov_cat=True, use_age_job=True)\n",
    "    model.eval()\n",
    "    \n",
    "    data_loader = model.train_loader\n",
    "    \n",
    "    for idx, data in enumerate(data_loader()):\n",
    "        # Fetch one batch and convert it to dygraph variables\n",
    "        usr, mov, score = data\n",
    "        # Only use the first record of each batch\n",
    "        usr_v = [var[0:1, :] for var in usr]\n",
    "        \n",
    "        print(\"输入的用户ID数据：{}\\n性别数据：{} \\n年龄数据：{} \\n职业数据{}\".format(*usr_v))\n",
    "        usr_v = [dygraph.to_variable(var) for var in usr_v]\n",
    "        usr_feat = model.get_usr_feat(usr_v)\n",
    "        print(\"计算得到的用户特征维度是：\", usr_feat.shape)\n",
    "        break\n",
    "        "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "上面使用了向量级联+全连接的方式实现了四个用户特征向量的合并，在下面处理电影特征的部分我们会看到使用另外一种向量合并的方式（向量相加）处理电影类型的特征(6个向量合并成1个向量)。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "\n",
    "# 电影特征提取网络\n",
    "\n",
    "接下来我们构建提取电影特征的神经网络，与用户特征网络结构不同的是，电影的名称和类别均有多个数字信息，我们构建网络时，对这两类特征的处理方式也不同。\n",
    "\n",
    "<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/5167dbbbf479416dae412d063c0a38fcad7e514fceca4fa1a94ee5160c71d44f\"\n",
    "width=\"450\" ></center>\n",
    "\n",
    "\n",
    "电影特征网络主要包括：\n",
    "1. 将电影ID数据映射为向量表示，通过全连接层得到ID特征。\n",
    "2. 将电影类别数据映射为向量表示，对电影类别的向量求和得到类别特征。\n",
    "3. 将电影名称数据映射为向量表示，通过卷积层计算得到名称特征。\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 1. 提取电影ID特征\n",
    "与计算用户ID特征的方式类似，我们通过如下方式实现电影ID特性提取。根据上一节信息得知电影ID的最大值是3952。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "输入的电影ID是: [[1]\n",
      " [2]]\n",
      "计算的电影ID的特征是 [[0.01656137 0.00533426 0.         0.         0.05033536 0.\n",
      "  0.         0.01549008 0.         0.05394403 0.         0.0050888\n",
      "  0.         0.         0.         0.         0.00211088 0.04697474\n",
      "  0.         0.         0.         0.02592899 0.         0.\n",
      "  0.04141827 0.         0.03567821 0.00857352 0.         0.02090432\n",
      "  0.03244991 0.00806769]\n",
      " [0.02426732 0.01427725 0.         0.         0.         0.\n",
      "  0.         0.01754279 0.01448504 0.00088827 0.00526408 0.\n",
      "  0.         0.         0.         0.         0.         0.\n",
      "  0.01942217 0.         0.01887466 0.         0.02336416 0.00379372\n",
      "  0.         0.         0.00112392 0.03524464 0.02226486 0.\n",
      "  0.         0.0144096 ]] \n",
      "其形状是： [2, 32]\n",
      "\n",
      "电影ID为 1 计算得到的特征是：[0.01656137 0.00533426 0.         0.         0.05033536 0.\n",
      " 0.         0.01549008 0.         0.05394403 0.         0.0050888\n",
      " 0.         0.         0.         0.         0.00211088 0.04697474\n",
      " 0.         0.         0.         0.02592899 0.         0.\n",
      " 0.04141827 0.         0.03567821 0.00857352 0.         0.02090432\n",
      " 0.03244991 0.00806769]\n",
      "电影ID为 2 计算得到的特征是：[0.02426732 0.01427725 0.         0.         0.         0.\n",
      " 0.         0.01754279 0.01448504 0.00088827 0.00526408 0.\n",
      " 0.         0.         0.         0.         0.         0.\n",
      " 0.01942217 0.         0.01887466 0.         0.02336416 0.00379372\n",
      " 0.         0.         0.00112392 0.03524464 0.02226486 0.\n",
      " 0.         0.0144096 ]\n"
     ]
    }
   ],
   "source": [
    "# A toy batch of two movie IDs, shaped [2, 1]\n",
    "mov_id_data = np.array((1, 2)).reshape(-1, 1).astype('int64')\n",
    "with dygraph.guard():\n",
    "    # Embed the movie ID, followed by an FC layer; max movie ID is 3952\n",
    "    MOV_DICT_SIZE = 3952 + 1\n",
    "    mov_emb = Embedding(\"mov_id_emb\", [MOV_DICT_SIZE, 32])\n",
    "    mov_fc = FC(\"mov_id_fc\", 32)\n",
    "    \n",
    "    \n",
    "    print(\"输入的电影ID是:\", mov_id_data)\n",
    "    mov_id_data = dygraph.to_variable(mov_id_data)\n",
    "    mov_id_feat = mov_fc(mov_emb(mov_id_data))\n",
    "    mov_id_feat = fluid.layers.relu(mov_id_feat)\n",
    "    print(\"计算的电影ID的特征是\", mov_id_feat.numpy(), \"\\n其形状是：\", mov_id_feat.shape)\n",
    "    print(\"\\n电影ID为 {} 计算得到的特征是：{}\".format(mov_id_data.numpy()[0, 0], mov_id_feat.numpy()[0]))\n",
    "    print(\"电影ID为 {} 计算得到的特征是：{}\".format(mov_id_data.numpy()[1, 0], mov_id_feat.numpy()[1]))\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 2. 提取电影类别特征\n",
    "\n",
    "与电影ID数据不同的是，每个电影有多个类别，提取类别特征时，如果对每个类别数据都使用一个全连接层，电影最多的类别数是6，会导致类别特征提取网络参数过多而不利于学习。我们对于电影类别特征提取的处理方式是：\n",
    "1. 通过Embedding网络层将电影类别数字映射为特征向量；\n",
    "2. 对Embedding后的向量沿着类别数量维度进行求和，得到一个类别映射向量；\n",
    "3. 通过一个全连接层计算类别特征向量。\n",
    "\n",
    "数据处理章节已经介绍到，每个电影的类别数量是不固定的，且一个电影最大的类别数量是6，类别数量不足6的通过补0到6维。因此，每个类别的数据维度是6，每个电影类别有6个Embedding向量。我们希望用一个向量就可以表示电影类别，可以对电影类别数量维度降维，\n",
    "这里对6个Embedding向量通过求和的方式降维，得到电影类别的向量表示。\n",
    "\n",
    "下面是电影类别特征提取的实现方法："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "输入的电影类别是: [[1 2 3 0 0 0]\n",
      " [2 3 4 0 0 0]]\n",
      "计算的电影类别的特征是 [[0.47408935 0.986257   1.3328826  0.         0.06820097 0.\n",
      "  0.         0.92140436 0.         0.9633608  0.         0.4569673\n",
      "  0.40242547 0.         0.7391968  0.         0.         0.01125839\n",
      "  0.         0.12554677 0.         0.         0.         0.5695038\n",
      "  1.0716581  0.03970256 0.         0.         0.56953406 0.18410711\n",
      "  1.2002702  0.5191878 ]\n",
      " [0.08146538 1.280993   1.0304523  0.         0.5903226  0.\n",
      "  0.         0.72602093 0.         0.67210263 0.         0.37822416\n",
      "  0.203895   0.         0.36018944 0.         0.         0.14246006\n",
      "  0.         0.60479134 0.         0.         0.         0.4797134\n",
      "  1.2993885  0.8698921  0.         0.10458324 0.4910287  0.10889157\n",
      "  1.0748899  0.16870977]] \n",
      "其形状是： [2, 32]\n",
      "\n",
      "电影类别为 [1 2 3 0 0 0] 计算得到的特征是：[0.47408935 0.986257   1.3328826  0.         0.06820097 0.\n",
      " 0.         0.92140436 0.         0.9633608  0.         0.4569673\n",
      " 0.40242547 0.         0.7391968  0.         0.         0.01125839\n",
      " 0.         0.12554677 0.         0.         0.         0.5695038\n",
      " 1.0716581  0.03970256 0.         0.         0.56953406 0.18410711\n",
      " 1.2002702  0.5191878 ]\n",
      "\n",
      "电影类别为 [2 3 4 0 0 0] 计算得到的特征是：[0.08146538 1.280993   1.0304523  0.         0.5903226  0.\n",
      " 0.         0.72602093 0.         0.67210263 0.         0.37822416\n",
      " 0.203895   0.         0.36018944 0.         0.         0.14246006\n",
      " 0.         0.60479134 0.         0.         0.         0.4797134\n",
      " 1.2993885  0.8698921  0.         0.10458324 0.4910287  0.10889157\n",
      " 1.0748899  0.16870977]\n"
     ]
    }
   ],
   "source": [
    "# A toy batch of two movie-category lists, shaped [2, 6, 1] (6 = max categories per movie)\n",
    "mov_cat_data = np.array(((1, 2, 3, 0, 0, 0), (2, 3, 4, 0, 0, 0))).reshape(2, -1, 1).astype('int64')\n",
    "with dygraph.guard():\n",
    "    # Embed the movie CATEGORIES (original comment wrongly said movie ID), followed\n",
    "    # by an FC layer; the vocabulary is sized for this toy example's IDs (at most 6)\n",
    "    MOV_DICT_SIZE = 6 + 1\n",
    "    mov_emb = Embedding(\"mov_cat_emb\", [MOV_DICT_SIZE, 32])\n",
    "    mov_fc = FC(\"mov_cat_fc\", 32)\n",
    "    \n",
    "    print(\"输入的电影类别是:\", mov_cat_data[:, :, 0])\n",
    "    mov_cat_data = dygraph.to_variable(mov_cat_data)\n",
    "    # 1. Map the category IDs through the Embedding layer;\n",
    "    mov_cat_feat = mov_emb(mov_cat_data)\n",
    "    # 2. Sum the embedded vectors along the category-count dimension to get one vector;\n",
    "    mov_cat_feat = fluid.layers.reduce_sum(mov_cat_feat, dim=1, keep_dim=False)\n",
    "\n",
    "    # 3. Compute the category feature with one FC layer.\n",
    "    mov_cat_feat = mov_fc(mov_cat_feat)\n",
    "    mov_cat_feat = fluid.layers.relu(mov_cat_feat)\n",
    "    print(\"计算的电影类别的特征是\", mov_cat_feat.numpy(), \"\\n其形状是：\", mov_cat_feat.shape)\n",
    "    print(\"\\n电影类别为 {} 计算得到的特征是：{}\".format(mov_cat_data.numpy()[0, :, 0], mov_cat_feat.numpy()[0]))\n",
    "    print(\"\\n电影类别为 {} 计算得到的特征是：{}\".format(mov_cat_data.numpy()[1, :, 0], mov_cat_feat.numpy()[1]))\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "因为待合并的6个向量具有相同的维度，所以直接按位相加即可得到综合的向量表示。当然，我们也可以采用向量级联的方式，将6个32维的向量级联成192维的向量，再通过全连接层压缩成32维度，代码实现上要臃肿一些。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 3. 提取电影名称特征\n",
    "\n",
    "与电影类别数据一样，每个电影名称具有多个单词。\n",
    "我们对于电影名称特征提取的处理方式是：\n",
    "1. 通过Embedding映射电影名称数据，得到对应的特征向量；\n",
    "2. 对Embedding后的向量使用卷积层+全连接层进一步提取特征；\n",
    "3. 对特征进行降采样，降低数据维度；\n",
    "\n",
    "提取电影名称特征时，使用了卷积层加全连接层的方式提取特征。这是因为电影名称单词较多，电影名称的最大单词数量是15，如果采用和电影类别同样的处理方式，即沿着数量维度求和，显然会损失很多信息，考虑到15这个维度较高，可以使用卷积层进一步提取特征，同时通过控制卷积层的步长，降低电影名称特征的维度。\n",
    "\n",
    "\n",
    "只是简单的经过一两层卷积全连接层后，特征的维度依然很大，为了得到更低维度的特征向量，有两种方式，一种是利用求和降采样的方式，另一种是继续使用神经网络层进行特征提取并逐渐降低特征维度。这里，我们采用“简单求和”的降采样方式，来降低电影名称特征的维度，通过飞桨的[reduce_sum](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/reduce_sum_cn.html#reduce-sum) API实现。\n",
    "\n",
    "下面是提取电影名称特征的代码实现：\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "电影名称数据的输入形状:  [2, 1, 15, 1]\n",
      "输入通过Embedding层的输出形状:  [2, 1, 15, 32]\n",
      "第一次卷积之后的特征输出形状:  [2, 1, 7, 32]\n",
      "第二次卷积之后的特征输出形状:  [2, 1, 5, 32]\n",
      "reduce_sum降采样后的特征输出形状:  [2, 1, 32]\n",
      "电影名称特征的最终特征输出形状： [2, 32]\n",
      "\n",
      "计算的电影名称的特征是 [[2.6393819e-01 6.5828353e-02 0.0000000e+00 0.0000000e+00 4.2019427e-02\n",
      "  8.1092335e-02 6.5766078e-01 1.7565861e-01 4.8021996e-01 1.5622834e-02\n",
      "  2.8785648e-02 1.2921916e-01 5.2693492e-01 1.4288965e-01 5.2679729e-01\n",
      "  0.0000000e+00 1.9477144e-01 8.7712690e-02 4.3581825e-01 0.0000000e+00\n",
      "  5.6280475e-02 1.9666490e-01 0.0000000e+00 1.5777704e-01 1.0026633e-02\n",
      "  6.2782172e-04 0.0000000e+00 2.8459020e-02 0.0000000e+00 5.3590906e-01\n",
      "  2.1291009e-01 0.0000000e+00]\n",
      " [2.8712243e-01 0.0000000e+00 0.0000000e+00 0.0000000e+00 0.0000000e+00\n",
      "  1.1786818e-01 7.1932220e-01 2.2620933e-01 4.8021996e-01 0.0000000e+00\n",
      "  4.1548539e-02 1.0500158e-01 5.6594974e-01 5.8143742e-02 4.6612173e-01\n",
      "  0.0000000e+00 2.0611955e-01 4.0807743e-03 4.9674165e-01 2.1871550e-02\n",
      "  0.0000000e+00 1.7206350e-01 0.0000000e+00 2.1421906e-01 0.0000000e+00\n",
      "  6.9273137e-02 7.3066212e-02 2.8459020e-02 0.0000000e+00 5.3590906e-01\n",
      "  1.8334694e-01 0.0000000e+00]] \n",
      "其形状是： [2, 32]\n",
      "\n",
      "电影名称为 [[1]] 计算得到的特征是：[2.6393819e-01 6.5828353e-02 0.0000000e+00 0.0000000e+00 4.2019427e-02\n",
      " 8.1092335e-02 6.5766078e-01 1.7565861e-01 4.8021996e-01 1.5622834e-02\n",
      " 2.8785648e-02 1.2921916e-01 5.2693492e-01 1.4288965e-01 5.2679729e-01\n",
      " 0.0000000e+00 1.9477144e-01 8.7712690e-02 4.3581825e-01 0.0000000e+00\n",
      " 5.6280475e-02 1.9666490e-01 0.0000000e+00 1.5777704e-01 1.0026633e-02\n",
      " 6.2782172e-04 0.0000000e+00 2.8459020e-02 0.0000000e+00 5.3590906e-01\n",
      " 2.1291009e-01 0.0000000e+00]\n",
      "\n",
      "电影名称为 [[2]] 计算得到的特征是：[0.28712243 0.         0.         0.         0.         0.11786818\n",
      " 0.7193222  0.22620933 0.48021996 0.         0.04154854 0.10500158\n",
      " 0.56594974 0.05814374 0.46612173 0.         0.20611955 0.00408077\n",
      " 0.49674165 0.02187155 0.         0.1720635  0.         0.21421906\n",
      " 0.         0.06927314 0.07306621 0.02845902 0.         0.53590906\n",
      " 0.18334694 0.        ]\n"
     ]
    }
   ],
   "source": [
    "# Two toy movie titles, padded to 15 word IDs each, shaped [2, 1, 15, 1]\n",
    "mov_title_data = np.array(((1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), \n",
    "                            (2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))).reshape(2, 1, 15, 1).astype('int64')\n",
    "with dygraph.guard():\n",
    "    # Embed the title words, then extract features with two conv layers\n",
    "    MOV_TITLE_DICT_SIZE = 1000 + 1\n",
    "    mov_title_emb = Embedding(\"mov_title_emb\", [MOV_TITLE_DICT_SIZE, 32], is_sparse=False)\n",
    "    # stride (2, 1) halves the word dimension while keeping the 32-d word vectors intact\n",
    "    mov_title_conv = Conv2D(\"mov_title_conv1\", 1, filter_size=(3, 1), stride=(2, 1), padding=0, act='relu')\n",
    "    # A second (3 x 1) conv layer used in place of a fully connected layer\n",
    "    mov_title_conv2 = Conv2D(\"mov_title_conv2\", 1, filter_size=(3, 1), stride=1, padding=0, act='relu')\n",
    "    \n",
    "    mov_title_data = dygraph.to_variable(mov_title_data)\n",
    "    print(\"电影名称数据的输入形状: \", mov_title_data.shape)\n",
    "    # 1. Map the title word IDs through the Embedding layer;\n",
    "    mov_title_feat = mov_title_emb(mov_title_data)\n",
    "    print(\"输入通过Embedding层的输出形状: \", mov_title_feat.shape)\n",
    "    # 2. Extract features from the embedded vectors with the conv layers;\n",
    "    mov_title_feat = mov_title_conv(mov_title_feat)\n",
    "    print(\"第一次卷积之后的特征输出形状: \", mov_title_feat.shape)\n",
    "    mov_title_feat = mov_title_conv2(mov_title_feat)\n",
    "    print(\"第二次卷积之后的特征输出形状: \", mov_title_feat.shape)\n",
    "    \n",
    "    batch_size = mov_title_data.shape[0]\n",
    "    # 3. Down-sample by summing along the word dimension;\n",
    "    mov_title_feat = fluid.layers.reduce_sum(mov_title_feat, dim=2, keep_dim=False)\n",
    "    print(\"reduce_sum降采样后的特征输出形状: \", mov_title_feat.shape)\n",
    "    \n",
    "    mov_title_feat = fluid.layers.relu(mov_title_feat)\n",
    "    # Drop the leftover channel dimension: [2, 1, 32] -> [2, 32]\n",
    "    mov_title_feat = fluid.layers.reshape(mov_title_feat, [batch_size, -1])\n",
    "    print(\"电影名称特征的最终特征输出形状：\", mov_title_feat.shape)\n",
    "\n",
    "    print(\"\\n计算的电影名称的特征是\", mov_title_feat.numpy(), \"\\n其形状是：\", mov_title_feat.shape)\n",
    "    print(\"\\n电影名称为 {} 计算得到的特征是：{}\".format(mov_title_data.numpy()[0,:, 0], mov_title_feat.numpy()[0]))\n",
    "    print(\"\\n电影名称为 {} 计算得到的特征是：{}\".format(mov_title_data.numpy()[1,:, 0], mov_title_feat.numpy()[1]))\n",
    "    \n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "上述代码中，通过Embedding层已经获得了维度是[batch， 1， 15， 32]电影名称特征向量，因此，该特征可以视为是通道数量为1的特征图，很适合使用卷积层进一步提取特征。这里我们使用两个3 x 1大小的卷积核的卷积层提取特征，输出通道保持不变，仍然是1。特征维度中15是电影名称数量的维度，使用3 x 1的卷积核，由于卷积感受野的原因，进行卷积时会综合多个名称的特征，同时设置卷积的步长参数stride为(2, 1)，即可对名称数量维度降维，且保持每个名称的向量长度不变，防止过度压缩每个名称特征的信息。\n",
    "\n",
    "从输出结果来看，第一个卷积层之后的输出特征维度依然较大，可以使用第二个卷积层进一步提取特征。获得第二个卷积的特征后，特征的维度已经从7 x 32，降低到了5 x 32，因此可以直接使用求和（向量按位相加）的方式沿着电影名称维度进行降采样（5\\*32 -> 1\\*32），得到最终的电影名称特征向量。 \n",
    "\n",
    "需要注意的是，降采样后的数据尺寸依然比下一层要求的输入向量多出一维 [2, 1, 32]，所以最终输出前需调整下形状。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "####  电影特征融合：\n",
    "与用户特征融合方式相同，电影特征融合采用特征级联加全连接层的方式，将电影特征用一个200维的向量表示。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "用户融合后特征的维度是： [2, 200]\n"
     ]
    }
   ],
   "source": [
    "with dygraph.guard():\n",
    "    mov_combined = FC(\"mov_fusion\", 200, act='tanh')\n",
    "    \n",
    "    # Gather all MOVIE features (original comment and print label wrongly said \"user\")\n",
    "    _features = [mov_id_feat, mov_cat_feat, mov_title_feat]\n",
    "    _features = [k.numpy() for k in _features]\n",
    "    _features = [dygraph.to_variable(k) for k in _features]\n",
    "    \n",
    "    # Concatenate along the last (feature) dimension, then fuse with one FC layer\n",
    "    mov_feat = fluid.layers.concat(input=_features, axis=1)\n",
    "    mov_feat = mov_combined(mov_feat)\n",
    "    print(\"电影融合后特征的维度是：\", mov_feat.shape)\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "至此已经完成了电影特征提取的网络设计，包括电影ID特征提取、电影类别特征提取、电影名称特征提取。\n",
    "\n",
    "下面将这些模块整合到一个Python类中，完整代码如下："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "class MovModel(dygraph.layers.Layer):\n",
    "    def __init__(self, name_scope, use_poster, use_mov_title, use_mov_cat, use_age_job):\n",
    "        super(MovModel, self).__init__(name_scope)\n",
    "        name = self.full_name()\n",
    "        \n",
    "        # Record the boolean feature switches on the model instance\n",
    "        self.use_mov_poster = use_poster\n",
    "        self.use_mov_title = use_mov_title\n",
    "        self.use_usr_age_job = use_age_job\n",
    "        self.use_mov_cat = use_mov_cat\n",
    "        \n",
    "        # Read the dataset and build the train / validation data iterators\n",
    "        Dataset = MovieLen(self.use_mov_poster)\n",
    "        self.Dataset = Dataset\n",
    "        self.trainset = self.Dataset.train_dataset\n",
    "        self.valset = self.Dataset.valid_dataset\n",
    "        self.train_loader = self.Dataset.load_data(dataset=self.trainset, mode='train')\n",
    "        self.valid_loader = self.Dataset.load_data(dataset=self.valset, mode='valid')\n",
    "\n",
    "        \"\"\" define network layer for embedding usr info \"\"\"\n",
    "        # Embed the movie ID, followed by an FC layer\n",
    "        MOV_DICT_SIZE = Dataset.max_mov_id + 1\n",
    "        self.mov_emb = Embedding(name, [MOV_DICT_SIZE, 32])\n",
    "        self.mov_fc = FC(name, 32)\n",
    "        \n",
    "        # Embedding + FC for the movie categories\n",
    "        CATEGORY_DICT_SIZE = len(Dataset.movie_cat) + 1\n",
    "        self.mov_cat_emb = Embedding(name, [CATEGORY_DICT_SIZE, 32], is_sparse=False)\n",
    "        self.mov_cat_fc = FC(name, 32)\n",
    "        \n",
    "        # Embedding for the movie title, followed by two (3 x 1) conv layers\n",
    "        MOV_TITLE_DICT_SIZE = len(Dataset.movie_title) + 1\n",
    "        self.mov_title_emb = Embedding(name, [MOV_TITLE_DICT_SIZE, 32], is_sparse=False)\n",
    "        self.mov_title_conv = Conv2D(name, 1, filter_size=(3, 1), stride=(2,1), padding=0, act='relu')\n",
    "        self.mov_title_conv2 = Conv2D(name, 1, filter_size=(3, 1), stride=1, padding=0, act='relu')\n",
    "        \n",
    "        # A final FC layer that fuses all movie features\n",
    "        self.mov_concat_embed = FC(name, size=200, act='tanh')\n",
    "        \n",
    "    # Forward pass producing the fused movie feature\n",
    "    def get_mov_feat(self, mov_var):\n",
    "        \"\"\"Compute the fused [batch, 200] movie feature from (id, cat, title, poster).\"\"\"\n",
    "        # Unpack the movie data; the poster entry is accepted but unused here\n",
    "        mov_id, mov_cat, mov_title, mov_poster = mov_var\n",
    "        feats_collect = []\n",
    "        # Batch size, needed to flatten the title feature below\n",
    "        batch_size = mov_id.shape[0]\n",
    "        # Compute the movie-ID feature and collect it\n",
    "        mov_id = self.mov_emb(mov_id)\n",
    "        mov_id = self.mov_fc(mov_id)\n",
    "        mov_id = fluid.layers.relu(mov_id)\n",
    "        feats_collect.append(mov_id)\n",
    "        \n",
    "        # Optionally compute the movie-category feature\n",
    "        if self.use_mov_cat:\n",
    "            # Embed each category, then sum over the category dimension\n",
    "            # (removed a leftover debug print of mov_title.shape here)\n",
    "            mov_cat = self.mov_cat_emb(mov_cat)\n",
    "            mov_cat = fluid.layers.reduce_sum(mov_cat, dim=1, keep_dim=False)\n",
    "\n",
    "            mov_cat = self.mov_cat_fc(mov_cat)\n",
    "            feats_collect.append(mov_cat)\n",
    "\n",
    "        if self.use_mov_title:\n",
    "            # Embed the title words, then extract features with the two conv layers\n",
    "            mov_title = self.mov_title_emb(mov_title)\n",
    "            mov_title = self.mov_title_conv2(self.mov_title_conv(mov_title))\n",
    "            \n",
    "            # Down-sample by summing over the word dimension, then flatten to [batch, -1]\n",
    "            mov_title = fluid.layers.reduce_sum(mov_title, dim=2, keep_dim=False)\n",
    "            mov_title = fluid.layers.relu(mov_title)\n",
    "            mov_title = fluid.layers.reshape(mov_title, [batch_size, -1])\n",
    "            feats_collect.append(mov_title)\n",
    "            \n",
    "        # Concatenate all movie features and map them to a 200-d vector with one FC layer\n",
    "        mov_feat = fluid.layers.concat(feats_collect, axis=1)\n",
    "        mov_feat = self.mov_concat_embed(mov_feat)\n",
    "        return mov_feat"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "由上述电影特征处理的代码可以观察到：\n",
    "* 电影ID特征的计算方式和用户ID的计算方式相同。\n",
    "* 对于包含多个元素的电影类别数据，采用将所有元素的映射向量求和的结果作为最终的电影类别特征表示。考虑到电影类别的数量有限，这里采用简单的求和特征融合方式。\n",
    "* 对于电影的名称数据，其包含的元素数量多于电影种类元素数量，则采用卷积计算的方式，之后再将计算的特征沿着数据维度进行求和。读者也可自行设计该部分特征计算网络，并观察最终训练结果。\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "下面使用定义好的数据读取器，实现从电影数据中得到电影特征的计算流程："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "##Total dataset instances:  382499\n",
      "##MovieLens dataset information: \n",
      "usr num: 6040\n",
      "movies num: 3883\n",
      "输入的电影ID数据：2961\n",
      "类别数据：[3 7 0 0 0 0] \n",
      "名称数据：[  2  11 666  26   0   0   0   0   0   0   0   0   0   0   0] \n",
      "[1, 1, 15, 1]\n",
      "计算得到的电影特征维度是： [1, 200]\n"
     ]
    }
   ],
   "source": [
    "## Test the movie feature extraction network\n",
    "with dygraph.guard():\n",
    "    model = MovModel(\"Mov\", use_poster=False, use_mov_title=True, use_mov_cat=True, use_age_job=True)\n",
    "    model.eval()\n",
    "    \n",
    "    data_loader = model.train_loader\n",
    "    \n",
    "    for idx, data in enumerate(data_loader()):\n",
    "        # Fetch one batch and convert it to dygraph variables\n",
    "        usr, mov, score = data\n",
    "        # Only use the first record of each batch\n",
    "        mov_v = [var[0:1] for var in mov]\n",
    "        \n",
    "        # Squeezed copies only for readable printing\n",
    "        _mov_v = [np.squeeze(var[0:1]) for var in mov]\n",
    "        print(\"输入的电影ID数据：{}\\n类别数据：{} \\n名称数据：{} \".format(*_mov_v))\n",
    "        mov_v = [dygraph.to_variable(var) for var in mov_v]\n",
    "        mov_feat = model.get_mov_feat(mov_v)\n",
    "        print(\"计算得到的电影特征维度是：\", mov_feat.shape)\n",
    "        break\n",
    "        "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 相似度计算\n",
    "\n",
    "计算得到用户特征和电影特征后，我们还需要进行特征之间的相似度计算。如果一个用户对某个电影很感兴趣，并给了五分评价，那么该用户和电影对应的特征之间的相似度是很高的。\n",
    "\n",
    "衡量向量距离（相似度）有多种方案：欧式距离、曼哈顿距离、切比雪夫距离、余弦相似度等，本节我们使用忽略尺度信息的余弦相似度构建相关性矩阵。余弦相似度又称为余弦相似性，是通过计算两个向量的夹角余弦值来评估他们的相似度，如下图，两条红色的直线表示两个向量，之间的夹角可以用来表示相似度大小，角度为0时，余弦值为1，表示完全相似。\n",
    "\n",
    "<img src=\"https://ai-studio-static-online.cdn.bcebos.com/7d955048899441aeade18be12ae5a21c2be3b0f6a3e04374a595ba73801eef82\"\n",
    "width=\"300\" >\n",
    "\n",
    "\n",
    "余弦相似度的公式为：\n",
    "\n",
    "$similarity = cos(\\theta) = \\frac{A\\cdot B}{\\|A\\| \\|B\\|} = \\frac{\\sum_{i}^{n}A_i \\times B_i}{\\sqrt{\\sum_{i}^{n}(A_i)^2} \\sqrt{\\sum_{i}^{n}(B_i)^2}}$\n",
    "\n",
    "\n",
    "下面是计算相似度的实现方法，输入用户的特征和电影特征，计算出两者之间的相似度。另外，我们将用户对电影的评分作为相似度衡量的标准，由于相似度的数据范围是[-1, 1]，还需要把计算的相似度扩大到评分数据范围，评分分为1-5共5个档次，所以需要将相似度扩大5倍。飞桨已实现的[scale](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/scale_cn.html#scale) API，可以对输入数据进行缩放。同时计算余弦相似度可以使用[cos_sim](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/cos_sim_cn.html#cos-sim) API完成。\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "相似度是： -0.077025525\n"
     ]
    }
   ],
   "source": [
    "def similarty(usr_feature, mov_feature):\n",
    "    \"\"\"Scale the cosine similarity of two feature batches to the 1-5 rating range.\n",
    "\n",
    "    Returns the two input features together with the scaled similarity.\n",
    "    NOTE: the original (misspelled) name is kept so existing callers still work.\n",
    "    \"\"\"\n",
    "    res = fluid.layers.cos_sim(usr_feature, mov_feature)\n",
    "    res = fluid.layers.scale(res, scale=5)\n",
    "    # Bug fix: return the function's own arguments instead of the globals\n",
    "    # usr_feat / mov_feat, which silently ignored the parameters\n",
    "    return usr_feature, mov_feature, res\n",
    "\n",
    "# Compute the similarity between the user/movie features produced above\n",
    "\n",
    "with fluid.dygraph.guard():\n",
    "    _sim = similarty(usr_feat, mov_feat)\n",
    "    print(\"相似度是：\", np.squeeze(_sim[-1].numpy()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "从结果中我们发现相似度很小，主要有以下原因：\n",
    "1. 神经网络并没有训练；\n",
    "2. 计算相似度的用户数据和电影数据相关性很小。\n",
    "\n",
    "在下一节我们就开始训练，让这个网络能够输出有效的用户特征向量和电影特征向量。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 总结\n",
    "\n",
    "本节中，我们介绍了个性化推荐的模型设计部分。包括用户特征网络、电影特征网络和特征相似度计算三部分。\n",
    "\n",
    "其中，用户特征网络将用户数据映射为固定长度的特征向量，电影特征网络将电影数据映射为固定长度的特征向量，最终利用余弦相似度计算出用户特征和电影特征的相似度。相似度越大，表示用户对该电影更喜欢。\n",
    "\n",
    "以下为模型设计的完整代码："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "class Model(dygraph.layers.Layer):\n",
    "    \"\"\"Recommendation model: a user tower and a movie tower whose 200-d\n",
    "    feature vectors are compared with cosine similarity (scaled to ratings).\"\"\"\n",
    "    def __init__(self, name_scope, use_poster, use_mov_title, use_mov_cat, use_age_job):\n",
    "        \"\"\"Build the dataset loaders and all embedding/FC/conv sub-layers.\"\"\"\n",
    "        super(Model, self).__init__(name_scope)\n",
    "        name = self.full_name()\n",
    "        \n",
    "        # Store the boolean flags controlling which features are used\n",
    "        self.use_mov_poster = use_poster\n",
    "        self.use_mov_title = use_mov_title\n",
    "        self.use_usr_age_job = use_age_job\n",
    "        self.use_mov_cat = use_mov_cat\n",
    "        \n",
    "        # Build the dataset and the train/valid data loaders\n",
    "        Dataset = MovieLen(self.use_mov_poster)\n",
    "        self.Dataset = Dataset\n",
    "        self.trainset = self.Dataset.train_dataset\n",
    "        self.valset = self.Dataset.valid_dataset\n",
    "        self.train_loader = self.Dataset.load_data(dataset=self.trainset, mode='train')\n",
    "        self.valid_loader = self.Dataset.load_data(dataset=self.valset, mode='valid')\n",
    "\n",
    "        \"\"\" define network layer for embedding usr info \"\"\"\n",
    "        USR_ID_NUM = Dataset.max_usr_id + 1\n",
    "        # Embed the user ID, followed by an FC layer\n",
    "        self.usr_emb = Embedding(name, [USR_ID_NUM, 32], is_sparse=False)\n",
    "        self.usr_fc = FC(name, size=32)\n",
    "        \n",
    "        # Embed the user gender (two values), followed by an FC layer\n",
    "        USR_GENDER_DICT_SIZE = 2\n",
    "        self.usr_gender_emb = Embedding(name, [USR_GENDER_DICT_SIZE, 16])\n",
    "        self.usr_gender_fc = FC(name, 16)\n",
    "        \n",
    "        # Embed the user age, followed by an FC layer\n",
    "        USR_AGE_DICT_SIZE = Dataset.max_usr_age + 1\n",
    "        self.usr_age_emb = Embedding(name, [USR_AGE_DICT_SIZE, 16])\n",
    "        self.usr_age_fc = FC(name, 16)\n",
    "        \n",
    "        # Embed the user job, followed by an FC layer\n",
    "        USR_JOB_DICT_SIZE = Dataset.max_usr_job + 1\n",
    "        self.usr_job_emb = Embedding(name, [USR_JOB_DICT_SIZE, 16])\n",
    "        self.usr_job_fc = FC(name, 16)\n",
    "        \n",
    "        # FC layer fusing all user features into one 200-d vector\n",
    "        self.usr_combined = FC(name, 200, act='tanh')\n",
    "        \n",
    "        # NOTE(review): the banner string below says \"usr info\" but this\n",
    "        # section defines the layers that embed *movie* info (copy-paste slip)\n",
    "        \"\"\" define network layer for embedding usr info \"\"\"\n",
    "        # Embed the movie ID, followed by an FC layer\n",
    "        MOV_DICT_SIZE = Dataset.max_mov_id + 1\n",
    "        self.mov_emb = Embedding(name, [MOV_DICT_SIZE, 32])\n",
    "        self.mov_fc = FC(name, 32)\n",
    "        \n",
    "        # Embed the movie categories\n",
    "        CATEGORY_DICT_SIZE = len(Dataset.movie_cat) + 1\n",
    "        self.mov_cat_emb = Embedding(name, [CATEGORY_DICT_SIZE, 32], is_sparse=False)\n",
    "        self.mov_cat_fc = FC(name, 32)\n",
    "        \n",
    "        # Embed the movie title; refined by two convolution layers\n",
    "        MOV_TITLE_DICT_SIZE = len(Dataset.movie_title) + 1\n",
    "        self.mov_title_emb = Embedding(name, [MOV_TITLE_DICT_SIZE, 32], is_sparse=False)\n",
    "        self.mov_title_conv = Conv2D(name, 1, filter_size=(3, 1), stride=(2,1), padding=0, act='relu')\n",
    "        self.mov_title_conv2 = Conv2D(name, 1, filter_size=(3, 1), stride=1, padding=0, act='relu')\n",
    "        \n",
    "        # FC layer fusing all movie features into one 200-d vector\n",
    "        self.mov_concat_embed = FC(name, size=200, act='tanh')\n",
    "        \n",
    "    # Forward computation of the user feature vector\n",
    "    def get_usr_feat(self, usr_var):\n",
    "        \"\"\"Map user data (id, gender, age, job) to a 200-d feature vector.\"\"\"\n",
    "        # Unpack the user fields\n",
    "        usr_id, usr_gender, usr_age, usr_job = usr_var\n",
    "        # User-ID feature: embedding -> FC -> ReLU, collected in feats_collect\n",
    "        feats_collect = []\n",
    "        usr_id = self.usr_emb(usr_id)\n",
    "        usr_id = self.usr_fc(usr_id)\n",
    "        usr_id = fluid.layers.relu(usr_id)\n",
    "        feats_collect.append(usr_id)\n",
    "        \n",
    "        # Gender feature: embedding -> FC -> ReLU\n",
    "        usr_gender = self.usr_gender_emb(usr_gender)\n",
    "        usr_gender = self.usr_gender_fc(usr_gender)\n",
    "        usr_gender = fluid.layers.relu(usr_gender)\n",
    "        feats_collect.append(usr_gender)\n",
    "        # Optionally include the age and job features\n",
    "        if self.use_usr_age_job:\n",
    "            # Age feature: embedding -> FC -> ReLU\n",
    "            usr_age = self.usr_age_emb(usr_age)\n",
    "            usr_age = self.usr_age_fc(usr_age)\n",
    "            usr_age = fluid.layers.relu(usr_age)\n",
    "            feats_collect.append(usr_age)\n",
    "            # Job feature: embedding -> FC -> ReLU\n",
    "            usr_job = self.usr_job_emb(usr_job)\n",
    "            usr_job = self.usr_job_fc(usr_job)\n",
    "            usr_job = fluid.layers.relu(usr_job)\n",
    "            feats_collect.append(usr_job)\n",
    "        \n",
    "        # Concatenate all user features and fuse them with the combined FC (tanh)\n",
    "        usr_feat = fluid.layers.concat(feats_collect, axis=1)\n",
    "        usr_feat = self.usr_combined(usr_feat)\n",
    "        return usr_feat\n",
    "\n",
    "    # Forward computation of the movie feature vector\n",
    "    def get_mov_feat(self, mov_var):\n",
    "        \"\"\"Map movie data (id, categories, title) to a 200-d feature vector.\"\"\"\n",
    "        # Unpack the movie fields (mov_poster is unpacked but not used here)\n",
    "        mov_id, mov_cat, mov_title, mov_poster = mov_var\n",
    "        feats_collect = []\n",
    "        # Batch size, needed below to reshape the title feature\n",
    "        batch_size = mov_id.shape[0]\n",
    "        # Movie-ID feature: embedding -> FC -> ReLU, collected in feats_collect\n",
    "        mov_id = self.mov_emb(mov_id)\n",
    "        mov_id = self.mov_fc(mov_id)\n",
    "        mov_id = fluid.layers.relu(mov_id)\n",
    "        feats_collect.append(mov_id)\n",
    "        \n",
    "        # Optionally include the category feature\n",
    "        if self.use_mov_cat:\n",
    "            # Sum the embeddings of the (possibly multiple) categories, then FC\n",
    "            mov_cat = self.mov_cat_emb(mov_cat)\n",
    "            mov_cat = fluid.layers.reduce_sum(mov_cat, dim=1, keep_dim=False)\n",
    "\n",
    "            mov_cat = self.mov_cat_fc(mov_cat)\n",
    "            feats_collect.append(mov_cat)\n",
    "\n",
    "        if self.use_mov_title:\n",
    "            # Title feature: embedding, two convolutions, sum over dim 2,\n",
    "            # ReLU, then flatten to (batch_size, -1)\n",
    "            mov_title = self.mov_title_emb(mov_title)\n",
    "            mov_title = self.mov_title_conv2(self.mov_title_conv(mov_title))\n",
    "            mov_title = fluid.layers.reduce_sum(mov_title, dim=2, keep_dim=False)\n",
    "            mov_title = fluid.layers.relu(mov_title)\n",
    "            mov_title = fluid.layers.reshape(mov_title, [batch_size, -1])\n",
    "            feats_collect.append(mov_title)\n",
    "            \n",
    "        # Concatenate all movie features and fuse them into a 200-d vector\n",
    "        mov_feat = fluid.layers.concat(feats_collect, axis=1)\n",
    "        mov_feat = self.mov_concat_embed(mov_feat)\n",
    "        return mov_feat\n",
    "    \n",
    "    # Full forward pass of the recommendation model\n",
    "    def forward(self, usr_var, mov_var):\n",
    "        \"\"\"Return (usr_feat, mov_feat, similarity) for a batch of user/movie data.\"\"\"\n",
    "        # Compute the user and movie feature vectors\n",
    "        usr_feat = self.get_usr_feat(usr_var)\n",
    "        mov_feat = self.get_mov_feat(mov_var)\n",
    "        # Cosine similarity between the two feature vectors\n",
    "        res = fluid.layers.cos_sim(usr_feat, mov_feat)\n",
    "        # Scale the similarity to the 1-5 rating range\n",
    "        res = fluid.layers.scale(res, scale=5)\n",
    "        return usr_feat, mov_feat, res\n",
    "   "
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PaddlePaddle 1.6.0 (Python 3.5)",
   "language": "python",
   "name": "py35-paddle1.2.0"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
