{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "import numpy as np\n",
    "from collections import Counter\n",
    "import tensorflow as tf\n",
    "\n",
    "import os\n",
    "import pickle\n",
    "import re\n",
    "from tensorflow.python.ops import math_ops"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from urllib.request import urlretrieve\n",
    "from os.path import isfile, isdir\n",
    "from tqdm import tqdm#进度条工具\n",
    "import zipfile\n",
    "import hashlib"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Directory containing the MovieLens .dat files (users.dat, movies.dat, ratings.dat)\n",
    "data_dir = '.'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data preprocessing. Transformations applied:\n",
    "# - UserID, Occupation and MovieID are kept as-is.\n",
    "# - Gender: 'F' / 'M' are mapped to 0 / 1.\n",
    "# - Age: mapped to 7 consecutive integer codes 0-6.\n",
    "# - Genres: categorical; build a genre->int dict, then turn each movie's Genres into a list of ints (a movie can have several genres).\n",
    "# - Title: same idea — build a word->int dict and turn each title into a list of word ids; the trailing year is stripped.\n",
    "# - Genres and Title lists are padded with the '<PAD>' id to a uniform length so the network sees fixed shapes.\n",
    "def load_data(data_dir):\n",
    "    \"\"\"Load and preprocess the MovieLens users/movies/ratings .dat files.\n",
    "\n",
    "    Returns: (title_count, title_set, genres2int, features, targets_values,\n",
    "              ratings, users, movies, data, movies_orig, users_orig)\n",
    "    \"\"\"\n",
    "    # --- Users ---\n",
    "    users_title = ['UserID', 'Gender', 'Age', 'JobID', 'Zip-code']\n",
    "    users = pd.read_table(data_dir+'/users.dat', sep='::', header=None, names=users_title, engine='python')\n",
    "    users = users.filter(regex='UserID|Gender|Age|JobID')\n",
    "    # keep the raw ndarray before any remapping\n",
    "    users_orig = users.values\n",
    "    # remap Gender and Age to small integer codes\n",
    "    gender_map = {'F':0, 'M':1}\n",
    "    users['Gender'] = users['Gender'].map(gender_map)\n",
    "    # NOTE(review): enumerate(set(...)) makes the age->code mapping order-dependent across runs\n",
    "    age_map = {val:ii \n",
    "               for ii,val in enumerate(set(users['Age']))}\n",
    "    users['Age'] = users['Age'].map(age_map)\n",
    "    \n",
    "    # --- Movies ---\n",
    "    movies_title = ['MovieID', 'Title', 'Genres']\n",
    "    movies = pd.read_table(data_dir+'/movies.dat', sep='::', header=None, names=movies_title, engine = 'python')\n",
    "    movies_orig = movies.values\n",
    "    # strip the trailing \"(year)\" from every title\n",
    "    pattern = re.compile(r'^(.*)\\((\\d+)\\)$')\n",
    "    title_map = {val:pattern.match(val).group(1) for ii,val in enumerate(set(movies['Title']))}\n",
    "    movies['Title'] = movies['Title'].map(title_map)\n",
    "    # genre -> int dictionary\n",
    "    genres_set = set()\n",
    "    for val in movies['Genres'].str.split('|'):\n",
    "        genres_set.update(val)\n",
    "    \n",
    "    genres_set.add('<PAD>')# padding token\n",
    "    genres2int = {val:ii \n",
    "                  for ii, val in enumerate(genres_set)}\n",
    "    # e.g. {'Comedy': 1, 'Drama': 2, ...}\n",
    "    # turn each Genres string into a list of genre ids\n",
    "    genres_map = {val:[genres2int[row] \n",
    "                       for row in val.split('|')] \n",
    "                          for ii,val in enumerate(set(movies['Genres']))}\n",
    "    movies['Genres'] = movies['Genres'].map(genres_map)\n",
    "    # genres_map shape: {'a|b|c': [id1, id2, id3]}\n",
    "    # e.g. {'Comedy|Drama': [6, 10], 'Adventure|Fantasy|Romance': [16, 3, 13], 'Documentary': [2], ...}\n",
    "    # pad every genre list with the <PAD> id up to a uniform length\n",
    "    # NOTE(review): pad length is max(genres2int.values()), not a named constant — confirm it matches the 18-wide movie_categories placeholder\n",
    "    for key in genres_map:\n",
    "        for cnt in range(max(genres2int.values()) - len(genres_map[key])):\n",
    "            genres_map[key].insert(len(genres_map[key]) + cnt,genres2int['<PAD>'])\n",
    "    # genres_map now like {'Comedy|Drama': [6, 10, 14, 14, 14, ...]}\n",
    "    \n",
    "    # title word -> int dictionary\n",
    "    title_set = set()\n",
    "    for val in movies['Title'].str.split():\n",
    "        title_set.update(val)\n",
    "    # title_set is the title-word vocabulary\n",
    "    title_set.add('<PAD>')\n",
    "    title2int = {val:ii \n",
    "                 for ii, val in enumerate(title_set)}\n",
    "    # turn each Title into a fixed-length (15) list of word ids\n",
    "    title_count = 15\n",
    "    title_map = {val:[title2int[row] \n",
    "                      for row in val.split()] \n",
    "                         for ii,val in enumerate(set(movies['Title']))}\n",
    "    \n",
    "    for key in title_map:\n",
    "        for cnt in range(title_count - len(title_map[key])):\n",
    "            title_map[key].insert(len(title_map[key]) + cnt,title2int['<PAD>'])\n",
    "    \n",
    "    movies['Title'] = movies['Title'].map(title_map)\n",
    "    # movies['Title'][1] == [3928, 2102, 2102, ...]\n",
    "    # i.e. which vocabulary words the title uses, padded with the <PAD> id\n",
    "    # --- Ratings ---\n",
    "    ratings_title = ['UserID','MovieID', 'Rating', 'timestamps']\n",
    "    ratings = pd.read_table(data_dir+'/ratings.dat', sep='::', header=None, names=ratings_title, engine = 'python')\n",
    "    #ratings.head()\n",
    "    ratings = ratings.filter(regex='UserID|MovieID|Rating')\n",
    "    \n",
    "    # merge the three tables\n",
    "    data = pd.merge(pd.merge(ratings, users), movies)\n",
    "    # data sample:\n",
    "    #    UserID  MovieID  Rating  Gender  Age  Title (word ids)     Genres\n",
    "    # 0  1       1193     5       0       0    [4094, 4032, ...]    Drama\n",
    "    # 1  2       1193     5       1       5    [4094, 4032, ...]    Drama\n",
    "    \n",
    "    # split into features X and target y\n",
    "    target_fields = 'Rating'\n",
    "    features_pd, targets_pd = data.drop(target_fields, axis=1), data[target_fields]# features vs target\n",
    "    features = features_pd.values\n",
    "    targets_values = targets_pd.values\n",
    "    # after preprocessing every column is numeric (or a list of ints)\n",
    "    return title_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig, users_orig"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "UserID                                                     2\n",
       "MovieID                                                 1193\n",
       "Rating                                                     5\n",
       "Gender                                                     1\n",
       "Age                                                        5\n",
       "JobID                                                     16\n",
       "Title      [412, 3834, 1057, 3689, 537, 3898, 132, 132, 1...\n",
       "Genres     [14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 1...\n",
       "Name: 1, dtype: object"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "title_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig, users_orig = load_data(data_dir)\n",
    "#查看数据样式\n",
    "data.iloc[1]\n",
    "#features[1]\n",
    "#UserID                                                     2\n",
    "# MovieID                                                 1193\n",
    "# Rating                                                     5\n",
    "# Gender                                                     1\n",
    "# Age                                                        5\n",
    "# Title      [4094, 4032, 1255, 4199, 3605, 90, 2102, 2102,...\n",
    "# Genres     [10, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 1...\n",
    "# Name: 1, dtype: object"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "import os\n",
    "import pickle  # object serialization\n",
    "# Save / restore model parameters between sessions\n",
    "def save_params(params):\n",
    "    \"\"\"\n",
    "    Save parameters to file\n",
    "    \"\"\"\n",
    "    # context manager closes the file handle deterministically\n",
    "    # (the original passed open(...) inline and never closed it)\n",
    "    with open('params.p', 'wb') as f:\n",
    "        pickle.dump(params, f)\n",
    "\n",
    "def load_params():\n",
    "    \"\"\"\n",
    "    Load parameters from file\n",
    "    \"\"\"\n",
    "    # NOTE: pickle.load is unsafe on untrusted files; params.p is produced locally by save_params\n",
    "    with open('params.p', 'rb') as f:\n",
    "        return pickle.load(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "21\n"
     ]
    }
   ],
   "source": [
    "# embedding dimension\n",
    "embed_dim = 32\n",
    "# ndarray.take(i, 1) selects column i\n",
    "# number of user ids\n",
    "uid_max = max(features.take(0,1)) + 1 # 6040\n",
    "\n",
    "# number of genders\n",
    "gender_max = max(features.take(2,1)) + 1 # 1 + 1 = 2\n",
    "\n",
    "# number of age buckets\n",
    "age_max = max(features.take(3,1)) + 1 # 6 + 1 = 7\n",
    "\n",
    "# number of occupations\n",
    "job_max = max(features.take(4,1))+1# 20 + 1 = 21\n",
    "print(job_max)\n",
    "# number of movie ids\n",
    "movie_id_max = max(features.take(1,1))+1# 3952\n",
    "\n",
    "# number of movie genres\n",
    "\n",
    "movie_categories_max = max(genres2int.values())+ 1 # 18 + 1 = 19 \n",
    "# number of distinct title words\n",
    "movie_title_max = len(title_set) # 5216\n",
    "\n",
    "# how to combine a movie's several genre embeddings; only \"sum\" is implemented (\"mean\" was considered but not written)\n",
    "combiner = \"sum\"\n",
    "\n",
    "# title length in words, after padding\n",
    "sentences_size = title_count # = 15\n",
    "# text-CNN window sizes: slide over 2, 3, 4 and 5 words\n",
    "window_sizes = {2, 3, 4, 5}\n",
    "# number of convolution filters per window size\n",
    "filter_num = 8\n",
    "\n",
    "# movie id -> row index into movies (raw:inner)\n",
    "movieid2idx = {val[0]:i for i, val in enumerate(movies.values)}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of Epochs \n",
    "num_epochs = 5\n",
    "# Batch Size\n",
    "batch_size = 256  # mini-batch size\n",
    "\n",
    "# dropout keep probability\n",
    "dropout_keep = 0.5\n",
    "# learning rate\n",
    "learning_rate = 0.0001\n",
    "# Show stats for every n number of batches\n",
    "show_every_n_batches = 20\n",
    "\n",
    "# checkpoint directory\n",
    "save_dir = './save'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define the input placeholders (graph construction)\n",
    "#0 UserID                                                     2\n",
    "#1 MovieID                                                 1193\n",
    "# Rating                                                     5\n",
    "#2 Gender                                                     1\n",
    "#3 Age                                                        5\n",
    "#4 Title      [4094, 4032, 1255, 4199, 3605, 90, 2102, 2102,...\n",
    "#5 Genres     [10, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 1...\n",
    "# Name: 1, dtype: object\n",
    "# Factory returning every placeholder the model feeds\n",
    "def get_inputs():# movie_titles / movie_categories carry padded id lists\n",
    "    uid = tf.placeholder(tf.int32, [None, 1], name=\"uid\")\n",
    "    user_gender = tf.placeholder(tf.int32, [None, 1], name=\"user_gender\")\n",
    "    user_age = tf.placeholder(tf.int32, [None, 1], name=\"user_age\")\n",
    "    user_job = tf.placeholder(tf.int32, [None, 1], name=\"user_job\")\n",
    "    \n",
    "    movie_id = tf.placeholder(tf.int32, [None, 1], name=\"movie_id\")\n",
    "    # NOTE(review): widths 18 and 15 are hard-coded — must match the genre pad length and title_count from load_data\n",
    "    movie_categories = tf.placeholder(tf.int32, [None, 18], name=\"movie_categories\")\n",
    "    movie_titles = tf.placeholder(tf.int32, [None, 15], name=\"movie_titles\")\n",
    "    targets = tf.placeholder(tf.int32, [None, 1], name=\"targets\")\n",
    "    LearningRate = tf.placeholder(tf.float32, name = \"LearningRate\")\n",
    "    dropout_keep_prob = tf.placeholder(tf.float32, name = \"dropout_keep_prob\")\n",
    "    return uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, LearningRate, dropout_keep_prob"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the network\n",
    "# User-side embedding matrices (initialise the embedding-layer weights)\n",
    "def get_user_embedding(uid, user_gender, user_age, user_job):\n",
    "    with tf.name_scope(\"user_embedding\"):# TF name scope\n",
    "        uid_embed_matrix = tf.Variable(tf.random_uniform([uid_max, embed_dim], -1, 1), name = \"uid_embed_matrix\") # 6040 x 32\n",
    "        uid_embed_layer = tf.nn.embedding_lookup(uid_embed_matrix, uid, name = \"uid_embed_layer\")\n",
    "\n",
    "        gender_embed_matrix = tf.Variable(tf.random_uniform([gender_max, embed_dim // 2], -1, 1), name= \"gender_embed_matrix\") \n",
    "        gender_embed_layer = tf.nn.embedding_lookup(gender_embed_matrix, user_gender, name = \"gender_embed_layer\")\n",
    " \n",
    "        age_embed_matrix = tf.Variable(tf.random_uniform([age_max, embed_dim // 2], -1, 1), name=\"age_embed_matrix\")\n",
    "        age_embed_layer = tf.nn.embedding_lookup(age_embed_matrix, user_age, name=\"age_embed_layer\")\n",
    " \n",
    "        job_embed_matrix = tf.Variable(tf.random_uniform([job_max, embed_dim // 2], -1, 1), name = \"job_embed_matrix\")\n",
    "        job_embed_layer = tf.nn.embedding_lookup(job_embed_matrix, user_job, name = \"job_embed_layer\")\n",
    "    return uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fully-connect the four user embeddings into the user feature vector\n",
    "def get_user_feature_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer): \n",
    "    with tf.name_scope(\"user_fc\"):# name scope\n",
    "        # first FC layer: each embedding -> embed_dim units with ReLU\n",
    "        uid_fc_layer = tf.layers.dense(uid_embed_layer, embed_dim, name = \"uid_fc_layer\", activation=tf.nn.relu)# ReLU on the uid embedding\n",
    "        gender_fc_layer = tf.layers.dense(gender_embed_layer, embed_dim, name = \"gender_fc_layer\", activation=tf.nn.relu)\n",
    "        age_fc_layer = tf.layers.dense(age_embed_layer, embed_dim, name =\"age_fc_layer\", activation=tf.nn.relu)\n",
    "        job_fc_layer = tf.layers.dense(job_embed_layer, embed_dim, name = \"job_fc_layer\", activation=tf.nn.relu)\n",
    "        # each dense layer above has embed_dim units\n",
    "        # second FC layer\n",
    "        user_combine_layer = tf.concat([uid_fc_layer, gender_fc_layer, age_fc_layer, job_fc_layer], 2)  #(?, 1, 128) concatenated\n",
    "        user_combine_layer = tf.contrib.layers.fully_connected(user_combine_layer, 200, tf.tanh)  #(?, 1, 200), 200 units\n",
    "         # fully_connected creates a 'weights' variable and multiplies inputs by it\n",
    "        user_combine_layer_flat = tf.reshape(user_combine_layer, [-1, 200])\n",
    "    return user_combine_layer, user_combine_layer_flat\n",
    "# Movie-ID embedding matrix\n",
    "def get_movie_id_embed_layer(movie_id):\n",
    "    with tf.name_scope(\"movie_embedding\"):  \n",
    "        movie_id_embed_matrix = tf.Variable(tf.random_uniform([movie_id_max, embed_dim], -1, 1), name = \"movie_id_embed_matrix\")\n",
    "        movie_id_embed_layer = tf.nn.embedding_lookup(movie_id_embed_matrix, movie_id, name = \"movie_id_embed_layer\")\n",
    "    return movie_id_embed_layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Combine a movie's several genre embeddings into one vector by summation\n",
    "def get_movie_categories_layers(movie_categories):\n",
    "    with tf.name_scope(\"movie_categories_layers\"):\n",
    "        movie_categories_embed_matrix = tf.Variable(tf.random_uniform([movie_categories_max, embed_dim], -1, 1), name = \"movie_categories_embed_matrix\")\n",
    "        movie_categories_embed_layer = tf.nn.embedding_lookup(movie_categories_embed_matrix, movie_categories, name = \"movie_categories_embed_layer\")\n",
    "        \n",
    "        if combiner == \"sum\":\n",
    "            # keepdims replaces the deprecated keep_dims kwarg (see the deprecation\n",
    "            # warning this notebook previously emitted); output keeps shape (?, 1, embed_dim)\n",
    "            movie_categories_embed_layer = tf.reduce_sum(movie_categories_embed_layer, axis=1, keepdims=True)\n",
    "    #     elif combiner == \"mean\":\n",
    " \n",
    "    return movie_categories_embed_layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Text-CNN over the movie title\n",
    "def get_movie_cnn_layer(movie_titles): # movie_titles: padded word-id lists, e.g. [4094, 4032, 1255, ...]\n",
    "    # look up the embedding of every word in the title\n",
    "    with tf.name_scope(\"movie_embedding\"):\n",
    "        movie_title_embed_matrix = tf.Variable(tf.random_uniform([movie_title_max, embed_dim], -1, 1), name = \"movie_title_embed_matrix\")\n",
    "        movie_title_embed_layer = tf.nn.embedding_lookup(movie_title_embed_matrix, movie_titles, name = \"movie_title_embed_layer\")# (?, 15, embed_dim)\n",
    "        movie_title_embed_layer_expand = tf.expand_dims(movie_title_embed_layer, -1)# add a trailing channel dim of 1 for conv2d\n",
    "        \n",
    "        # 't' is a tensor of shape [2]\n",
    "         #shape(expand_dims(t, -1)) ==> [2, 1]\n",
    "            \n",
    "    # convolution + max-pool with several window sizes over the word embeddings\n",
    "    pool_layer_lst = []\n",
    "    for window_size in window_sizes:\n",
    "        with tf.name_scope(\"movie_txt_conv_maxpool_{}\".format(window_size)):# conv + pooling for this window size\n",
    "            filter_weights = tf.Variable(tf.truncated_normal([window_size, embed_dim, 1, filter_num],stddev=0.1),name = \"filter_weights\")\n",
    "            filter_bias = tf.Variable(tf.constant(0.1, shape=[filter_num]), name=\"filter_bias\")\n",
    " \n",
    "            conv_layer = tf.nn.conv2d(movie_title_embed_layer_expand, filter_weights, [1,1,1,1], padding=\"VALID\", name=\"conv_layer\")\n",
    "            relu_layer = tf.nn.relu(tf.nn.bias_add(conv_layer,filter_bias), name =\"relu_layer\")\n",
    " \n",
    "            maxpool_layer = tf.nn.max_pool(relu_layer, [1,sentences_size - window_size + 1 ,1,1], [1,1,1,1], padding=\"VALID\", name=\"maxpool_layer\")\n",
    "            pool_layer_lst.append(maxpool_layer)# collect the pooled output of each window size\n",
    " \n",
    "    # dropout over the concatenated pooled features\n",
    "    with tf.name_scope(\"pool_dropout\"):# NOTE(review): dropout_keep_prob is the module-level placeholder from get_inputs — confirm wiring\n",
    "        pool_layer = tf.concat(pool_layer_lst, 3, name =\"pool_layer\")\n",
    "        max_num = len(window_sizes) * filter_num\n",
    "        pool_layer_flat = tf.reshape(pool_layer , [-1, 1, max_num], name = \"pool_layer_flat\")\n",
    " \n",
    "        dropout_layer = tf.nn.dropout(pool_layer_flat, dropout_keep_prob, name = \"dropout_layer\")\n",
    "    return pool_layer_flat, dropout_layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-19-5aa3982178c0>:8: calling reduce_sum (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "keep_dims is deprecated, use keepdims instead\n"
     ]
    }
   ],
   "source": [
    "# Fully-connect the movie-side layers into the movie feature vector\n",
    "def get_movie_feature_layer(movie_id_embed_layer, movie_categories_embed_layer, dropout_layer):\n",
    "    with tf.name_scope(\"movie_fc\"):\n",
    "        # first FC layer\n",
    "        movie_id_fc_layer = tf.layers.dense(movie_id_embed_layer, embed_dim, name = \"movie_id_fc_layer\", activation=tf.nn.relu)\n",
    "        movie_categories_fc_layer = tf.layers.dense(movie_categories_embed_layer, embed_dim, name = \"movie_categories_fc_layer\", activation=tf.nn.relu)\n",
    " \n",
    "        # second FC layer\n",
    "        movie_combine_layer = tf.concat([movie_id_fc_layer, movie_categories_fc_layer, dropout_layer], 2)  #(?, 1, 96)\n",
    "        movie_combine_layer = tf.contrib.layers.fully_connected(movie_combine_layer, 200, tf.tanh)  #(?, 1, 200)\n",
    " \n",
    "        movie_combine_layer_flat = tf.reshape(movie_combine_layer, [-1, 200])\n",
    "    return movie_combine_layer, movie_combine_layer_flat\n",
    "# Build the computation graph\n",
    "tf.reset_default_graph()\n",
    "train_graph = tf.Graph()\n",
    "with train_graph.as_default():\n",
    "    # input placeholders\n",
    "    uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob = get_inputs()\n",
    "    # the four user embedding vectors\n",
    "    uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer = get_user_embedding(uid, user_gender, user_age, user_job)\n",
    "    # user feature vector\n",
    "    user_combine_layer, user_combine_layer_flat = get_user_feature_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer)\n",
    "    # movie-id embedding\n",
    "    movie_id_embed_layer = get_movie_id_embed_layer(movie_id)\n",
    "    # movie-genre embedding (summed)\n",
    "    movie_categories_embed_layer = get_movie_categories_layers(movie_categories)\n",
    "    # title text-CNN features\n",
    "    pool_layer_flat, dropout_layer = get_movie_cnn_layer(movie_titles)\n",
    "    # movie feature vector\n",
    "    movie_combine_layer, movie_combine_layer_flat = get_movie_feature_layer(movie_id_embed_layer, \n",
    "                                                                                movie_categories_embed_layer, \n",
    "                                                                                dropout_layer)\n",
    "    # compute the predicted rating; the two alternatives create tensors with different names —\n",
    "    # the recommendation code later fetches the tensor by name\n",
    "    with tf.name_scope(\"inference\"):\n",
    "        # alternative: concat user and movie features, then a dense layer producing one score\n",
    "#         inference_layer = tf.concat([user_combine_layer_flat, movie_combine_layer_flat], 1)  #(?, 200)\n",
    "#         inference = tf.layers.dense(inference_layer, 1,\n",
    "#                                     kernel_initializer=tf.truncated_normal_initializer(stddev=0.01), \n",
    "#                                     kernel_regularizer=tf.nn.l2_loss, name=\"inference\")\n",
    "        # simple variant: matrix product of user and movie features as the score\n",
    "        inference = tf.matmul(user_combine_layer_flat, tf.transpose(movie_combine_layer_flat))# NOTE(review): this is [batch, batch], not [batch, 1]; the MSE below broadcasts it against targets — verify intended\n",
    " \n",
    "    with tf.name_scope(\"loss\"):\n",
    "        # MSE loss: regress the prediction onto the rating\n",
    "        cost = tf.losses.mean_squared_error(targets, inference )\n",
    "        loss = tf.reduce_mean(cost)\n",
    "    # optimise the loss\n",
    "#     train_op = tf.train.AdamOptimizer(lr).minimize(loss)  #cost\n",
    "    global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n",
    "    optimizer = tf.train.AdamOptimizer(lr)# lr: learning-rate placeholder\n",
    "    gradients = optimizer.compute_gradients(loss)  #cost\n",
    "    train_op = optimizer.apply_gradients(gradients, global_step=global_step)# global_step counts the number of applied updates"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Writing to C:\\Users\\abc\\Desktop\\model\\runs\\1545901065\n",
      "\n",
      "2018-12-27T16:57:48.344203: Epoch   0 Batch    0/3125   train_loss = 17.714\n",
      "2018-12-27T16:57:49.551975: Epoch   0 Batch   20/3125   train_loss = 4.439\n",
      "2018-12-27T16:57:50.811609: Epoch   0 Batch   40/3125   train_loss = 2.912\n",
      "2018-12-27T16:57:51.989459: Epoch   0 Batch   60/3125   train_loss = 2.321\n",
      "2018-12-27T16:57:53.155343: Epoch   0 Batch   80/3125   train_loss = 2.199\n",
      "2018-12-27T16:57:54.387050: Epoch   0 Batch  100/3125   train_loss = 1.893\n",
      "2018-12-27T16:57:55.659648: Epoch   0 Batch  120/3125   train_loss = 1.925\n",
      "2018-12-27T16:57:56.986105: Epoch   0 Batch  140/3125   train_loss = 1.701\n",
      "2018-12-27T16:57:58.303580: Epoch   0 Batch  160/3125   train_loss = 1.593\n",
      "2018-12-27T16:57:59.488414: Epoch   0 Batch  180/3125   train_loss = 1.583\n",
      "2018-12-27T16:58:00.692196: Epoch   0 Batch  200/3125   train_loss = 1.735\n",
      "2018-12-27T16:58:01.864062: Epoch   0 Batch  220/3125   train_loss = 1.674\n",
      "2018-12-27T16:58:03.052884: Epoch   0 Batch  240/3125   train_loss = 1.464\n",
      "2018-12-27T16:58:04.245695: Epoch   0 Batch  260/3125   train_loss = 1.564\n",
      "2018-12-27T16:58:05.503333: Epoch   0 Batch  280/3125   train_loss = 1.614\n",
      "2018-12-27T16:58:06.714109: Epoch   0 Batch  300/3125   train_loss = 1.498\n",
      "2018-12-27T16:58:07.942813: Epoch   0 Batch  320/3125   train_loss = 1.569\n",
      "2018-12-27T16:58:09.119668: Epoch   0 Batch  340/3125   train_loss = 1.346\n",
      "2018-12-27T16:58:10.328436: Epoch   0 Batch  360/3125   train_loss = 1.442\n",
      "2018-12-27T16:58:11.497310: Epoch   0 Batch  380/3125   train_loss = 1.446\n",
      "2018-12-27T16:58:12.679151: Epoch   0 Batch  400/3125   train_loss = 1.277\n",
      "2018-12-27T16:58:13.878945: Epoch   0 Batch  420/3125   train_loss = 1.307\n",
      "2018-12-27T16:58:15.064774: Epoch   0 Batch  440/3125   train_loss = 1.431\n",
      "2018-12-27T16:58:16.464035: Epoch   0 Batch  460/3125   train_loss = 1.469\n",
      "2018-12-27T16:58:17.879250: Epoch   0 Batch  480/3125   train_loss = 1.479\n",
      "2018-12-27T16:58:19.104974: Epoch   0 Batch  500/3125   train_loss = 1.085\n",
      "2018-12-27T16:58:20.643409: Epoch   0 Batch  520/3125   train_loss = 1.365\n",
      "2018-12-27T16:58:22.014740: Epoch   0 Batch  540/3125   train_loss = 1.249\n",
      "2018-12-27T16:58:23.554619: Epoch   0 Batch  560/3125   train_loss = 1.467\n",
      "2018-12-27T16:58:24.962854: Epoch   0 Batch  580/3125   train_loss = 1.456\n",
      "2018-12-27T16:58:26.364109: Epoch   0 Batch  600/3125   train_loss = 1.437\n",
      "2018-12-27T16:58:27.757383: Epoch   0 Batch  620/3125   train_loss = 1.483\n",
      "2018-12-27T16:58:29.048931: Epoch   0 Batch  640/3125   train_loss = 1.416\n",
      "2018-12-27T16:58:30.627711: Epoch   0 Batch  660/3125   train_loss = 1.372\n",
      "2018-12-27T16:58:31.907290: Epoch   0 Batch  680/3125   train_loss = 1.217\n",
      "2018-12-27T16:58:33.287600: Epoch   0 Batch  700/3125   train_loss = 1.350\n",
      "2018-12-27T16:58:34.494374: Epoch   0 Batch  720/3125   train_loss = 1.248\n",
      "2018-12-27T16:58:35.749021: Epoch   0 Batch  740/3125   train_loss = 1.377\n",
      "2018-12-27T16:58:36.985714: Epoch   0 Batch  760/3125   train_loss = 1.422\n",
      "2018-12-27T16:58:38.182515: Epoch   0 Batch  780/3125   train_loss = 1.429\n",
      "2018-12-27T16:58:39.439156: Epoch   0 Batch  800/3125   train_loss = 1.313\n",
      "2018-12-27T16:58:40.705771: Epoch   0 Batch  820/3125   train_loss = 1.319\n",
      "2018-12-27T16:58:42.093077: Epoch   0 Batch  840/3125   train_loss = 1.276\n",
      "2018-12-27T16:58:43.267921: Epoch   0 Batch  860/3125   train_loss = 1.206\n",
      "2018-12-27T16:58:44.582407: Epoch   0 Batch  880/3125   train_loss = 1.291\n",
      "2018-12-27T16:58:45.980669: Epoch   0 Batch  900/3125   train_loss = 1.237\n",
      "2018-12-27T16:58:47.296152: Epoch   0 Batch  920/3125   train_loss = 1.317\n",
      "2018-12-27T16:58:48.726328: Epoch   0 Batch  940/3125   train_loss = 1.416\n",
      "2018-12-27T16:58:49.903183: Epoch   0 Batch  960/3125   train_loss = 1.350\n",
      "2018-12-27T16:58:51.168800: Epoch   0 Batch  980/3125   train_loss = 1.389\n",
      "2018-12-27T16:58:52.427435: Epoch   0 Batch 1000/3125   train_loss = 1.311\n",
      "2018-12-27T16:58:53.846641: Epoch   0 Batch 1020/3125   train_loss = 1.363\n",
      "2018-12-27T16:58:55.313719: Epoch   0 Batch 1040/3125   train_loss = 1.342\n",
      "2018-12-27T16:58:56.791780: Epoch   0 Batch 1060/3125   train_loss = 1.518\n",
      "2018-12-27T16:58:58.118222: Epoch   0 Batch 1080/3125   train_loss = 1.180\n",
      "2018-12-27T16:58:59.616222: Epoch   0 Batch 1100/3125   train_loss = 1.400\n",
      "2018-12-27T16:59:01.058363: Epoch   0 Batch 1120/3125   train_loss = 1.277\n",
      "2018-12-27T16:59:02.390801: Epoch   0 Batch 1140/3125   train_loss = 1.343\n",
      "2018-12-27T16:59:03.996538: Epoch   0 Batch 1160/3125   train_loss = 1.334\n",
      "2018-12-27T16:59:05.422737: Epoch   0 Batch 1180/3125   train_loss = 1.327\n",
      "2018-12-27T16:59:07.232886: Epoch   0 Batch 1200/3125   train_loss = 1.301\n",
      "2018-12-27T16:59:08.461601: Epoch   0 Batch 1220/3125   train_loss = 1.232\n",
      "2018-12-27T16:59:09.751154: Epoch   0 Batch 1240/3125   train_loss = 1.101\n",
      "2018-12-27T16:59:11.129469: Epoch   0 Batch 1260/3125   train_loss = 1.243\n",
      "2018-12-27T16:59:12.361178: Epoch   0 Batch 1280/3125   train_loss = 1.291\n",
      "2018-12-27T16:59:13.780385: Epoch   0 Batch 1300/3125   train_loss = 1.291\n",
      "2018-12-27T16:59:15.188618: Epoch   0 Batch 1320/3125   train_loss = 1.210\n",
      "2018-12-27T16:59:16.516071: Epoch   0 Batch 1340/3125   train_loss = 1.146\n",
      "2018-12-27T16:59:17.697913: Epoch   0 Batch 1360/3125   train_loss = 1.191\n",
      "2018-12-27T16:59:19.023369: Epoch   0 Batch 1380/3125   train_loss = 1.115\n",
      "2018-12-27T16:59:20.201219: Epoch   0 Batch 1400/3125   train_loss = 1.295\n",
      "2018-12-27T16:59:21.543629: Epoch   0 Batch 1420/3125   train_loss = 1.347\n",
      "2018-12-27T16:59:22.891028: Epoch   0 Batch 1440/3125   train_loss = 1.194\n",
      "2018-12-27T16:59:24.407973: Epoch   0 Batch 1460/3125   train_loss = 1.303\n",
      "2018-12-27T16:59:25.758363: Epoch   0 Batch 1480/3125   train_loss = 1.346\n",
      "2018-12-27T16:59:27.169591: Epoch   0 Batch 1500/3125   train_loss = 1.348\n",
      "2018-12-27T16:59:28.516010: Epoch   0 Batch 1520/3125   train_loss = 1.287\n",
      "2018-12-27T16:59:30.284265: Epoch   0 Batch 1540/3125   train_loss = 1.344\n",
      "2018-12-27T16:59:31.866049: Epoch   0 Batch 1560/3125   train_loss = 1.263\n",
      "2018-12-27T16:59:33.407914: Epoch   0 Batch 1580/3125   train_loss = 1.333\n",
      "2018-12-27T16:59:34.792214: Epoch   0 Batch 1600/3125   train_loss = 1.379\n",
      "2018-12-27T16:59:36.238349: Epoch   0 Batch 1620/3125   train_loss = 1.273\n",
      "2018-12-27T16:59:37.639602: Epoch   0 Batch 1640/3125   train_loss = 1.359\n",
      "2018-12-27T16:59:39.257281: Epoch   0 Batch 1660/3125   train_loss = 1.335\n",
      "2018-12-27T16:59:40.750286: Epoch   0 Batch 1680/3125   train_loss = 1.263\n",
      "2018-12-27T16:59:41.950084: Epoch   0 Batch 1700/3125   train_loss = 1.213\n",
      "2018-12-27T16:59:43.197744: Epoch   0 Batch 1720/3125   train_loss = 1.229\n",
      "2018-12-27T16:59:44.513227: Epoch   0 Batch 1740/3125   train_loss = 1.271\n",
      "2018-12-27T16:59:45.746929: Epoch   0 Batch 1760/3125   train_loss = 1.404\n",
      "2018-12-27T16:59:46.884940: Epoch   0 Batch 1780/3125   train_loss = 1.169\n",
      "2018-12-27T16:59:48.064785: Epoch   0 Batch 1800/3125   train_loss = 1.270\n",
      "2018-12-27T16:59:49.217704: Epoch   0 Batch 1820/3125   train_loss = 1.226\n",
      "2018-12-27T16:59:50.769561: Epoch   0 Batch 1840/3125   train_loss = 1.304\n",
      "2018-12-27T16:59:52.187766: Epoch   0 Batch 1860/3125   train_loss = 1.325\n",
      "2018-12-27T16:59:53.472330: Epoch   0 Batch 1880/3125   train_loss = 1.285\n",
      "2018-12-27T16:59:54.912480: Epoch   0 Batch 1900/3125   train_loss = 1.119\n",
      "2018-12-27T16:59:56.410476: Epoch   0 Batch 1920/3125   train_loss = 1.204\n",
      "2018-12-27T16:59:57.967313: Epoch   0 Batch 1940/3125   train_loss = 1.171\n",
      "2018-12-27T16:59:59.517171: Epoch   0 Batch 1960/3125   train_loss = 1.203\n",
      "2018-12-27T17:00:00.886510: Epoch   0 Batch 1980/3125   train_loss = 1.202\n",
      "2018-12-27T17:00:02.144149: Epoch   0 Batch 2000/3125   train_loss = 1.405\n",
      "2018-12-27T17:00:03.406773: Epoch   0 Batch 2020/3125   train_loss = 1.332\n",
      "2018-12-27T17:00:04.895793: Epoch   0 Batch 2040/3125   train_loss = 1.180\n",
      "2018-12-27T17:00:06.534412: Epoch   0 Batch 2060/3125   train_loss = 1.051\n",
      "2018-12-27T17:00:07.711266: Epoch   0 Batch 2080/3125   train_loss = 1.380\n",
      "2018-12-27T17:00:08.877149: Epoch   0 Batch 2100/3125   train_loss = 1.238\n",
      "2018-12-27T17:00:10.095892: Epoch   0 Batch 2120/3125   train_loss = 1.160\n",
      "2018-12-27T17:00:11.255791: Epoch   0 Batch 2140/3125   train_loss = 1.251\n",
      "2018-12-27T17:00:12.478522: Epoch   0 Batch 2160/3125   train_loss = 1.215\n",
      "2018-12-27T17:00:13.616480: Epoch   0 Batch 2180/3125   train_loss = 1.241\n",
      "2018-12-27T17:00:14.736486: Epoch   0 Batch 2200/3125   train_loss = 1.156\n",
      "2018-12-27T17:00:16.001105: Epoch   0 Batch 2220/3125   train_loss = 1.234\n",
      "2018-12-27T17:00:17.130088: Epoch   0 Batch 2240/3125   train_loss = 1.031\n",
      "2018-12-27T17:00:18.525358: Epoch   0 Batch 2260/3125   train_loss = 1.184\n",
      "2018-12-27T17:00:19.955534: Epoch   0 Batch 2280/3125   train_loss = 1.288\n",
      "2018-12-27T17:00:21.367758: Epoch   0 Batch 2300/3125   train_loss = 1.299\n",
      "2018-12-27T17:00:22.671275: Epoch   0 Batch 2320/3125   train_loss = 1.397\n",
      "2018-12-27T17:00:24.120401: Epoch   0 Batch 2340/3125   train_loss = 1.306\n",
      "2018-12-27T17:00:25.758024: Epoch   0 Batch 2360/3125   train_loss = 1.312\n",
      "2018-12-27T17:00:27.261005: Epoch   0 Batch 2380/3125   train_loss = 1.218\n",
      "2018-12-27T17:00:28.741048: Epoch   0 Batch 2400/3125   train_loss = 1.305\n",
      "2018-12-27T17:00:29.930871: Epoch   0 Batch 2420/3125   train_loss = 1.206\n",
      "2018-12-27T17:00:31.161578: Epoch   0 Batch 2440/3125   train_loss = 1.300\n",
      "2018-12-27T17:00:32.463098: Epoch   0 Batch 2460/3125   train_loss = 1.220\n",
      "2018-12-27T17:00:33.903249: Epoch   0 Batch 2480/3125   train_loss = 1.268\n",
      "2018-12-27T17:00:35.292535: Epoch   0 Batch 2500/3125   train_loss = 1.258\n",
      "2018-12-27T17:00:36.738670: Epoch   0 Batch 2520/3125   train_loss = 1.229\n",
      "2018-12-27T17:00:38.189791: Epoch   0 Batch 2540/3125   train_loss = 1.143\n",
      "2018-12-27T17:00:39.414519: Epoch   0 Batch 2560/3125   train_loss = 1.091\n",
      "2018-12-27T17:00:40.488644: Epoch   0 Batch 2580/3125   train_loss = 1.191\n",
      "2018-12-27T17:00:41.652533: Epoch   0 Batch 2600/3125   train_loss = 1.228\n",
      "2018-12-27T17:00:42.902195: Epoch   0 Batch 2620/3125   train_loss = 1.158\n",
      "2018-12-27T17:00:44.099991: Epoch   0 Batch 2640/3125   train_loss = 1.233\n",
      "2018-12-27T17:00:45.267877: Epoch   0 Batch 2660/3125   train_loss = 1.260\n",
      "2018-12-27T17:00:46.424776: Epoch   0 Batch 2680/3125   train_loss = 1.140\n",
      "2018-12-27T17:00:47.625566: Epoch   0 Batch 2700/3125   train_loss = 1.315\n",
      "2018-12-27T17:00:48.908137: Epoch   0 Batch 2720/3125   train_loss = 1.228\n",
      "2018-12-27T17:00:50.214645: Epoch   0 Batch 2740/3125   train_loss = 1.238\n",
      "2018-12-27T17:00:51.578997: Epoch   0 Batch 2760/3125   train_loss = 1.260\n",
      "2018-12-27T17:00:52.738897: Epoch   0 Batch 2780/3125   train_loss = 1.230\n",
      "2018-12-27T17:00:54.018987: Epoch   0 Batch 2800/3125   train_loss = 1.471\n",
      "2018-12-27T17:00:55.285601: Epoch   0 Batch 2820/3125   train_loss = 1.458\n",
      "2018-12-27T17:00:56.572162: Epoch   0 Batch 2840/3125   train_loss = 1.224\n",
      "2018-12-27T17:00:57.739042: Epoch   0 Batch 2860/3125   train_loss = 1.273\n",
      "2018-12-27T17:00:58.850073: Epoch   0 Batch 2880/3125   train_loss = 1.252\n",
      "2018-12-27T17:00:59.985039: Epoch   0 Batch 2900/3125   train_loss = 1.236\n",
      "2018-12-27T17:01:01.177849: Epoch   0 Batch 2920/3125   train_loss = 1.293\n",
      "2018-12-27T17:01:02.270928: Epoch   0 Batch 2940/3125   train_loss = 1.184\n",
      "2018-12-27T17:01:03.538539: Epoch   0 Batch 2960/3125   train_loss = 1.219\n",
      "2018-12-27T17:01:04.833079: Epoch   0 Batch 2980/3125   train_loss = 1.184\n",
      "2018-12-27T17:01:06.077753: Epoch   0 Batch 3000/3125   train_loss = 1.247\n",
      "2018-12-27T17:01:07.334392: Epoch   0 Batch 3020/3125   train_loss = 1.305\n",
      "2018-12-27T17:01:08.506259: Epoch   0 Batch 3040/3125   train_loss = 1.189\n",
      "2018-12-27T17:01:09.749937: Epoch   0 Batch 3060/3125   train_loss = 1.284\n",
      "2018-12-27T17:01:10.982639: Epoch   0 Batch 3080/3125   train_loss = 1.286\n",
      "2018-12-27T17:01:12.359959: Epoch   0 Batch 3100/3125   train_loss = 1.270\n",
      "2018-12-27T17:01:13.650508: Epoch   0 Batch 3120/3125   train_loss = 1.108\n",
      "2018-12-27T17:01:14.079363: Epoch   0 Batch    0/781   test_loss = 1.026\n",
      "2018-12-27T17:01:14.441819: Epoch   0 Batch   20/781   test_loss = 1.225\n",
      "2018-12-27T17:01:14.769942: Epoch   0 Batch   40/781   test_loss = 1.195\n",
      "2018-12-27T17:01:15.102054: Epoch   0 Batch   60/781   test_loss = 1.387\n",
      "2018-12-27T17:01:15.543873: Epoch   0 Batch   80/781   test_loss = 1.408\n",
      "2018-12-27T17:01:16.016610: Epoch   0 Batch  100/781   test_loss = 1.460\n",
      "2018-12-27T17:01:16.364679: Epoch   0 Batch  120/781   test_loss = 1.299\n",
      "2018-12-27T17:01:16.741671: Epoch   0 Batch  140/781   test_loss = 1.325\n",
      "2018-12-27T17:01:17.110685: Epoch   0 Batch  160/781   test_loss = 1.495\n",
      "2018-12-27T17:01:17.576440: Epoch   0 Batch  180/781   test_loss = 1.349\n",
      "2018-12-27T17:01:18.045187: Epoch   0 Batch  200/781   test_loss = 1.353\n",
      "2018-12-27T17:01:18.632617: Epoch   0 Batch  220/781   test_loss = 1.036\n",
      "2018-12-27T17:01:19.046512: Epoch   0 Batch  240/781   test_loss = 1.218\n",
      "2018-12-27T17:01:19.373636: Epoch   0 Batch  260/781   test_loss = 1.299\n",
      "2018-12-27T17:01:19.698767: Epoch   0 Batch  280/781   test_loss = 1.538\n",
      "2018-12-27T17:01:20.013924: Epoch   0 Batch  300/781   test_loss = 1.277\n",
      "2018-12-27T17:01:20.345038: Epoch   0 Batch  320/781   test_loss = 1.448\n",
      "2018-12-27T17:01:20.678148: Epoch   0 Batch  340/781   test_loss = 0.954\n",
      "2018-12-27T17:01:21.000287: Epoch   0 Batch  360/781   test_loss = 1.333\n",
      "2018-12-27T17:01:21.368303: Epoch   0 Batch  380/781   test_loss = 1.218\n",
      "2018-12-27T17:01:21.699418: Epoch   0 Batch  400/781   test_loss = 1.221\n",
      "2018-12-27T17:01:22.009589: Epoch   0 Batch  420/781   test_loss = 1.079\n",
      "2018-12-27T17:01:22.450411: Epoch   0 Batch  440/781   test_loss = 1.326\n",
      "2018-12-27T17:01:22.950075: Epoch   0 Batch  460/781   test_loss = 1.236\n",
      "2018-12-27T17:01:23.275206: Epoch   0 Batch  480/781   test_loss = 1.231\n",
      "2018-12-27T17:01:23.766892: Epoch   0 Batch  500/781   test_loss = 1.025\n",
      "2018-12-27T17:01:24.203724: Epoch   0 Batch  520/781   test_loss = 1.288\n",
      "2018-12-27T17:01:24.651527: Epoch   0 Batch  540/781   test_loss = 1.140\n",
      "2018-12-27T17:01:25.023532: Epoch   0 Batch  560/781   test_loss = 1.464\n",
      "2018-12-27T17:01:25.345671: Epoch   0 Batch  580/781   test_loss = 1.274\n",
      "2018-12-27T17:01:25.821399: Epoch   0 Batch  600/781   test_loss = 1.257\n",
      "2018-12-27T17:01:26.146531: Epoch   0 Batch  620/781   test_loss = 1.371\n",
      "2018-12-27T17:01:26.652178: Epoch   0 Batch  640/781   test_loss = 1.340\n",
      "2018-12-27T17:01:26.989277: Epoch   0 Batch  660/781   test_loss = 1.272\n",
      "2018-12-27T17:01:27.396190: Epoch   0 Batch  680/781   test_loss = 1.571\n",
      "2018-12-27T17:01:27.913805: Epoch   0 Batch  700/781   test_loss = 1.173\n",
      "2018-12-27T17:01:28.440398: Epoch   0 Batch  720/781   test_loss = 1.388\n",
      "2018-12-27T17:01:28.934078: Epoch   0 Batch  740/781   test_loss = 1.334\n",
      "2018-12-27T17:01:29.369913: Epoch   0 Batch  760/781   test_loss = 1.297\n",
      "2018-12-27T17:01:29.693049: Epoch   0 Batch  780/781   test_loss = 1.262\n",
      "2018-12-27T17:01:31.575019: Epoch   1 Batch   15/3125   train_loss = 1.311\n",
      "2018-12-27T17:01:32.870556: Epoch   1 Batch   35/3125   train_loss = 1.214\n",
      "2018-12-27T17:01:34.060375: Epoch   1 Batch   55/3125   train_loss = 1.365\n",
      "2018-12-27T17:01:35.235235: Epoch   1 Batch   75/3125   train_loss = 1.184\n",
      "2018-12-27T17:01:36.489880: Epoch   1 Batch   95/3125   train_loss = 1.086\n",
      "2018-12-27T17:01:37.785416: Epoch   1 Batch  115/3125   train_loss = 1.274\n",
      "2018-12-27T17:01:38.918388: Epoch   1 Batch  135/3125   train_loss = 1.134\n",
      "2018-12-27T17:01:40.224896: Epoch   1 Batch  155/3125   train_loss = 1.159\n",
      "2018-12-27T17:01:41.488518: Epoch   1 Batch  175/3125   train_loss = 1.194\n",
      "2018-12-27T17:01:42.773083: Epoch   1 Batch  195/3125   train_loss = 1.215\n",
      "2018-12-27T17:01:44.171346: Epoch   1 Batch  215/3125   train_loss = 1.235\n",
      "2018-12-27T17:01:45.402056: Epoch   1 Batch  235/3125   train_loss = 1.209\n",
      "2018-12-27T17:01:46.642833: Epoch   1 Batch  255/3125   train_loss = 1.284\n",
      "2018-12-27T17:01:47.829655: Epoch   1 Batch  275/3125   train_loss = 1.185\n",
      "2018-12-27T17:01:49.041416: Epoch   1 Batch  295/3125   train_loss = 1.120\n",
      "2018-12-27T17:01:50.208296: Epoch   1 Batch  315/3125   train_loss = 1.173\n",
      "2018-12-27T17:01:51.455961: Epoch   1 Batch  335/3125   train_loss = 1.071\n",
      "2018-12-27T17:01:52.634810: Epoch   1 Batch  355/3125   train_loss = 1.274\n",
      "2018-12-27T17:01:54.091915: Epoch   1 Batch  375/3125   train_loss = 1.258\n",
      "2018-12-27T17:01:55.409392: Epoch   1 Batch  395/3125   train_loss = 1.170\n",
      "2018-12-27T17:01:57.070952: Epoch   1 Batch  415/3125   train_loss = 1.368\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2018-12-27T17:01:58.308643: Epoch   1 Batch  435/3125   train_loss = 1.281\n",
      "2018-12-27T17:01:59.446600: Epoch   1 Batch  455/3125   train_loss = 1.239\n",
      "2018-12-27T17:02:00.633428: Epoch   1 Batch  475/3125   train_loss = 1.324\n",
      "2018-12-27T17:02:01.742463: Epoch   1 Batch  495/3125   train_loss = 1.141\n",
      "2018-12-27T17:02:02.856485: Epoch   1 Batch  515/3125   train_loss = 1.235\n",
      "2018-12-27T17:02:04.021371: Epoch   1 Batch  535/3125   train_loss = 1.266\n",
      "2018-12-27T17:02:05.255072: Epoch   1 Batch  555/3125   train_loss = 1.387\n",
      "2018-12-27T17:02:06.507723: Epoch   1 Batch  575/3125   train_loss = 1.269\n",
      "2018-12-27T17:02:07.638701: Epoch   1 Batch  595/3125   train_loss = 1.336\n",
      "2018-12-27T17:02:08.763694: Epoch   1 Batch  615/3125   train_loss = 1.156\n",
      "2018-12-27T17:02:09.897661: Epoch   1 Batch  635/3125   train_loss = 1.245\n",
      "2018-12-27T17:02:11.016670: Epoch   1 Batch  655/3125   train_loss = 1.147\n",
      "2018-12-27T17:02:12.155628: Epoch   1 Batch  675/3125   train_loss = 1.057\n",
      "2018-12-27T17:02:13.309541: Epoch   1 Batch  695/3125   train_loss = 1.146\n",
      "2018-12-27T17:02:14.426554: Epoch   1 Batch  715/3125   train_loss = 1.252\n",
      "2018-12-27T17:02:15.598425: Epoch   1 Batch  735/3125   train_loss = 1.061\n",
      "2018-12-27T17:02:16.727405: Epoch   1 Batch  755/3125   train_loss = 1.223\n",
      "2018-12-27T17:02:17.851400: Epoch   1 Batch  775/3125   train_loss = 1.076\n",
      "2018-12-27T17:02:19.008307: Epoch   1 Batch  795/3125   train_loss = 1.395\n",
      "2018-12-27T17:02:20.101385: Epoch   1 Batch  815/3125   train_loss = 1.218\n",
      "2018-12-27T17:02:21.235353: Epoch   1 Batch  835/3125   train_loss = 1.231\n",
      "2018-12-27T17:02:22.401237: Epoch   1 Batch  855/3125   train_loss = 1.328\n",
      "2018-12-27T17:02:23.518251: Epoch   1 Batch  875/3125   train_loss = 1.336\n",
      "2018-12-27T17:02:24.719042: Epoch   1 Batch  895/3125   train_loss = 1.201\n",
      "2018-12-27T17:02:25.864978: Epoch   1 Batch  915/3125   train_loss = 1.186\n",
      "2018-12-27T17:02:26.953069: Epoch   1 Batch  935/3125   train_loss = 1.335\n",
      "2018-12-27T17:02:28.145881: Epoch   1 Batch  955/3125   train_loss = 1.362\n",
      "2018-12-27T17:02:29.268878: Epoch   1 Batch  975/3125   train_loss = 1.281\n",
      "2018-12-27T17:02:30.401849: Epoch   1 Batch  995/3125   train_loss = 0.984\n",
      "2018-12-27T17:02:31.524847: Epoch   1 Batch 1015/3125   train_loss = 1.170\n",
      "2018-12-27T17:02:32.640864: Epoch   1 Batch 1035/3125   train_loss = 1.186\n",
      "2018-12-27T17:02:33.833676: Epoch   1 Batch 1055/3125   train_loss = 1.250\n",
      "2018-12-27T17:02:34.943709: Epoch   1 Batch 1075/3125   train_loss = 1.095\n",
      "2018-12-27T17:02:36.098635: Epoch   1 Batch 1095/3125   train_loss = 1.167\n",
      "2018-12-27T17:02:37.304397: Epoch   1 Batch 1115/3125   train_loss = 1.245\n",
      "2018-12-27T17:02:38.416424: Epoch   1 Batch 1135/3125   train_loss = 1.175\n",
      "2018-12-27T17:02:39.555385: Epoch   1 Batch 1155/3125   train_loss = 1.287\n",
      "2018-12-27T17:02:40.694336: Epoch   1 Batch 1175/3125   train_loss = 1.256\n",
      "2018-12-27T17:02:41.812347: Epoch   1 Batch 1195/3125   train_loss = 1.411\n",
      "2018-12-27T17:02:43.002167: Epoch   1 Batch 1215/3125   train_loss = 1.104\n",
      "2018-12-27T17:02:44.105218: Epoch   1 Batch 1235/3125   train_loss = 1.206\n",
      "2018-12-27T17:02:45.329942: Epoch   1 Batch 1255/3125   train_loss = 1.126\n",
      "2018-12-27T17:02:46.613512: Epoch   1 Batch 1275/3125   train_loss = 1.122\n",
      "2018-12-27T17:02:47.815298: Epoch   1 Batch 1295/3125   train_loss = 1.206\n",
      "2018-12-27T17:02:49.087897: Epoch   1 Batch 1315/3125   train_loss = 1.353\n",
      "2018-12-27T17:02:50.268741: Epoch   1 Batch 1335/3125   train_loss = 1.139\n",
      "2018-12-27T17:02:51.387750: Epoch   1 Batch 1355/3125   train_loss = 1.164\n",
      "2018-12-27T17:02:52.530695: Epoch   1 Batch 1375/3125   train_loss = 1.233\n",
      "2018-12-27T17:02:53.657682: Epoch   1 Batch 1395/3125   train_loss = 1.271\n",
      "2018-12-27T17:02:54.794643: Epoch   1 Batch 1415/3125   train_loss = 1.239\n",
      "2018-12-27T17:02:55.937586: Epoch   1 Batch 1435/3125   train_loss = 1.267\n",
      "2018-12-27T17:02:57.047618: Epoch   1 Batch 1455/3125   train_loss = 1.289\n",
      "2018-12-27T17:02:58.211508: Epoch   1 Batch 1475/3125   train_loss = 1.234\n",
      "2018-12-27T17:02:59.328522: Epoch   1 Batch 1495/3125   train_loss = 1.117\n",
      "2018-12-27T17:03:00.469473: Epoch   1 Batch 1515/3125   train_loss = 1.126\n",
      "2018-12-27T17:03:01.610422: Epoch   1 Batch 1535/3125   train_loss = 1.040\n",
      "2018-12-27T17:03:02.744391: Epoch   1 Batch 1555/3125   train_loss = 1.257\n",
      "2018-12-27T17:03:03.904290: Epoch   1 Batch 1575/3125   train_loss = 1.225\n",
      "2018-12-27T17:03:05.026289: Epoch   1 Batch 1595/3125   train_loss = 1.257\n",
      "2018-12-27T17:03:06.185193: Epoch   1 Batch 1615/3125   train_loss = 1.190\n",
      "2018-12-27T17:03:07.358057: Epoch   1 Batch 1635/3125   train_loss = 1.195\n",
      "2018-12-27T17:03:08.467093: Epoch   1 Batch 1655/3125   train_loss = 1.317\n",
      "2018-12-27T17:03:09.621010: Epoch   1 Batch 1675/3125   train_loss = 1.127\n",
      "2018-12-27T17:03:10.779910: Epoch   1 Batch 1695/3125   train_loss = 1.231\n",
      "2018-12-27T17:03:11.900913: Epoch   1 Batch 1715/3125   train_loss = 1.191\n",
      "2018-12-27T17:03:13.097720: Epoch   1 Batch 1735/3125   train_loss = 1.393\n",
      "2018-12-27T17:03:14.232680: Epoch   1 Batch 1755/3125   train_loss = 1.181\n",
      "2018-12-27T17:03:15.370639: Epoch   1 Batch 1775/3125   train_loss = 1.161\n",
      "2018-12-27T17:03:16.580404: Epoch   1 Batch 1795/3125   train_loss = 1.170\n",
      "2018-12-27T17:03:17.682458: Epoch   1 Batch 1815/3125   train_loss = 1.165\n",
      "2018-12-27T17:03:18.875270: Epoch   1 Batch 1835/3125   train_loss = 1.274\n",
      "2018-12-27T17:03:19.986299: Epoch   1 Batch 1855/3125   train_loss = 1.134\n",
      "2018-12-27T17:03:21.106305: Epoch   1 Batch 1875/3125   train_loss = 1.437\n",
      "2018-12-27T17:03:22.310087: Epoch   1 Batch 1895/3125   train_loss = 1.181\n",
      "2018-12-27T17:03:23.422115: Epoch   1 Batch 1915/3125   train_loss = 1.095\n",
      "2018-12-27T17:03:24.576030: Epoch   1 Batch 1935/3125   train_loss = 1.194\n",
      "2018-12-27T17:03:25.750888: Epoch   1 Batch 1955/3125   train_loss = 1.053\n",
      "2018-12-27T17:03:26.886852: Epoch   1 Batch 1975/3125   train_loss = 1.146\n",
      "2018-12-27T17:03:28.074676: Epoch   1 Batch 1995/3125   train_loss = 1.252\n",
      "2018-12-27T17:03:29.167754: Epoch   1 Batch 2015/3125   train_loss = 1.343\n",
      "2018-12-27T17:03:30.306711: Epoch   1 Batch 2035/3125   train_loss = 1.209\n",
      "2018-12-27T17:03:31.497527: Epoch   1 Batch 2055/3125   train_loss = 1.159\n",
      "2018-12-27T17:03:32.624514: Epoch   1 Batch 2075/3125   train_loss = 1.339\n",
      "2018-12-27T17:03:33.809347: Epoch   1 Batch 2095/3125   train_loss = 1.108\n",
      "2018-12-27T17:03:34.956281: Epoch   1 Batch 2115/3125   train_loss = 1.246\n",
      "2018-12-27T17:03:36.109198: Epoch   1 Batch 2135/3125   train_loss = 1.092\n",
      "2018-12-27T17:03:37.289045: Epoch   1 Batch 2155/3125   train_loss = 1.242\n",
      "2018-12-27T17:03:38.412043: Epoch   1 Batch 2175/3125   train_loss = 1.238\n",
      "2018-12-27T17:03:39.544017: Epoch   1 Batch 2195/3125   train_loss = 1.205\n",
      "2018-12-27T17:03:40.678982: Epoch   1 Batch 2215/3125   train_loss = 1.281\n",
      "2018-12-27T17:03:41.803976: Epoch   1 Batch 2235/3125   train_loss = 1.305\n",
      "2018-12-27T17:03:42.951907: Epoch   1 Batch 2255/3125   train_loss = 1.265\n",
      "2018-12-27T17:03:44.076900: Epoch   1 Batch 2275/3125   train_loss = 1.126\n",
      "2018-12-27T17:03:45.216851: Epoch   1 Batch 2295/3125   train_loss = 1.472\n",
      "2018-12-27T17:03:46.470781: Epoch   1 Batch 2315/3125   train_loss = 1.391\n",
      "2018-12-27T17:03:47.604750: Epoch   1 Batch 2335/3125   train_loss = 1.222\n",
      "2018-12-27T17:03:48.747695: Epoch   1 Batch 2355/3125   train_loss = 1.227\n",
      "2018-12-27T17:03:49.861716: Epoch   1 Batch 2375/3125   train_loss = 1.414\n",
      "2018-12-27T17:03:51.065498: Epoch   1 Batch 2395/3125   train_loss = 1.241\n",
      "2018-12-27T17:03:52.298203: Epoch   1 Batch 2415/3125   train_loss = 1.233\n",
      "2018-12-27T17:03:53.497997: Epoch   1 Batch 2435/3125   train_loss = 1.152\n",
      "2018-12-27T17:03:54.753639: Epoch   1 Batch 2455/3125   train_loss = 1.285\n",
      "2018-12-27T17:03:56.020253: Epoch   1 Batch 2475/3125   train_loss = 1.008\n",
      "2018-12-27T17:03:57.298835: Epoch   1 Batch 2495/3125   train_loss = 1.095\n",
      "2018-12-27T17:03:58.527551: Epoch   1 Batch 2515/3125   train_loss = 1.232\n",
      "2018-12-27T17:03:59.664511: Epoch   1 Batch 2535/3125   train_loss = 1.330\n",
      "2018-12-27T17:04:00.880263: Epoch   1 Batch 2555/3125   train_loss = 1.112\n",
      "2018-12-27T17:04:02.006251: Epoch   1 Batch 2575/3125   train_loss = 1.122\n",
      "2018-12-27T17:04:03.146204: Epoch   1 Batch 2595/3125   train_loss = 1.211\n",
      "2018-12-27T17:04:04.351000: Epoch   1 Batch 2615/3125   train_loss = 1.223\n",
      "2018-12-27T17:04:05.486946: Epoch   1 Batch 2635/3125   train_loss = 1.097\n",
      "2018-12-27T17:04:06.687737: Epoch   1 Batch 2655/3125   train_loss = 1.218\n",
      "2018-12-27T17:04:07.862596: Epoch   1 Batch 2675/3125   train_loss = 1.139\n",
      "2018-12-27T17:04:08.990581: Epoch   1 Batch 2695/3125   train_loss = 1.333\n",
      "2018-12-27T17:04:10.184389: Epoch   1 Batch 2715/3125   train_loss = 1.198\n",
      "2018-12-27T17:04:11.306391: Epoch   1 Batch 2735/3125   train_loss = 1.048\n",
      "2018-12-27T17:04:12.431388: Epoch   1 Batch 2755/3125   train_loss = 1.141\n",
      "2018-12-27T17:04:13.624195: Epoch   1 Batch 2775/3125   train_loss = 1.325\n",
      "2018-12-27T17:04:14.761154: Epoch   1 Batch 2795/3125   train_loss = 1.315\n",
      "2018-12-27T17:04:15.994859: Epoch   1 Batch 2815/3125   train_loss = 1.171\n",
      "2018-12-27T17:04:17.271444: Epoch   1 Batch 2835/3125   train_loss = 1.224\n",
      "2018-12-27T17:04:18.507144: Epoch   1 Batch 2855/3125   train_loss = 1.230\n",
      "2018-12-27T17:04:19.876481: Epoch   1 Batch 2875/3125   train_loss = 1.289\n",
      "2018-12-27T17:04:21.330594: Epoch   1 Batch 2895/3125   train_loss = 1.193\n",
      "2018-12-27T17:04:22.693948: Epoch   1 Batch 2915/3125   train_loss = 1.132\n",
      "2018-12-27T17:04:23.883769: Epoch   1 Batch 2935/3125   train_loss = 1.266\n",
      "2018-12-27T17:04:25.063658: Epoch   1 Batch 2955/3125   train_loss = 1.322\n",
      "2018-12-27T17:04:26.216532: Epoch   1 Batch 2975/3125   train_loss = 1.213\n",
      "2018-12-27T17:04:27.329556: Epoch   1 Batch 2995/3125   train_loss = 1.227\n",
      "2018-12-27T17:04:28.497434: Epoch   1 Batch 3015/3125   train_loss = 1.177\n",
      "2018-12-27T17:04:29.614449: Epoch   1 Batch 3035/3125   train_loss = 1.262\n",
      "2018-12-27T17:04:30.756395: Epoch   1 Batch 3055/3125   train_loss = 1.255\n",
      "2018-12-27T17:04:31.916295: Epoch   1 Batch 3075/3125   train_loss = 1.156\n",
      "2018-12-27T17:04:33.022338: Epoch   1 Batch 3095/3125   train_loss = 1.201\n",
      "2018-12-27T17:04:34.171267: Epoch   1 Batch 3115/3125   train_loss = 1.073\n",
      "2018-12-27T17:04:34.995498: Epoch   1 Batch   19/781   test_loss = 1.239\n",
      "2018-12-27T17:04:35.335589: Epoch   1 Batch   39/781   test_loss = 1.106\n",
      "2018-12-27T17:04:35.684655: Epoch   1 Batch   59/781   test_loss = 1.043\n",
      "2018-12-27T17:04:36.032724: Epoch   1 Batch   79/781   test_loss = 1.120\n",
      "2018-12-27T17:04:36.357856: Epoch   1 Batch   99/781   test_loss = 1.112\n",
      "2018-12-27T17:04:36.705926: Epoch   1 Batch  119/781   test_loss = 1.274\n",
      "2018-12-27T17:04:37.055990: Epoch   1 Batch  139/781   test_loss = 1.271\n",
      "2018-12-27T17:04:37.391094: Epoch   1 Batch  159/781   test_loss = 1.189\n",
      "2018-12-27T17:04:37.718220: Epoch   1 Batch  179/781   test_loss = 1.235\n",
      "2018-12-27T17:04:38.040358: Epoch   1 Batch  199/781   test_loss = 1.138\n",
      "2018-12-27T17:04:38.366487: Epoch   1 Batch  219/781   test_loss = 1.153\n",
      "2018-12-27T17:04:38.688625: Epoch   1 Batch  239/781   test_loss = 1.302\n",
      "2018-12-27T17:04:38.995803: Epoch   1 Batch  259/781   test_loss = 1.165\n",
      "2018-12-27T17:04:39.303981: Epoch   1 Batch  279/781   test_loss = 1.358\n",
      "2018-12-27T17:04:39.655042: Epoch   1 Batch  299/781   test_loss = 1.372\n",
      "2018-12-27T17:04:40.016076: Epoch   1 Batch  319/781   test_loss = 1.241\n",
      "2018-12-27T17:04:40.351181: Epoch   1 Batch  339/781   test_loss = 1.054\n",
      "2018-12-27T17:04:40.681298: Epoch   1 Batch  359/781   test_loss = 1.108\n",
      "2018-12-27T17:04:41.004435: Epoch   1 Batch  379/781   test_loss = 1.268\n",
      "2018-12-27T17:04:41.333554: Epoch   1 Batch  399/781   test_loss = 1.105\n",
      "2018-12-27T17:04:41.643725: Epoch   1 Batch  419/781   test_loss = 1.136\n",
      "2018-12-27T17:04:41.969855: Epoch   1 Batch  439/781   test_loss = 1.232\n",
      "2018-12-27T17:04:42.291993: Epoch   1 Batch  459/781   test_loss = 1.273\n",
      "2018-12-27T17:04:42.629092: Epoch   1 Batch  479/781   test_loss = 1.345\n",
      "2018-12-27T17:04:42.974168: Epoch   1 Batch  499/781   test_loss = 1.099\n",
      "2018-12-27T17:04:43.305284: Epoch   1 Batch  519/781   test_loss = 1.343\n",
      "2018-12-27T17:04:43.634404: Epoch   1 Batch  539/781   test_loss = 1.071\n",
      "2018-12-27T17:04:43.961529: Epoch   1 Batch  559/781   test_loss = 1.439\n",
      "2018-12-27T17:04:44.293641: Epoch   1 Batch  579/781   test_loss = 1.344\n",
      "2018-12-27T17:04:44.616778: Epoch   1 Batch  599/781   test_loss = 1.183\n",
      "2018-12-27T17:04:44.945897: Epoch   1 Batch  619/781   test_loss = 1.350\n",
      "2018-12-27T17:04:45.293967: Epoch   1 Batch  639/781   test_loss = 1.215\n",
      "2018-12-27T17:04:45.680932: Epoch   1 Batch  659/781   test_loss = 1.372\n",
      "2018-12-27T17:04:46.032993: Epoch   1 Batch  679/781   test_loss = 1.293\n",
      "2018-12-27T17:04:46.345157: Epoch   1 Batch  699/781   test_loss = 1.058\n",
      "2018-12-27T17:04:46.674279: Epoch   1 Batch  719/781   test_loss = 1.135\n",
      "2018-12-27T17:04:47.004395: Epoch   1 Batch  739/781   test_loss = 1.242\n",
      "2018-12-27T17:04:47.338502: Epoch   1 Batch  759/781   test_loss = 1.160\n",
      "2018-12-27T17:04:47.651664: Epoch   1 Batch  779/781   test_loss = 1.118\n",
      "2018-12-27T17:04:48.900327: Epoch   2 Batch   10/3125   train_loss = 1.095\n",
      "2018-12-27T17:04:50.038285: Epoch   2 Batch   30/3125   train_loss = 1.200\n",
      "2018-12-27T17:04:51.172253: Epoch   2 Batch   50/3125   train_loss = 1.233\n",
      "2018-12-27T17:04:52.355092: Epoch   2 Batch   70/3125   train_loss = 1.290\n",
      "2018-12-27T17:04:53.466122: Epoch   2 Batch   90/3125   train_loss = 1.267\n",
      "2018-12-27T17:04:54.598096: Epoch   2 Batch  110/3125   train_loss = 1.163\n",
      "2018-12-27T17:04:55.810854: Epoch   2 Batch  130/3125   train_loss = 1.192\n",
      "2018-12-27T17:04:56.957788: Epoch   2 Batch  150/3125   train_loss = 1.259\n",
      "2018-12-27T17:04:58.173538: Epoch   2 Batch  170/3125   train_loss = 1.185\n",
      "2018-12-27T17:04:59.349395: Epoch   2 Batch  190/3125   train_loss = 1.314\n",
      "2018-12-27T17:05:00.615010: Epoch   2 Batch  210/3125   train_loss = 1.176\n",
      "2018-12-27T17:05:01.873646: Epoch   2 Batch  230/3125   train_loss = 1.225\n",
      "2018-12-27T17:05:03.107349: Epoch   2 Batch  250/3125   train_loss = 1.122\n",
      "2018-12-27T17:05:04.385931: Epoch   2 Batch  270/3125   train_loss = 1.093\n",
      "2018-12-27T17:05:05.529872: Epoch   2 Batch  290/3125   train_loss = 1.205\n",
      "2018-12-27T17:05:06.967030: Epoch   2 Batch  310/3125   train_loss = 1.206\n",
      "2018-12-27T17:05:08.307448: Epoch   2 Batch  330/3125   train_loss = 1.191\n",
      "2018-12-27T17:05:09.506243: Epoch   2 Batch  350/3125   train_loss = 1.094\n",
      "2018-12-27T17:05:10.747955: Epoch   2 Batch  370/3125   train_loss = 1.299\n",
      "2018-12-27T17:05:11.902382: Epoch   2 Batch  390/3125   train_loss = 1.397\n",
      "2018-12-27T17:05:13.077241: Epoch   2 Batch  410/3125   train_loss = 1.080\n",
      "2018-12-27T17:05:14.203231: Epoch   2 Batch  430/3125   train_loss = 1.285\n",
      "2018-12-27T17:05:15.329221: Epoch   2 Batch  450/3125   train_loss = 1.112\n",
      "2018-12-27T17:05:16.532006: Epoch   2 Batch  470/3125   train_loss = 1.046\n",
      "2018-12-27T17:05:17.668966: Epoch   2 Batch  490/3125   train_loss = 1.249\n",
      "2018-12-27T17:05:18.795955: Epoch   2 Batch  510/3125   train_loss = 1.221\n",
      "2018-12-27T17:05:19.960839: Epoch   2 Batch  530/3125   train_loss = 1.236\n",
      "2018-12-27T17:05:21.079849: Epoch   2 Batch  550/3125   train_loss = 1.158\n",
      "2018-12-27T17:05:22.244734: Epoch   2 Batch  570/3125   train_loss = 1.362\n",
      "2018-12-27T17:05:23.395658: Epoch   2 Batch  590/3125   train_loss = 1.262\n",
      "2018-12-27T17:05:24.589470: Epoch   2 Batch  610/3125   train_loss = 1.210\n",
      "2018-12-27T17:05:25.889989: Epoch   2 Batch  630/3125   train_loss = 1.131\n",
      "2018-12-27T17:05:27.140646: Epoch   2 Batch  650/3125   train_loss = 1.290\n",
      "2018-12-27T17:05:28.427207: Epoch   2 Batch  670/3125   train_loss = 1.085\n",
      "2018-12-27T17:05:29.606055: Epoch   2 Batch  690/3125   train_loss = 1.228\n",
      "2018-12-27T17:05:30.757976: Epoch   2 Batch  710/3125   train_loss = 1.091\n",
      "2018-12-27T17:05:31.933833: Epoch   2 Batch  730/3125   train_loss = 1.045\n",
      "2018-12-27T17:05:33.081765: Epoch   2 Batch  750/3125   train_loss = 1.170\n",
      "2018-12-27T17:05:34.374310: Epoch   2 Batch  770/3125   train_loss = 1.021\n",
      "2018-12-27T17:05:35.660870: Epoch   2 Batch  790/3125   train_loss = 1.122\n",
      "2018-12-27T17:05:36.833737: Epoch   2 Batch  810/3125   train_loss = 1.028\n",
      "2018-12-27T17:05:38.002610: Epoch   2 Batch  830/3125   train_loss = 0.993\n",
      "2018-12-27T17:05:39.148547: Epoch   2 Batch  850/3125   train_loss = 1.205\n",
      "2018-12-27T17:05:40.374271: Epoch   2 Batch  870/3125   train_loss = 1.022\n",
      "2018-12-27T17:05:41.512227: Epoch   2 Batch  890/3125   train_loss = 1.084\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2018-12-27T17:05:42.669136: Epoch   2 Batch  910/3125   train_loss = 1.203\n",
      "2018-12-27T17:05:43.920789: Epoch   2 Batch  930/3125   train_loss = 1.270\n",
      "2018-12-27T17:05:45.100636: Epoch   2 Batch  950/3125   train_loss = 1.146\n",
      "2018-12-27T17:05:46.408471: Epoch   2 Batch  970/3125   train_loss = 1.266\n",
      "2018-12-27T17:05:47.558397: Epoch   2 Batch  990/3125   train_loss = 1.089\n",
      "2018-12-27T17:05:48.716301: Epoch   2 Batch 1010/3125   train_loss = 1.386\n",
      "2018-12-27T17:05:49.903129: Epoch   2 Batch 1030/3125   train_loss = 1.102\n",
      "2018-12-27T17:05:51.044078: Epoch   2 Batch 1050/3125   train_loss = 1.177\n",
      "2018-12-27T17:05:52.255840: Epoch   2 Batch 1070/3125   train_loss = 1.180\n",
      "2018-12-27T17:05:53.417732: Epoch   2 Batch 1090/3125   train_loss = 1.220\n",
      "2018-12-27T17:05:54.549708: Epoch   2 Batch 1110/3125   train_loss = 1.338\n",
      "2018-12-27T17:05:55.866188: Epoch   2 Batch 1130/3125   train_loss = 1.212\n",
      "2018-12-27T17:05:57.165714: Epoch   2 Batch 1150/3125   train_loss = 1.161\n",
      "2018-12-27T17:05:58.423352: Epoch   2 Batch 1170/3125   train_loss = 1.131\n",
      "2018-12-27T17:05:59.559315: Epoch   2 Batch 1190/3125   train_loss = 1.231\n",
      "2018-12-27T17:06:00.733179: Epoch   2 Batch 1210/3125   train_loss = 1.125\n",
      "2018-12-27T17:06:01.908035: Epoch   2 Batch 1230/3125   train_loss = 1.147\n",
      "2018-12-27T17:06:03.038016: Epoch   2 Batch 1250/3125   train_loss = 1.130\n",
      "2018-12-27T17:06:04.208887: Epoch   2 Batch 1270/3125   train_loss = 1.197\n",
      "2018-12-27T17:06:05.408679: Epoch   2 Batch 1290/3125   train_loss = 1.166\n",
      "2018-12-27T17:06:06.708204: Epoch   2 Batch 1310/3125   train_loss = 1.195\n",
      "2018-12-27T17:06:08.027709: Epoch   2 Batch 1330/3125   train_loss = 1.337\n",
      "2018-12-27T17:06:09.275875: Epoch   2 Batch 1350/3125   train_loss = 1.130\n",
      "2018-12-27T17:06:10.466693: Epoch   2 Batch 1370/3125   train_loss = 1.024\n",
      "2018-12-27T17:06:11.581712: Epoch   2 Batch 1390/3125   train_loss = 1.197\n",
      "2018-12-27T17:06:12.726653: Epoch   2 Batch 1410/3125   train_loss = 1.184\n",
      "2018-12-27T17:06:13.886551: Epoch   2 Batch 1430/3125   train_loss = 1.308\n",
      "2018-12-27T17:06:15.002568: Epoch   2 Batch 1450/3125   train_loss = 1.248\n",
      "2018-12-27T17:06:16.189396: Epoch   2 Batch 1470/3125   train_loss = 1.181\n",
      "2018-12-27T17:06:17.337326: Epoch   2 Batch 1490/3125   train_loss = 1.187\n",
      "2018-12-27T17:06:18.517297: Epoch   2 Batch 1510/3125   train_loss = 1.031\n",
      "2018-12-27T17:06:19.679190: Epoch   2 Batch 1530/3125   train_loss = 1.325\n",
      "2018-12-27T17:06:20.897932: Epoch   2 Batch 1550/3125   train_loss = 1.012\n",
      "2018-12-27T17:06:22.212418: Epoch   2 Batch 1570/3125   train_loss = 1.055\n",
      "2018-12-27T17:06:23.372317: Epoch   2 Batch 1590/3125   train_loss = 1.185\n",
      "2018-12-27T17:06:24.582083: Epoch   2 Batch 1610/3125   train_loss = 1.014\n",
      "2018-12-27T17:06:25.879614: Epoch   2 Batch 1630/3125   train_loss = 1.092\n",
      "2018-12-27T17:06:27.002614: Epoch   2 Batch 1650/3125   train_loss = 1.032\n",
      "2018-12-27T17:06:28.231328: Epoch   2 Batch 1670/3125   train_loss = 0.962\n",
      "2018-12-27T17:06:29.385243: Epoch   2 Batch 1690/3125   train_loss = 1.159\n",
      "2018-12-27T17:06:30.531180: Epoch   2 Batch 1710/3125   train_loss = 1.158\n",
      "2018-12-27T17:06:31.828712: Epoch   2 Batch 1730/3125   train_loss = 1.209\n",
      "2018-12-27T17:06:33.130233: Epoch   2 Batch 1750/3125   train_loss = 1.068\n",
      "2018-12-27T17:06:34.366949: Epoch   2 Batch 1770/3125   train_loss = 1.212\n",
      "2018-12-27T17:06:35.560758: Epoch   2 Batch 1790/3125   train_loss = 1.210\n",
      "2018-12-27T17:06:36.732625: Epoch   2 Batch 1810/3125   train_loss = 1.223\n",
      "2018-12-27T17:06:37.981287: Epoch   2 Batch 1830/3125   train_loss = 1.184\n",
      "2018-12-27T17:06:39.120243: Epoch   2 Batch 1850/3125   train_loss = 1.112\n",
      "2018-12-27T17:06:40.333000: Epoch   2 Batch 1870/3125   train_loss = 1.288\n",
      "2018-12-27T17:06:41.496889: Epoch   2 Batch 1890/3125   train_loss = 0.972\n",
      "2018-12-27T17:06:42.629860: Epoch   2 Batch 1910/3125   train_loss = 1.160\n",
      "2018-12-27T17:06:43.831649: Epoch   2 Batch 1930/3125   train_loss = 1.151\n",
      "2018-12-27T17:06:44.963627: Epoch   2 Batch 1950/3125   train_loss = 1.066\n",
      "2018-12-27T17:06:46.211286: Epoch   2 Batch 1970/3125   train_loss = 1.204\n",
      "2018-12-27T17:06:47.398114: Epoch   2 Batch 1990/3125   train_loss = 1.020\n",
      "2018-12-27T17:06:48.684675: Epoch   2 Batch 2010/3125   train_loss = 0.973\n",
      "2018-12-27T17:06:50.109394: Epoch   2 Batch 2030/3125   train_loss = 1.123\n",
      "2018-12-27T17:06:51.333123: Epoch   2 Batch 2050/3125   train_loss = 1.103\n",
      "2018-12-27T17:06:52.505987: Epoch   2 Batch 2070/3125   train_loss = 1.071\n",
      "2018-12-27T17:06:53.700799: Epoch   2 Batch 2090/3125   train_loss = 1.111\n",
      "2018-12-27T17:06:54.978378: Epoch   2 Batch 2110/3125   train_loss = 1.230\n",
      "2018-12-27T17:06:56.257957: Epoch   2 Batch 2130/3125   train_loss = 1.096\n",
      "2018-12-27T17:06:57.580421: Epoch   2 Batch 2150/3125   train_loss = 1.271\n",
      "2018-12-27T17:06:59.062461: Epoch   2 Batch 2170/3125   train_loss = 1.041\n",
      "2018-12-27T17:07:00.552478: Epoch   2 Batch 2190/3125   train_loss = 1.181\n",
      "2018-12-27T17:07:01.978665: Epoch   2 Batch 2210/3125   train_loss = 1.077\n",
      "2018-12-27T17:07:03.370944: Epoch   2 Batch 2230/3125   train_loss = 1.092\n",
      "2018-12-27T17:07:04.731306: Epoch   2 Batch 2250/3125   train_loss = 1.185\n",
      "2018-12-27T17:07:06.106630: Epoch   2 Batch 2270/3125   train_loss = 1.106\n",
      "2018-12-27T17:07:07.372247: Epoch   2 Batch 2290/3125   train_loss = 1.000\n",
      "2018-12-27T17:07:08.645841: Epoch   2 Batch 2310/3125   train_loss = 1.113\n",
      "2018-12-27T17:07:10.110925: Epoch   2 Batch 2330/3125   train_loss = 1.160\n",
      "2018-12-27T17:07:11.655796: Epoch   2 Batch 2350/3125   train_loss = 1.217\n",
      "2018-12-27T17:07:12.963300: Epoch   2 Batch 2370/3125   train_loss = 1.075\n",
      "2018-12-27T17:07:14.260832: Epoch   2 Batch 2390/3125   train_loss = 1.157\n",
      "2018-12-27T17:07:15.591275: Epoch   2 Batch 2410/3125   train_loss = 1.239\n",
      "2018-12-27T17:07:16.858892: Epoch   2 Batch 2430/3125   train_loss = 1.170\n",
      "2018-12-27T17:07:18.020781: Epoch   2 Batch 2450/3125   train_loss = 1.175\n",
      "2018-12-27T17:07:19.344247: Epoch   2 Batch 2470/3125   train_loss = 1.238\n",
      "2018-12-27T17:07:20.656735: Epoch   2 Batch 2490/3125   train_loss = 1.138\n",
      "2018-12-27T17:07:22.122815: Epoch   2 Batch 2510/3125   train_loss = 1.232\n",
      "2018-12-27T17:07:23.583911: Epoch   2 Batch 2530/3125   train_loss = 0.972\n",
      "2018-12-27T17:07:24.854513: Epoch   2 Batch 2550/3125   train_loss = 1.239\n",
      "2018-12-27T17:07:26.367468: Epoch   2 Batch 2570/3125   train_loss = 1.149\n",
      "2018-12-27T17:07:27.632087: Epoch   2 Batch 2590/3125   train_loss = 1.226\n",
      "2018-12-27T17:07:28.840856: Epoch   2 Batch 2610/3125   train_loss = 1.310\n",
      "2018-12-27T17:07:30.222164: Epoch   2 Batch 2630/3125   train_loss = 0.882\n",
      "2018-12-27T17:07:31.716170: Epoch   2 Batch 2650/3125   train_loss = 1.151\n",
      "2018-12-27T17:07:32.947878: Epoch   2 Batch 2670/3125   train_loss = 1.167\n",
      "2018-12-27T17:07:34.350133: Epoch   2 Batch 2690/3125   train_loss = 1.159\n",
      "2018-12-27T17:07:35.746397: Epoch   2 Batch 2710/3125   train_loss = 1.066\n",
      "2018-12-27T17:07:36.949182: Epoch   2 Batch 2730/3125   train_loss = 1.209\n",
      "2018-12-27T17:07:38.144984: Epoch   2 Batch 2750/3125   train_loss = 1.133\n",
      "2018-12-27T17:07:39.373700: Epoch   2 Batch 2770/3125   train_loss = 1.104\n",
      "2018-12-27T17:07:40.698670: Epoch   2 Batch 2790/3125   train_loss = 1.108\n",
      "2018-12-27T17:07:42.041641: Epoch   2 Batch 2810/3125   train_loss = 1.218\n",
      "2018-12-27T17:07:43.297284: Epoch   2 Batch 2830/3125   train_loss = 1.040\n",
      "2018-12-27T17:07:44.480123: Epoch   2 Batch 2850/3125   train_loss = 1.324\n",
      "2018-12-27T17:07:45.772154: Epoch   2 Batch 2870/3125   train_loss = 0.956\n",
      "2018-12-27T17:07:47.098938: Epoch   2 Batch 2890/3125   train_loss = 1.015\n",
      "2018-12-27T17:07:48.292730: Epoch   2 Batch 2910/3125   train_loss = 1.256\n",
      "2018-12-27T17:07:49.473575: Epoch   2 Batch 2930/3125   train_loss = 1.072\n",
      "2018-12-27T17:07:50.622502: Epoch   2 Batch 2950/3125   train_loss = 1.364\n",
      "2018-12-27T17:07:51.816311: Epoch   2 Batch 2970/3125   train_loss = 1.095\n",
      "2018-12-27T17:07:52.994162: Epoch   2 Batch 2990/3125   train_loss = 1.009\n",
      "2018-12-27T17:07:54.152072: Epoch   2 Batch 3010/3125   train_loss = 1.056\n",
      "2018-12-27T17:07:55.354854: Epoch   2 Batch 3030/3125   train_loss = 1.088\n",
      "2018-12-27T17:07:56.754111: Epoch   2 Batch 3050/3125   train_loss = 1.163\n",
      "2018-12-27T17:07:58.100048: Epoch   2 Batch 3070/3125   train_loss = 1.132\n",
      "2018-12-27T17:07:59.277897: Epoch   2 Batch 3090/3125   train_loss = 1.030\n",
      "2018-12-27T17:08:00.429818: Epoch   2 Batch 3110/3125   train_loss = 1.055\n",
      "2018-12-27T17:08:01.608719: Epoch   2 Batch   18/781   test_loss = 1.031\n",
      "2018-12-27T17:08:01.945817: Epoch   2 Batch   38/781   test_loss = 1.178\n",
      "2018-12-27T17:08:02.274938: Epoch   2 Batch   58/781   test_loss = 1.156\n",
      "2018-12-27T17:08:02.630986: Epoch   2 Batch   78/781   test_loss = 1.120\n",
      "2018-12-27T17:08:02.959109: Epoch   2 Batch   98/781   test_loss = 1.137\n",
      "2018-12-27T17:08:03.297205: Epoch   2 Batch  118/781   test_loss = 1.141\n",
      "2018-12-27T17:08:03.613360: Epoch   2 Batch  138/781   test_loss = 1.235\n",
      "2018-12-27T17:08:03.974396: Epoch   2 Batch  158/781   test_loss = 1.203\n",
      "2018-12-27T17:08:04.333436: Epoch   2 Batch  178/781   test_loss = 0.992\n",
      "2018-12-27T17:08:04.654578: Epoch   2 Batch  198/781   test_loss = 1.230\n",
      "2018-12-27T17:08:04.976715: Epoch   2 Batch  218/781   test_loss = 1.250\n",
      "2018-12-27T17:08:05.305836: Epoch   2 Batch  238/781   test_loss = 1.188\n",
      "2018-12-27T17:08:05.662881: Epoch   2 Batch  258/781   test_loss = 1.255\n",
      "2018-12-27T17:08:06.006962: Epoch   2 Batch  278/781   test_loss = 1.278\n",
      "2018-12-27T17:08:06.320124: Epoch   2 Batch  298/781   test_loss = 1.157\n",
      "2018-12-27T17:08:06.633287: Epoch   2 Batch  318/781   test_loss = 1.180\n",
      "2018-12-27T17:08:06.968391: Epoch   2 Batch  338/781   test_loss = 1.214\n",
      "2018-12-27T17:08:07.334414: Epoch   2 Batch  358/781   test_loss = 1.115\n",
      "2018-12-27T17:08:07.678493: Epoch   2 Batch  378/781   test_loss = 1.128\n",
      "2018-12-27T17:08:08.019582: Epoch   2 Batch  398/781   test_loss = 1.012\n",
      "2018-12-27T17:08:08.345711: Epoch   2 Batch  418/781   test_loss = 1.174\n",
      "2018-12-27T17:08:08.657875: Epoch   2 Batch  438/781   test_loss = 1.337\n",
      "2018-12-27T17:08:08.982008: Epoch   2 Batch  458/781   test_loss = 1.198\n",
      "2018-12-27T17:08:09.308137: Epoch   2 Batch  478/781   test_loss = 1.335\n",
      "2018-12-27T17:08:09.624292: Epoch   2 Batch  498/781   test_loss = 1.050\n",
      "2018-12-27T17:08:09.949422: Epoch   2 Batch  518/781   test_loss = 1.225\n",
      "2018-12-27T17:08:10.309461: Epoch   2 Batch  538/781   test_loss = 1.030\n",
      "2018-12-27T17:08:10.653540: Epoch   2 Batch  558/781   test_loss = 1.099\n",
      "2018-12-27T17:08:10.968698: Epoch   2 Batch  578/781   test_loss = 1.090\n",
      "2018-12-27T17:08:11.297818: Epoch   2 Batch  598/781   test_loss = 1.240\n",
      "2018-12-27T17:08:11.628934: Epoch   2 Batch  618/781   test_loss = 1.095\n",
      "2018-12-27T17:08:11.971019: Epoch   2 Batch  638/781   test_loss = 1.128\n",
      "2018-12-27T17:08:12.299142: Epoch   2 Batch  658/781   test_loss = 1.269\n",
      "2018-12-27T17:08:12.627264: Epoch   2 Batch  678/781   test_loss = 1.172\n",
      "2018-12-27T17:08:12.953392: Epoch   2 Batch  698/781   test_loss = 1.117\n",
      "2018-12-27T17:08:13.340358: Epoch   2 Batch  718/781   test_loss = 1.254\n",
      "2018-12-27T17:08:13.664492: Epoch   2 Batch  738/781   test_loss = 1.102\n",
      "2018-12-27T17:08:13.993611: Epoch   2 Batch  758/781   test_loss = 1.101\n",
      "2018-12-27T17:08:14.329713: Epoch   2 Batch  778/781   test_loss = 1.374\n",
      "2018-12-27T17:08:15.315080: Epoch   3 Batch    5/3125   train_loss = 1.200\n",
      "2018-12-27T17:08:16.626573: Epoch   3 Batch   25/3125   train_loss = 1.165\n",
      "2018-12-27T17:08:17.841325: Epoch   3 Batch   45/3125   train_loss = 1.127\n",
      "2018-12-27T17:08:19.143843: Epoch   3 Batch   65/3125   train_loss = 1.259\n",
      "2018-12-27T17:08:20.431403: Epoch   3 Batch   85/3125   train_loss = 1.031\n",
      "2018-12-27T17:08:21.623215: Epoch   3 Batch  105/3125   train_loss = 0.996\n",
      "2018-12-27T17:08:22.869883: Epoch   3 Batch  125/3125   train_loss = 1.146\n",
      "2018-12-27T17:08:24.041754: Epoch   3 Batch  145/3125   train_loss = 1.245\n",
      "2018-12-27T17:08:25.271465: Epoch   3 Batch  165/3125   train_loss = 1.175\n",
      "2018-12-27T17:08:26.501177: Epoch   3 Batch  185/3125   train_loss = 1.134\n",
      "2018-12-27T17:08:27.640131: Epoch   3 Batch  205/3125   train_loss = 1.080\n",
      "2018-12-27T17:08:28.871839: Epoch   3 Batch  225/3125   train_loss = 0.970\n",
      "2018-12-27T17:08:30.012789: Epoch   3 Batch  245/3125   train_loss = 1.317\n",
      "2018-12-27T17:08:31.199618: Epoch   3 Batch  265/3125   train_loss = 1.067\n",
      "2018-12-27T17:08:32.398413: Epoch   3 Batch  285/3125   train_loss = 1.249\n",
      "2018-12-27T17:08:33.599201: Epoch   3 Batch  305/3125   train_loss = 1.147\n",
      "2018-12-27T17:08:34.879799: Epoch   3 Batch  325/3125   train_loss = 1.130\n",
      "2018-12-27T17:08:36.024740: Epoch   3 Batch  345/3125   train_loss = 1.067\n",
      "2018-12-27T17:08:37.187631: Epoch   3 Batch  365/3125   train_loss = 1.221\n",
      "2018-12-27T17:08:38.441846: Epoch   3 Batch  385/3125   train_loss = 1.107\n",
      "2018-12-27T17:08:39.595762: Epoch   3 Batch  405/3125   train_loss = 1.096\n",
      "2018-12-27T17:08:40.794557: Epoch   3 Batch  425/3125   train_loss = 1.134\n",
      "2018-12-27T17:08:41.967422: Epoch   3 Batch  445/3125   train_loss = 1.072\n",
      "2018-12-27T17:08:43.159235: Epoch   3 Batch  465/3125   train_loss = 1.103\n",
      "2018-12-27T17:08:44.323124: Epoch   3 Batch  485/3125   train_loss = 1.322\n",
      "2018-12-27T17:08:45.585781: Epoch   3 Batch  505/3125   train_loss = 1.031\n",
      "2018-12-27T17:08:46.788565: Epoch   3 Batch  525/3125   train_loss = 1.118\n",
      "2018-12-27T17:08:47.937495: Epoch   3 Batch  545/3125   train_loss = 1.087\n",
      "2018-12-27T17:08:49.269931: Epoch   3 Batch  565/3125   train_loss = 1.241\n",
      "2018-12-27T17:08:50.552546: Epoch   3 Batch  585/3125   train_loss = 1.001\n",
      "2018-12-27T17:08:51.671555: Epoch   3 Batch  605/3125   train_loss = 1.175\n",
      "2018-12-27T17:08:52.892290: Epoch   3 Batch  625/3125   train_loss = 1.105\n",
      "2018-12-27T17:08:54.013295: Epoch   3 Batch  645/3125   train_loss = 1.267\n",
      "2018-12-27T17:08:55.138288: Epoch   3 Batch  665/3125   train_loss = 1.249\n",
      "2018-12-27T17:08:56.401460: Epoch   3 Batch  685/3125   train_loss = 1.140\n",
      "2018-12-27T17:08:57.521465: Epoch   3 Batch  705/3125   train_loss = 1.410\n",
      "2018-12-27T17:08:58.733226: Epoch   3 Batch  725/3125   train_loss = 1.129\n",
      "2018-12-27T17:08:59.865200: Epoch   3 Batch  745/3125   train_loss = 1.196\n",
      "2018-12-27T17:09:00.985205: Epoch   3 Batch  765/3125   train_loss = 1.071\n",
      "2018-12-27T17:09:02.197964: Epoch   3 Batch  785/3125   train_loss = 1.221\n",
      "2018-12-27T17:09:03.406698: Epoch   3 Batch  805/3125   train_loss = 1.128\n",
      "2018-12-27T17:09:04.623450: Epoch   3 Batch  825/3125   train_loss = 1.010\n",
      "2018-12-27T17:09:05.805287: Epoch   3 Batch  845/3125   train_loss = 1.058\n",
      "2018-12-27T17:09:06.945239: Epoch   3 Batch  865/3125   train_loss = 1.126\n",
      "2018-12-27T17:09:08.144034: Epoch   3 Batch  885/3125   train_loss = 1.153\n",
      "2018-12-27T17:09:09.290969: Epoch   3 Batch  905/3125   train_loss = 1.320\n",
      "2018-12-27T17:09:10.496744: Epoch   3 Batch  925/3125   train_loss = 1.096\n",
      "2018-12-27T17:09:11.651658: Epoch   3 Batch  945/3125   train_loss = 1.258\n",
      "2018-12-27T17:09:12.788619: Epoch   3 Batch  965/3125   train_loss = 1.000\n",
      "2018-12-27T17:09:14.035286: Epoch   3 Batch  985/3125   train_loss = 1.199\n",
      "2018-12-27T17:09:15.185211: Epoch   3 Batch 1005/3125   train_loss = 1.070\n",
      "2018-12-27T17:09:16.365058: Epoch   3 Batch 1025/3125   train_loss = 1.165\n",
      "2018-12-27T17:09:17.534930: Epoch   3 Batch 1045/3125   train_loss = 1.309\n",
      "2018-12-27T17:09:18.691837: Epoch   3 Batch 1065/3125   train_loss = 1.083\n",
      "2018-12-27T17:09:19.869692: Epoch   3 Batch 1085/3125   train_loss = 0.941\n",
      "2018-12-27T17:09:20.996677: Epoch   3 Batch 1105/3125   train_loss = 0.971\n",
      "2018-12-27T17:09:22.209434: Epoch   3 Batch 1125/3125   train_loss = 1.041\n",
      "2018-12-27T17:09:23.425184: Epoch   3 Batch 1145/3125   train_loss = 1.190\n",
      "2018-12-27T17:09:24.600045: Epoch   3 Batch 1165/3125   train_loss = 1.270\n",
      "2018-12-27T17:09:25.919517: Epoch   3 Batch 1185/3125   train_loss = 0.983\n",
      "2018-12-27T17:09:27.159203: Epoch   3 Batch 1205/3125   train_loss = 1.132\n",
      "2018-12-27T17:09:28.499619: Epoch   3 Batch 1225/3125   train_loss = 1.162\n",
      "2018-12-27T17:09:29.941764: Epoch   3 Batch 1245/3125   train_loss = 1.293\n",
      "2018-12-27T17:09:31.272209: Epoch   3 Batch 1265/3125   train_loss = 1.126\n",
      "2018-12-27T17:09:32.478982: Epoch   3 Batch 1285/3125   train_loss = 1.223\n",
      "2018-12-27T17:09:33.615942: Epoch   3 Batch 1305/3125   train_loss = 1.061\n",
      "2018-12-27T17:09:34.829697: Epoch   3 Batch 1325/3125   train_loss = 1.068\n",
      "2018-12-27T17:09:35.986671: Epoch   3 Batch 1345/3125   train_loss = 1.106\n",
      "2018-12-27T17:09:37.209402: Epoch   3 Batch 1365/3125   train_loss = 1.013\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2018-12-27T17:09:38.460059: Epoch   3 Batch 1385/3125   train_loss = 1.103\n",
      "2018-12-27T17:09:39.828401: Epoch   3 Batch 1405/3125   train_loss = 1.084\n",
      "2018-12-27T17:09:41.083047: Epoch   3 Batch 1425/3125   train_loss = 1.192\n",
      "2018-12-27T17:09:42.237960: Epoch   3 Batch 1445/3125   train_loss = 1.283\n",
      "2018-12-27T17:09:43.469668: Epoch   3 Batch 1465/3125   train_loss = 1.133\n",
      "2018-12-27T17:09:44.694393: Epoch   3 Batch 1485/3125   train_loss = 1.175\n",
      "2018-12-27T17:09:46.075702: Epoch   3 Batch 1505/3125   train_loss = 0.999\n",
      "2018-12-27T17:09:47.380333: Epoch   3 Batch 1525/3125   train_loss = 1.024\n",
      "2018-12-27T17:09:48.515298: Epoch   3 Batch 1545/3125   train_loss = 1.038\n",
      "2018-12-27T17:09:49.770941: Epoch   3 Batch 1565/3125   train_loss = 1.159\n",
      "2018-12-27T17:09:51.067476: Epoch   3 Batch 1585/3125   train_loss = 1.148\n",
      "2018-12-27T17:09:52.656229: Epoch   3 Batch 1605/3125   train_loss = 0.997\n",
      "2018-12-27T17:09:54.130589: Epoch   3 Batch 1625/3125   train_loss = 1.147\n",
      "2018-12-27T17:09:55.626584: Epoch   3 Batch 1645/3125   train_loss = 1.127\n",
      "2018-12-27T17:09:57.029828: Epoch   3 Batch 1665/3125   train_loss = 1.160\n",
      "2018-12-27T17:09:58.371244: Epoch   3 Batch 1685/3125   train_loss = 1.186\n",
      "2018-12-27T17:09:59.610929: Epoch   3 Batch 1705/3125   train_loss = 1.111\n",
      "2018-12-27T17:10:01.050599: Epoch   3 Batch 1725/3125   train_loss = 1.101\n",
      "2018-12-27T17:10:02.314220: Epoch   3 Batch 1745/3125   train_loss = 1.058\n",
      "2018-12-27T17:10:03.470132: Epoch   3 Batch 1765/3125   train_loss = 1.012\n",
      "2018-12-27T17:10:04.800574: Epoch   3 Batch 1785/3125   train_loss = 1.190\n",
      "2018-12-27T17:10:06.188863: Epoch   3 Batch 1805/3125   train_loss = 1.159\n",
      "2018-12-27T17:10:07.332805: Epoch   3 Batch 1825/3125   train_loss = 1.212\n",
      "2018-12-27T17:10:08.499685: Epoch   3 Batch 1845/3125   train_loss = 1.170\n",
      "2018-12-27T17:10:09.695489: Epoch   3 Batch 1865/3125   train_loss = 1.102\n",
      "2018-12-27T17:10:10.985041: Epoch   3 Batch 1885/3125   train_loss = 1.223\n",
      "2018-12-27T17:10:12.368343: Epoch   3 Batch 1905/3125   train_loss = 1.041\n",
      "2018-12-27T17:10:13.567140: Epoch   3 Batch 1925/3125   train_loss = 1.041\n",
      "2018-12-27T17:10:14.740004: Epoch   3 Batch 1945/3125   train_loss = 1.299\n",
      "2018-12-27T17:10:15.987669: Epoch   3 Batch 1965/3125   train_loss = 1.114\n",
      "2018-12-27T17:10:17.170507: Epoch   3 Batch 1985/3125   train_loss = 1.114\n",
      "2018-12-27T17:10:18.314449: Epoch   3 Batch 2005/3125   train_loss = 1.184\n",
      "2018-12-27T17:10:19.518231: Epoch   3 Batch 2025/3125   train_loss = 1.079\n",
      "2018-12-27T17:10:20.666162: Epoch   3 Batch 2045/3125   train_loss = 1.025\n",
      "2018-12-27T17:10:21.796142: Epoch   3 Batch 2065/3125   train_loss = 1.033\n",
      "2018-12-27T17:10:23.052782: Epoch   3 Batch 2085/3125   train_loss = 1.397\n",
      "2018-12-27T17:10:24.296458: Epoch   3 Batch 2105/3125   train_loss = 0.963\n",
      "2018-12-27T17:10:25.468325: Epoch   3 Batch 2125/3125   train_loss = 1.235\n",
      "2018-12-27T17:10:26.712001: Epoch   3 Batch 2145/3125   train_loss = 1.164\n",
      "2018-12-27T17:10:27.990581: Epoch   3 Batch 2165/3125   train_loss = 1.125\n",
      "2018-12-27T17:10:29.445692: Epoch   3 Batch 2185/3125   train_loss = 1.274\n",
      "2018-12-27T17:10:30.821016: Epoch   3 Batch 2205/3125   train_loss = 1.197\n",
      "2018-12-27T17:10:32.302057: Epoch   3 Batch 2225/3125   train_loss = 1.074\n",
      "2018-12-27T17:10:33.628510: Epoch   3 Batch 2245/3125   train_loss = 1.043\n",
      "2018-12-27T17:10:34.995856: Epoch   3 Batch 2265/3125   train_loss = 1.222\n",
      "2018-12-27T17:10:36.526762: Epoch   3 Batch 2285/3125   train_loss = 1.309\n",
      "2018-12-27T17:10:37.898097: Epoch   3 Batch 2305/3125   train_loss = 1.035\n",
      "2018-12-27T17:10:39.062983: Epoch   3 Batch 2325/3125   train_loss = 1.093\n",
      "2018-12-27T17:10:40.262777: Epoch   3 Batch 2345/3125   train_loss = 1.062\n",
      "2018-12-27T17:10:41.476532: Epoch   3 Batch 2365/3125   train_loss = 0.968\n",
      "2018-12-27T17:10:42.616484: Epoch   3 Batch 2385/3125   train_loss = 1.173\n",
      "2018-12-27T17:10:43.850186: Epoch   3 Batch 2405/3125   train_loss = 1.072\n",
      "2018-12-27T17:10:45.021055: Epoch   3 Batch 2425/3125   train_loss = 1.043\n",
      "2018-12-27T17:10:46.203894: Epoch   3 Batch 2445/3125   train_loss = 1.082\n",
      "2018-12-27T17:10:47.691916: Epoch   3 Batch 2465/3125   train_loss = 1.073\n",
      "2018-12-27T17:10:48.925619: Epoch   3 Batch 2485/3125   train_loss = 1.022\n",
      "2018-12-27T17:10:50.203202: Epoch   3 Batch 2505/3125   train_loss = 1.112\n",
      "2018-12-27T17:10:51.398009: Epoch   3 Batch 2525/3125   train_loss = 1.143\n",
      "2018-12-27T17:10:52.521007: Epoch   3 Batch 2545/3125   train_loss = 1.235\n",
      "2018-12-27T17:10:53.612089: Epoch   3 Batch 2565/3125   train_loss = 1.103\n",
      "2018-12-27T17:10:54.705168: Epoch   3 Batch 2585/3125   train_loss = 1.010\n",
      "2018-12-27T17:10:56.364737: Epoch   3 Batch 2605/3125   train_loss = 1.038\n",
      "2018-12-27T17:10:57.588460: Epoch   3 Batch 2625/3125   train_loss = 1.312\n",
      "2018-12-27T17:10:59.247028: Epoch   3 Batch 2645/3125   train_loss = 1.108\n",
      "2018-12-27T17:11:00.629331: Epoch   3 Batch 2665/3125   train_loss = 1.221\n",
      "2018-12-27T17:11:01.871012: Epoch   3 Batch 2685/3125   train_loss = 1.153\n",
      "2018-12-27T17:11:03.008969: Epoch   3 Batch 2705/3125   train_loss = 0.999\n",
      "2018-12-27T17:11:04.160890: Epoch   3 Batch 2725/3125   train_loss = 1.195\n",
      "2018-12-27T17:11:05.467398: Epoch   3 Batch 2745/3125   train_loss = 1.192\n",
      "2018-12-27T17:11:06.618320: Epoch   3 Batch 2765/3125   train_loss = 1.059\n",
      "2018-12-27T17:11:07.727356: Epoch   3 Batch 2785/3125   train_loss = 1.338\n",
      "2018-12-27T17:11:08.788520: Epoch   3 Batch 2805/3125   train_loss = 1.120\n",
      "2018-12-27T17:11:09.984324: Epoch   3 Batch 2825/3125   train_loss = 1.130\n",
      "2018-12-27T17:11:11.353674: Epoch   3 Batch 2845/3125   train_loss = 1.133\n",
      "2018-12-27T17:11:12.528522: Epoch   3 Batch 2865/3125   train_loss = 0.988\n",
      "2018-12-27T17:11:13.698395: Epoch   3 Batch 2885/3125   train_loss = 1.151\n",
      "2018-12-27T17:11:14.812417: Epoch   3 Batch 2905/3125   train_loss = 0.951\n",
      "2018-12-27T17:11:16.019191: Epoch   3 Batch 2925/3125   train_loss = 1.082\n",
      "2018-12-27T17:11:17.228959: Epoch   3 Batch 2945/3125   train_loss = 1.363\n",
      "2018-12-27T17:11:18.335997: Epoch   3 Batch 2965/3125   train_loss = 1.250\n",
      "2018-12-27T17:11:19.460990: Epoch   3 Batch 2985/3125   train_loss = 1.073\n",
      "2018-12-27T17:11:20.660782: Epoch   3 Batch 3005/3125   train_loss = 0.974\n",
      "2018-12-27T17:11:21.888501: Epoch   3 Batch 3025/3125   train_loss = 1.206\n",
      "2018-12-27T17:11:23.102256: Epoch   3 Batch 3045/3125   train_loss = 1.249\n",
      "2018-12-27T17:11:24.287089: Epoch   3 Batch 3065/3125   train_loss = 1.078\n",
      "2018-12-27T17:11:25.452972: Epoch   3 Batch 3085/3125   train_loss = 1.150\n",
      "2018-12-27T17:11:26.630823: Epoch   3 Batch 3105/3125   train_loss = 1.223\n",
      "2018-12-27T17:11:27.986200: Epoch   3 Batch   17/781   test_loss = 1.154\n",
      "2018-12-27T17:11:28.336264: Epoch   3 Batch   37/781   test_loss = 1.193\n",
      "2018-12-27T17:11:28.733204: Epoch   3 Batch   57/781   test_loss = 1.200\n",
      "2018-12-27T17:11:29.070302: Epoch   3 Batch   77/781   test_loss = 1.181\n",
      "2018-12-27T17:11:29.400419: Epoch   3 Batch   97/781   test_loss = 1.002\n",
      "2018-12-27T17:11:29.718568: Epoch   3 Batch  117/781   test_loss = 1.340\n",
      "2018-12-27T17:11:30.016771: Epoch   3 Batch  137/781   test_loss = 1.210\n",
      "2018-12-27T17:11:30.342899: Epoch   3 Batch  157/781   test_loss = 1.212\n",
      "2018-12-27T17:11:30.685982: Epoch   3 Batch  177/781   test_loss = 1.158\n",
      "2018-12-27T17:11:30.992164: Epoch   3 Batch  197/781   test_loss = 1.112\n",
      "2018-12-27T17:11:31.354207: Epoch   3 Batch  217/781   test_loss = 1.022\n",
      "2018-12-27T17:11:31.699273: Epoch   3 Batch  237/781   test_loss = 0.959\n",
      "2018-12-27T17:11:32.045349: Epoch   3 Batch  257/781   test_loss = 1.317\n",
      "2018-12-27T17:11:32.446278: Epoch   3 Batch  277/781   test_loss = 1.170\n",
      "2018-12-27T17:11:32.888095: Epoch   3 Batch  297/781   test_loss = 1.279\n",
      "2018-12-27T17:11:33.332907: Epoch   3 Batch  317/781   test_loss = 1.318\n",
      "2018-12-27T17:11:33.768741: Epoch   3 Batch  337/781   test_loss = 1.161\n",
      "2018-12-27T17:11:34.218539: Epoch   3 Batch  357/781   test_loss = 1.288\n",
      "2018-12-27T17:11:34.649387: Epoch   3 Batch  377/781   test_loss = 1.145\n",
      "2018-12-27T17:11:35.128109: Epoch   3 Batch  397/781   test_loss = 1.077\n",
      "2018-12-27T17:11:35.563944: Epoch   3 Batch  417/781   test_loss = 1.149\n",
      "2018-12-27T17:11:36.012743: Epoch   3 Batch  437/781   test_loss = 0.987\n",
      "2018-12-27T17:11:36.449575: Epoch   3 Batch  457/781   test_loss = 0.894\n",
      "2018-12-27T17:11:36.764732: Epoch   3 Batch  477/781   test_loss = 1.152\n",
      "2018-12-27T17:11:37.114797: Epoch   3 Batch  497/781   test_loss = 1.055\n",
      "2018-12-27T17:11:37.493783: Epoch   3 Batch  517/781   test_loss = 1.149\n",
      "2018-12-27T17:11:37.898700: Epoch   3 Batch  537/781   test_loss = 1.144\n",
      "2018-12-27T17:11:38.351491: Epoch   3 Batch  557/781   test_loss = 1.121\n",
      "2018-12-27T17:11:38.687593: Epoch   3 Batch  577/781   test_loss = 1.124\n",
      "2018-12-27T17:11:39.112457: Epoch   3 Batch  597/781   test_loss = 1.120\n",
      "2018-12-27T17:11:39.437588: Epoch   3 Batch  617/781   test_loss = 1.076\n",
      "2018-12-27T17:11:39.766711: Epoch   3 Batch  637/781   test_loss = 1.003\n",
      "2018-12-27T17:11:40.218500: Epoch   3 Batch  657/781   test_loss = 1.458\n",
      "2018-12-27T17:11:40.622420: Epoch   3 Batch  677/781   test_loss = 1.036\n",
      "2018-12-27T17:11:41.011380: Epoch   3 Batch  697/781   test_loss = 1.210\n",
      "2018-12-27T17:11:41.424276: Epoch   3 Batch  717/781   test_loss = 1.023\n",
      "2018-12-27T17:11:41.846149: Epoch   3 Batch  737/781   test_loss = 1.028\n",
      "2018-12-27T17:11:42.253062: Epoch   3 Batch  757/781   test_loss = 1.364\n",
      "2018-12-27T17:11:42.636038: Epoch   3 Batch  777/781   test_loss = 1.323\n",
      "2018-12-27T17:11:43.354119: Epoch   4 Batch    0/3125   train_loss = 1.327\n",
      "2018-12-27T17:11:44.496067: Epoch   4 Batch   20/3125   train_loss = 1.003\n",
      "2018-12-27T17:11:45.637015: Epoch   4 Batch   40/3125   train_loss = 1.155\n",
      "2018-12-27T17:11:46.773983: Epoch   4 Batch   60/3125   train_loss = 0.992\n",
      "2018-12-27T17:11:47.889003: Epoch   4 Batch   80/3125   train_loss = 1.091\n",
      "2018-12-27T17:11:48.987067: Epoch   4 Batch  100/3125   train_loss = 1.132\n",
      "2018-12-27T17:11:50.126033: Epoch   4 Batch  120/3125   train_loss = 1.181\n",
      "2018-12-27T17:11:51.228077: Epoch   4 Batch  140/3125   train_loss = 1.205\n",
      "2018-12-27T17:11:52.342098: Epoch   4 Batch  160/3125   train_loss = 0.985\n",
      "2018-12-27T17:11:53.476067: Epoch   4 Batch  180/3125   train_loss = 1.050\n",
      "2018-12-27T17:11:54.572138: Epoch   4 Batch  200/3125   train_loss = 1.231\n",
      "2018-12-27T17:11:55.895600: Epoch   4 Batch  220/3125   train_loss = 1.194\n",
      "2018-12-27T17:11:57.250976: Epoch   4 Batch  240/3125   train_loss = 1.111\n",
      "2018-12-27T17:11:58.505622: Epoch   4 Batch  260/3125   train_loss = 1.115\n",
      "2018-12-27T17:11:59.842049: Epoch   4 Batch  280/3125   train_loss = 1.252\n",
      "2018-12-27T17:12:01.034862: Epoch   4 Batch  300/3125   train_loss = 1.239\n",
      "2018-12-27T17:12:02.402205: Epoch   4 Batch  320/3125   train_loss = 1.205\n",
      "2018-12-27T17:12:03.926133: Epoch   4 Batch  340/3125   train_loss = 1.026\n",
      "2018-12-27T17:12:05.586692: Epoch   4 Batch  360/3125   train_loss = 1.127\n",
      "2018-12-27T17:12:07.044794: Epoch   4 Batch  380/3125   train_loss = 1.060\n",
      "2018-12-27T17:12:08.383217: Epoch   4 Batch  400/3125   train_loss = 1.015\n",
      "2018-12-27T17:12:09.539127: Epoch   4 Batch  420/3125   train_loss = 1.082\n",
      "2018-12-27T17:12:10.708999: Epoch   4 Batch  440/3125   train_loss = 1.097\n",
      "2018-12-27T17:12:11.898819: Epoch   4 Batch  460/3125   train_loss = 1.125\n",
      "2018-12-27T17:12:13.061710: Epoch   4 Batch  480/3125   train_loss = 1.256\n",
      "2018-12-27T17:12:14.270478: Epoch   4 Batch  500/3125   train_loss = 0.890\n",
      "2018-12-27T17:12:15.331642: Epoch   4 Batch  520/3125   train_loss = 1.101\n",
      "2018-12-27T17:12:16.439679: Epoch   4 Batch  540/3125   train_loss = 1.017\n",
      "2018-12-27T17:12:17.580631: Epoch   4 Batch  560/3125   train_loss = 1.174\n",
      "2018-12-27T17:12:18.629825: Epoch   4 Batch  580/3125   train_loss = 1.208\n",
      "2018-12-27T17:12:19.916386: Epoch   4 Batch  600/3125   train_loss = 1.234\n",
      "2018-12-27T17:12:21.085261: Epoch   4 Batch  620/3125   train_loss = 1.214\n",
      "2018-12-27T17:12:22.388776: Epoch   4 Batch  640/3125   train_loss = 1.149\n",
      "2018-12-27T17:12:23.755124: Epoch   4 Batch  660/3125   train_loss = 1.131\n",
      "2018-12-27T17:12:25.148400: Epoch   4 Batch  680/3125   train_loss = 1.005\n",
      "2018-12-27T17:12:26.541676: Epoch   4 Batch  700/3125   train_loss = 1.136\n",
      "2018-12-27T17:12:27.767398: Epoch   4 Batch  720/3125   train_loss = 1.068\n",
      "2018-12-27T17:12:29.074903: Epoch   4 Batch  740/3125   train_loss = 1.233\n",
      "2018-12-27T17:12:30.142049: Epoch   4 Batch  760/3125   train_loss = 1.106\n",
      "2018-12-27T17:12:31.192243: Epoch   4 Batch  780/3125   train_loss = 1.265\n",
      "2018-12-27T17:12:32.410985: Epoch   4 Batch  800/3125   train_loss = 1.139\n",
      "2018-12-27T17:12:33.698543: Epoch   4 Batch  820/3125   train_loss = 1.062\n",
      "2018-12-27T17:12:34.998069: Epoch   4 Batch  840/3125   train_loss = 1.125\n",
      "2018-12-27T17:12:36.577846: Epoch   4 Batch  860/3125   train_loss = 1.077\n",
      "2018-12-27T17:12:37.795590: Epoch   4 Batch  880/3125   train_loss = 1.129\n",
      "2018-12-27T17:12:39.556453: Epoch   4 Batch  900/3125   train_loss = 1.067\n",
      "2018-12-27T17:12:41.547136: Epoch   4 Batch  920/3125   train_loss = 1.193\n",
      "2018-12-27T17:12:43.223650: Epoch   4 Batch  940/3125   train_loss = 1.183\n",
      "2018-12-27T17:12:44.552099: Epoch   4 Batch  960/3125   train_loss = 1.162\n",
      "2018-12-27T17:12:45.982278: Epoch   4 Batch  980/3125   train_loss = 1.149\n",
      "2018-12-27T17:12:47.445371: Epoch   4 Batch 1000/3125   train_loss = 1.136\n",
      "2018-12-27T17:12:48.688042: Epoch   4 Batch 1020/3125   train_loss = 1.180\n",
      "2018-12-27T17:12:49.921744: Epoch   4 Batch 1040/3125   train_loss = 1.088\n",
      "2018-12-27T17:12:51.049728: Epoch   4 Batch 1060/3125   train_loss = 1.321\n",
      "2018-12-27T17:12:52.136823: Epoch   4 Batch 1080/3125   train_loss = 1.084\n",
      "2018-12-27T17:12:53.235884: Epoch   4 Batch 1100/3125   train_loss = 1.109\n",
      "2018-12-27T17:12:54.336941: Epoch   4 Batch 1120/3125   train_loss = 1.083\n",
      "2018-12-27T17:12:55.409075: Epoch   4 Batch 1140/3125   train_loss = 1.108\n",
      "2018-12-27T17:12:56.505145: Epoch   4 Batch 1160/3125   train_loss = 1.174\n",
      "2018-12-27T17:12:57.572293: Epoch   4 Batch 1180/3125   train_loss = 1.124\n",
      "2018-12-27T17:12:58.767118: Epoch   4 Batch 1200/3125   train_loss = 1.091\n",
      "2018-12-27T17:12:59.805340: Epoch   4 Batch 1220/3125   train_loss = 1.122\n",
      "2018-12-27T17:13:00.867501: Epoch   4 Batch 1240/3125   train_loss = 0.928\n",
      "2018-12-27T17:13:01.967563: Epoch   4 Batch 1260/3125   train_loss = 1.059\n",
      "2018-12-27T17:13:03.094548: Epoch   4 Batch 1280/3125   train_loss = 1.154\n",
      "2018-12-27T17:13:04.187626: Epoch   4 Batch 1300/3125   train_loss = 1.031\n",
      "2018-12-27T17:13:05.312619: Epoch   4 Batch 1320/3125   train_loss = 1.098\n",
      "2018-12-27T17:13:06.446587: Epoch   4 Batch 1340/3125   train_loss = 0.912\n",
      "2018-12-27T17:13:07.542657: Epoch   4 Batch 1360/3125   train_loss = 1.026\n",
      "2018-12-27T17:13:08.657677: Epoch   4 Batch 1380/3125   train_loss = 1.061\n",
      "2018-12-27T17:13:09.744770: Epoch   4 Batch 1400/3125   train_loss = 1.189\n",
      "2018-12-27T17:13:10.859790: Epoch   4 Batch 1420/3125   train_loss = 1.244\n",
      "2018-12-27T17:13:11.952868: Epoch   4 Batch 1440/3125   train_loss = 1.017\n",
      "2018-12-27T17:13:13.047941: Epoch   4 Batch 1460/3125   train_loss = 1.116\n",
      "2018-12-27T17:13:14.136031: Epoch   4 Batch 1480/3125   train_loss = 1.150\n",
      "2018-12-27T17:13:15.196198: Epoch   4 Batch 1500/3125   train_loss = 1.129\n",
      "2018-12-27T17:13:16.302242: Epoch   4 Batch 1520/3125   train_loss = 1.094\n",
      "2018-12-27T17:13:17.456156: Epoch   4 Batch 1540/3125   train_loss = 1.135\n",
      "2018-12-27T17:13:18.551229: Epoch   4 Batch 1560/3125   train_loss = 1.086\n",
      "2018-12-27T17:13:19.629346: Epoch   4 Batch 1580/3125   train_loss = 1.247\n",
      "2018-12-27T17:13:20.731402: Epoch   4 Batch 1600/3125   train_loss = 1.137\n",
      "2018-12-27T17:13:21.829465: Epoch   4 Batch 1620/3125   train_loss = 1.150\n",
      "2018-12-27T17:13:22.974404: Epoch   4 Batch 1640/3125   train_loss = 1.151\n",
      "2018-12-27T17:13:24.056512: Epoch   4 Batch 1660/3125   train_loss = 1.147\n",
      "2018-12-27T17:13:25.158567: Epoch   4 Batch 1680/3125   train_loss = 1.152\n",
      "2018-12-27T17:13:26.284555: Epoch   4 Batch 1700/3125   train_loss = 1.083\n",
      "2018-12-27T17:13:27.388605: Epoch   4 Batch 1720/3125   train_loss = 1.062\n",
      "2018-12-27T17:13:28.492653: Epoch   4 Batch 1740/3125   train_loss = 1.017\n",
      "2018-12-27T17:13:29.615651: Epoch   4 Batch 1760/3125   train_loss = 1.221\n",
      "2018-12-27T17:13:30.734660: Epoch   4 Batch 1780/3125   train_loss = 1.033\n",
      "2018-12-27T17:13:31.875609: Epoch   4 Batch 1800/3125   train_loss = 1.095\n",
      "2018-12-27T17:13:32.973675: Epoch   4 Batch 1820/3125   train_loss = 1.022\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2018-12-27T17:13:34.079718: Epoch   4 Batch 1840/3125   train_loss = 1.091\n",
      "2018-12-27T17:13:35.170801: Epoch   4 Batch 1860/3125   train_loss = 1.109\n",
      "2018-12-27T17:13:36.235953: Epoch   4 Batch 1880/3125   train_loss = 1.058\n",
      "2018-12-27T17:13:37.290135: Epoch   4 Batch 1900/3125   train_loss = 1.003\n",
      "2018-12-27T17:13:38.354290: Epoch   4 Batch 1920/3125   train_loss = 1.063\n",
      "2018-12-27T17:13:39.452355: Epoch   4 Batch 1940/3125   train_loss = 1.019\n",
      "2018-12-27T17:13:40.524489: Epoch   4 Batch 1960/3125   train_loss = 1.003\n",
      "2018-12-27T17:13:41.664441: Epoch   4 Batch 1980/3125   train_loss = 1.110\n",
      "2018-12-27T17:13:42.809384: Epoch   4 Batch 2000/3125   train_loss = 1.185\n",
      "2018-12-27T17:13:43.980250: Epoch   4 Batch 2020/3125   train_loss = 1.128\n",
      "2018-12-27T17:13:45.197996: Epoch   4 Batch 2040/3125   train_loss = 0.980\n",
      "2018-12-27T17:13:46.437728: Epoch   4 Batch 2060/3125   train_loss = 0.942\n",
      "2018-12-27T17:13:47.646501: Epoch   4 Batch 2080/3125   train_loss = 1.241\n",
      "2018-12-27T17:13:48.848286: Epoch   4 Batch 2100/3125   train_loss = 1.023\n",
      "2018-12-27T17:13:50.379193: Epoch   4 Batch 2120/3125   train_loss = 1.016\n",
      "2018-12-27T17:13:51.688693: Epoch   4 Batch 2140/3125   train_loss = 1.097\n",
      "2018-12-27T17:13:52.809695: Epoch   4 Batch 2160/3125   train_loss = 1.107\n",
      "2018-12-27T17:13:53.916737: Epoch   4 Batch 2180/3125   train_loss = 1.188\n",
      "2018-12-27T17:13:55.034747: Epoch   4 Batch 2200/3125   train_loss = 1.057\n",
      "2018-12-27T17:13:56.630482: Epoch   4 Batch 2220/3125   train_loss = 1.065\n",
      "2018-12-27T17:13:57.788387: Epoch   4 Batch 2240/3125   train_loss = 1.008\n",
      "2018-12-27T17:13:58.969230: Epoch   4 Batch 2260/3125   train_loss = 1.117\n",
      "2018-12-27T17:14:00.121164: Epoch   4 Batch 2280/3125   train_loss = 1.115\n",
      "2018-12-27T17:14:01.338895: Epoch   4 Batch 2300/3125   train_loss = 1.139\n",
      "2018-12-27T17:14:02.378117: Epoch   4 Batch 2320/3125   train_loss = 1.262\n",
      "2018-12-27T17:14:03.485157: Epoch   4 Batch 2340/3125   train_loss = 1.096\n",
      "2018-12-27T17:14:04.553302: Epoch   4 Batch 2360/3125   train_loss = 1.169\n",
      "2018-12-27T17:14:05.701234: Epoch   4 Batch 2380/3125   train_loss = 1.136\n",
      "2018-12-27T17:14:06.823243: Epoch   4 Batch 2400/3125   train_loss = 1.111\n",
      "2018-12-27T17:14:07.875432: Epoch   4 Batch 2420/3125   train_loss = 1.044\n",
      "2018-12-27T17:14:09.044307: Epoch   4 Batch 2440/3125   train_loss = 1.131\n",
      "2018-12-27T17:14:10.073556: Epoch   4 Batch 2460/3125   train_loss = 1.124\n",
      "2018-12-27T17:14:11.116767: Epoch   4 Batch 2480/3125   train_loss = 1.168\n",
      "2018-12-27T17:14:12.307584: Epoch   4 Batch 2500/3125   train_loss = 1.084\n",
      "2018-12-27T17:14:13.422622: Epoch   4 Batch 2520/3125   train_loss = 1.103\n",
      "2018-12-27T17:14:14.623393: Epoch   4 Batch 2540/3125   train_loss = 1.048\n",
      "2018-12-27T17:14:15.987745: Epoch   4 Batch 2560/3125   train_loss = 0.888\n",
      "2018-12-27T17:14:17.054893: Epoch   4 Batch 2580/3125   train_loss = 1.112\n",
      "2018-12-27T17:14:18.208808: Epoch   4 Batch 2600/3125   train_loss = 1.057\n",
      "2018-12-27T17:14:19.448494: Epoch   4 Batch 2620/3125   train_loss = 1.054\n",
      "2018-12-27T17:14:20.612384: Epoch   4 Batch 2640/3125   train_loss = 1.068\n",
      "2018-12-27T17:14:21.789237: Epoch   4 Batch 2660/3125   train_loss = 1.152\n",
      "2018-12-27T17:14:23.228390: Epoch   4 Batch 2680/3125   train_loss = 1.049\n",
      "2018-12-27T17:14:24.490017: Epoch   4 Batch 2700/3125   train_loss = 1.206\n",
      "2018-12-27T17:14:25.990007: Epoch   4 Batch 2720/3125   train_loss = 1.018\n",
      "2018-12-27T17:14:27.138935: Epoch   4 Batch 2740/3125   train_loss = 1.097\n",
      "2018-12-27T17:14:28.545177: Epoch   4 Batch 2760/3125   train_loss = 1.078\n",
      "2018-12-27T17:14:29.775886: Epoch   4 Batch 2780/3125   train_loss = 1.150\n",
      "2018-12-27T17:14:30.988649: Epoch   4 Batch 2800/3125   train_loss = 1.244\n",
      "2018-12-27T17:14:32.177468: Epoch   4 Batch 2820/3125   train_loss = 1.342\n",
      "2018-12-27T17:14:33.414161: Epoch   4 Batch 2840/3125   train_loss = 1.104\n",
      "2018-12-27T17:14:34.593010: Epoch   4 Batch 2860/3125   train_loss = 1.078\n",
      "2018-12-27T17:14:36.099982: Epoch   4 Batch 2880/3125   train_loss = 1.044\n",
      "2018-12-27T17:14:37.582020: Epoch   4 Batch 2900/3125   train_loss = 1.110\n",
      "2018-12-27T17:14:39.085002: Epoch   4 Batch 2920/3125   train_loss = 1.156\n",
      "2018-12-27T17:14:40.396504: Epoch   4 Batch 2940/3125   train_loss = 1.055\n",
      "2018-12-27T17:14:41.615238: Epoch   4 Batch 2960/3125   train_loss = 1.055\n",
      "2018-12-27T17:14:42.796085: Epoch   4 Batch 2980/3125   train_loss = 0.994\n",
      "2018-12-27T17:14:44.143479: Epoch   4 Batch 3000/3125   train_loss = 1.137\n",
      "2018-12-27T17:14:45.289416: Epoch   4 Batch 3020/3125   train_loss = 1.118\n",
      "2018-12-27T17:14:46.621853: Epoch   4 Batch 3040/3125   train_loss = 1.067\n",
      "2018-12-27T17:14:47.782751: Epoch   4 Batch 3060/3125   train_loss = 1.073\n",
      "2018-12-27T17:14:49.007477: Epoch   4 Batch 3080/3125   train_loss = 1.254\n",
      "2018-12-27T17:14:50.442639: Epoch   4 Batch 3100/3125   train_loss = 1.155\n",
      "2018-12-27T17:14:51.647420: Epoch   4 Batch 3120/3125   train_loss = 1.074\n",
      "2018-12-27T17:14:52.232853: Epoch   4 Batch   16/781   test_loss = 1.082\n",
      "2018-12-27T17:14:52.572945: Epoch   4 Batch   36/781   test_loss = 1.126\n",
      "2018-12-27T17:14:52.927995: Epoch   4 Batch   56/781   test_loss = 1.190\n",
      "2018-12-27T17:14:53.252129: Epoch   4 Batch   76/781   test_loss = 1.185\n",
      "2018-12-27T17:14:53.559307: Epoch   4 Batch   96/781   test_loss = 1.352\n",
      "2018-12-27T17:14:53.851527: Epoch   4 Batch  116/781   test_loss = 1.074\n",
      "2018-12-27T17:14:54.197601: Epoch   4 Batch  136/781   test_loss = 1.025\n",
      "2018-12-27T17:14:54.471868: Epoch   4 Batch  156/781   test_loss = 1.143\n",
      "2018-12-27T17:14:54.755111: Epoch   4 Batch  176/781   test_loss = 1.254\n",
      "2018-12-27T17:14:55.095202: Epoch   4 Batch  196/781   test_loss = 1.029\n",
      "2018-12-27T17:14:55.427249: Epoch   4 Batch  216/781   test_loss = 1.212\n",
      "2018-12-27T17:14:55.754375: Epoch   4 Batch  236/781   test_loss = 1.044\n",
      "2018-12-27T17:14:56.030636: Epoch   4 Batch  256/781   test_loss = 1.131\n",
      "2018-12-27T17:14:56.306897: Epoch   4 Batch  276/781   test_loss = 1.383\n",
      "2018-12-27T17:14:56.751708: Epoch   4 Batch  296/781   test_loss = 1.154\n",
      "2018-12-27T17:14:57.033954: Epoch   4 Batch  316/781   test_loss = 1.079\n",
      "2018-12-27T17:14:57.314205: Epoch   4 Batch  336/781   test_loss = 1.009\n",
      "2018-12-27T17:14:57.606423: Epoch   4 Batch  356/781   test_loss = 1.116\n",
      "2018-12-27T17:14:57.916594: Epoch   4 Batch  376/781   test_loss = 1.169\n",
      "2018-12-27T17:14:58.225769: Epoch   4 Batch  396/781   test_loss = 1.301\n",
      "2018-12-27T17:14:58.660606: Epoch   4 Batch  416/781   test_loss = 1.144\n",
      "2018-12-27T17:14:59.033609: Epoch   4 Batch  436/781   test_loss = 1.233\n",
      "2018-12-27T17:14:59.317849: Epoch   4 Batch  456/781   test_loss = 1.032\n",
      "2018-12-27T17:14:59.642979: Epoch   4 Batch  476/781   test_loss = 1.344\n",
      "2018-12-27T17:14:59.983070: Epoch   4 Batch  496/781   test_loss = 1.220\n",
      "2018-12-27T17:15:00.278281: Epoch   4 Batch  516/781   test_loss = 1.026\n",
      "2018-12-27T17:15:00.649302: Epoch   4 Batch  536/781   test_loss = 1.335\n",
      "2018-12-27T17:15:01.074154: Epoch   4 Batch  556/781   test_loss = 1.145\n",
      "2018-12-27T17:15:01.394297: Epoch   4 Batch  576/781   test_loss = 1.362\n",
      "2018-12-27T17:15:01.728404: Epoch   4 Batch  596/781   test_loss = 1.208\n",
      "2018-12-27T17:15:02.079467: Epoch   4 Batch  616/781   test_loss = 1.113\n",
      "2018-12-27T17:15:02.439503: Epoch   4 Batch  636/781   test_loss = 1.112\n",
      "2018-12-27T17:15:02.856390: Epoch   4 Batch  656/781   test_loss = 1.295\n",
      "2018-12-27T17:15:03.195482: Epoch   4 Batch  676/781   test_loss = 1.454\n",
      "2018-12-27T17:15:03.483713: Epoch   4 Batch  696/781   test_loss = 1.132\n",
      "2018-12-27T17:15:03.774934: Epoch   4 Batch  716/781   test_loss = 1.172\n",
      "2018-12-27T17:15:04.067153: Epoch   4 Batch  736/781   test_loss = 1.341\n",
      "2018-12-27T17:15:04.346406: Epoch   4 Batch  756/781   test_loss = 1.040\n",
      "2018-12-27T17:15:04.653585: Epoch   4 Batch  776/781   test_loss = 0.972\n",
      "Model Trained and Saved\n"
     ]
    }
   ],
   "source": [
    "def get_batches(Xs, ys, batch_size):\n",
    "    \"\"\"Yield consecutive (Xs, ys) slices of size batch_size; the last slice may be shorter.\"\"\"\n",
    "    for start in range(0, len(Xs), batch_size):\n",
    "        end = min(start + batch_size, len(Xs))\n",
    "        yield Xs[start:end], ys[start:end]\n",
    "%matplotlib inline\n",
    "%config InlineBackend.figure_format = 'retina'\n",
    "import matplotlib.pyplot as plt\n",
    "import time\n",
    "import datetime\n",
    "#Sample row of the feature data; note the x.take(col, 1) calls below use\n",
    "#columns 0 UserID, 1 MovieID, 2 Gender, 3 Age, 4 JobID, 5 Title, 6 Genres:\n",
    "#0 UserID                                                     2\n",
    "#1 MovieID                                                 1193\n",
    "#  Rating (target)                                            5\n",
    "#2 Gender                                                     1\n",
    "#3 Age                                                        5\n",
    "#5 Title      [4094, 4032, 1255, 4199, 3605, 90, 2102, 2102,...\n",
    "#6 Genres     [10, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 1...\n",
    "# Name: 1, dtype: object\n",
    "losses = {'train':[], 'test':[]}\n",
    " #train_graph is the default graph built in an earlier cell\n",
    "with tf.Session(graph=train_graph) as sess:\n",
    "#collect summary data for TensorBoard\n",
    "    # Keep track of gradient values and sparsity\n",
    "    grad_summaries = []#gradient summaries\n",
    "    for g, v in gradients:\n",
    "        if g is not None:\n",
    "            grad_hist_summary = tf.summary.histogram(\"{}/grad/hist\".format(v.name.replace(':', '_')), g)#histogram of gradient values\n",
    "            sparsity_summary = tf.summary.scalar(\"{}/grad/sparsity\".format(v.name.replace(':', '_')), tf.nn.zero_fraction(g))\n",
    "            grad_summaries.append(grad_hist_summary)\n",
    "            grad_summaries.append(sparsity_summary)\n",
    "    grad_summaries_merged = tf.summary.merge(grad_summaries)\n",
    " \n",
    "    # Output directory for models and summaries\n",
    "    timestamp = str(int(time.time()))\n",
    "    out_dir = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\n",
    "    print(\"Writing to {}\\n\".format(out_dir))\n",
    " \n",
    "    # Summaries for loss and accuracy\n",
    "    loss_summary = tf.summary.scalar(\"loss\", loss)#log the scalar loss to the event file so TensorBoard can visualize it later\n",
    " \n",
    "    # Train Summaries\n",
    "    train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])#summaries recorded during training\n",
    "    train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\n",
    "    train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n",
    " \n",
    "    # Inference summaries\n",
    "    inference_summary_op = tf.summary.merge([loss_summary])#summaries recorded during evaluation\n",
    "    #writer for the evaluation summaries\n",
    "    inference_summary_dir = os.path.join(out_dir, \"summaries\", \"inference\")\n",
    "    inference_summary_writer = tf.summary.FileWriter(inference_summary_dir, sess.graph)\n",
    " \n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    saver = tf.train.Saver()#saver used to checkpoint the model during training\n",
    "    #saver.save() writes a checkpoint file containing all trainable variables of the current model.\n",
    "    for epoch_i in range(num_epochs): #one full pass over the data per epoch\n",
    "        #split into train/test sets; NOTE(review): random_state=0 is fixed, so the SAME split is produced every epoch\n",
    "        train_X,test_X, train_y, test_y = train_test_split(features,  \n",
    "                                                           targets_values,  \n",
    "                                                           test_size = 0.2,  \n",
    "                                                           random_state = 0)  \n",
    " \n",
    "        train_batches = get_batches(train_X, train_y, batch_size)#generator of (x, y) batches\n",
    "        test_batches = get_batches(test_X, test_y, batch_size)\n",
    " \n",
    "        #training loop: feed batches and record the training loss\n",
    "        for batch_i in range(len(train_X) // batch_size):#feed batch_size examples at a time; partial final batch is dropped\n",
    "            x, y = next(train_batches)\n",
    "            categories = np.zeros([batch_size, 18])#18 genre slots per movie\n",
    "            for i in range(batch_size):\n",
    "                categories[i] = x.take(6,1)[i]\n",
    " \n",
    "            titles = np.zeros([batch_size, sentences_size])\n",
    "            for i in range(batch_size):\n",
    "                titles[i] = x.take(5,1)[i]#column 5 holds the padded title token ids\n",
    "             #assemble the feed dict\n",
    "            feed = {\n",
    "                uid: np.reshape(x.take(0,1), [batch_size, 1]),\n",
    "                user_gender: np.reshape(x.take(2,1), [batch_size, 1]),\n",
    "                user_age: np.reshape(x.take(3,1), [batch_size, 1]),\n",
    "                user_job: np.reshape(x.take(4,1), [batch_size, 1]),\n",
    "                movie_id: np.reshape(x.take(1,1), [batch_size, 1]),\n",
    "                movie_categories: categories,  #x.take(6,1)\n",
    "                movie_titles: titles,  #x.take(5,1)\n",
    "                targets: np.reshape(y, [batch_size, 1]),\n",
    "                dropout_keep_prob: dropout_keep, #dropout_keep\n",
    "                lr: learning_rate}\n",
    "             #returns the global step, the batch loss, the merged summaries and the train-op result\n",
    "            step, train_loss, summaries, _ = sess.run([global_step, loss, train_summary_op, train_op], feed)  #cost\n",
    "            losses['train'].append(train_loss)#record every batch loss (each computed from one batch of batch_size examples)\n",
    "            train_summary_writer.add_summary(summaries, step)  #write train summaries at this step\n",
    " \n",
    "            # Show every <show_every_n_batches> batches\n",
    "            if (epoch_i * (len(train_X) // batch_size) + batch_i) % show_every_n_batches == 0:\n",
    "                time_str = datetime.datetime.now().isoformat()\n",
    "                print('{}: Epoch {:>3} Batch {:>4}/{}   train_loss = {:.3f}'.format(\n",
    "                    time_str,\n",
    "                    epoch_i,\n",
    "                    batch_i,\n",
    "                    (len(train_X) // batch_size),\n",
    "                    train_loss))\n",
    " \n",
    "        #evaluation loop over the test batches\n",
    "        for batch_i  in range(len(test_X) // batch_size):\n",
    "            x, y = next(test_batches)\n",
    " \n",
    "            categories = np.zeros([batch_size, 18])\n",
    "            for i in range(batch_size):\n",
    "                categories[i] = x.take(6,1)[i]\n",
    " \n",
    "            titles = np.zeros([batch_size, sentences_size])\n",
    "            for i in range(batch_size):\n",
    "                titles[i] = x.take(5,1)[i]\n",
    " \n",
    "            feed = {\n",
    "                uid: np.reshape(x.take(0,1), [batch_size, 1]),\n",
    "                user_gender: np.reshape(x.take(2,1), [batch_size, 1]),\n",
    "                user_age: np.reshape(x.take(3,1), [batch_size, 1]),\n",
    "                user_job: np.reshape(x.take(4,1), [batch_size, 1]),\n",
    "                movie_id: np.reshape(x.take(1,1), [batch_size, 1]),\n",
    "                movie_categories: categories,  #x.take(6,1)\n",
    "                movie_titles: titles,  #x.take(5,1)\n",
    "                targets: np.reshape(y, [batch_size, 1]),\n",
    "                dropout_keep_prob: 1, #no dropout at evaluation time\n",
    "                lr: learning_rate}\n",
    " \n",
    "            step, test_loss, summaries = sess.run([global_step, loss, inference_summary_op], feed)  #cost\n",
    "             \n",
    "            #record the test loss\n",
    "            losses['test'].append(test_loss)\n",
    "            inference_summary_writer.add_summary(summaries, step)  #write evaluation summaries at this step\n",
    " \n",
    "            time_str = datetime.datetime.now().isoformat()\n",
    "            if (epoch_i * (len(test_X) // batch_size) + batch_i) % show_every_n_batches == 0:\n",
    "                print('{}: Epoch {:>3} Batch {:>4}/{}   test_loss = {:.3f}'.format(\n",
    "                    time_str,\n",
    "                    epoch_i,\n",
    "                    batch_i,\n",
    "                    (len(test_X) // batch_size),\n",
    "                    test_loss))\n",
    " \n",
    "    # Save Model\n",
    "    saver.save(sess, save_dir)  #, global_step=epoch_i\n",
    "    print('Model Trained and Saved')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Fetch Tensors\n",
    "def get_tensors(loaded_graph):\n",
    "#look up the input placeholders and output tensors, by name, in the restored graph\n",
    "    uid = loaded_graph.get_tensor_by_name(\"uid:0\")\n",
    "    user_gender = loaded_graph.get_tensor_by_name(\"user_gender:0\")\n",
    "    user_age = loaded_graph.get_tensor_by_name(\"user_age:0\")\n",
    "    user_job = loaded_graph.get_tensor_by_name(\"user_job:0\")\n",
    "    movie_id = loaded_graph.get_tensor_by_name(\"movie_id:0\")\n",
    "    movie_categories = loaded_graph.get_tensor_by_name(\"movie_categories:0\")\n",
    "    movie_titles = loaded_graph.get_tensor_by_name(\"movie_titles:0\")\n",
    "    targets = loaded_graph.get_tensor_by_name(\"targets:0\")\n",
    "    dropout_keep_prob = loaded_graph.get_tensor_by_name(\"dropout_keep_prob:0\")\n",
    "    lr = loaded_graph.get_tensor_by_name(\"LearningRate:0\")\n",
    "    #the two alternative rating heads expose the prediction under different tensor names\n",
    "#     inference = loaded_graph.get_tensor_by_name(\"inference/inference/BiasAdd:0\")\n",
    "    inference = loaded_graph.get_tensor_by_name(\"inference/MatMul:0\")#matmul (regression) head\n",
    "    movie_combine_layer_flat = loaded_graph.get_tensor_by_name(\"movie_fc/Reshape:0\")\n",
    "    user_combine_layer_flat = loaded_graph.get_tensor_by_name(\"user_fc/Reshape:0\")\n",
    "    return uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, inference, movie_combine_layer_flat, user_combine_layer_flat\n",
    "#inference is the predicted rating"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1\n",
      "INFO:tensorflow:Restoring parameters from C:\\Users\\abc\\Desktop\\model\\save\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[array([[5.7040863]], dtype=float32)]"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#Predict the rating a given user would assign to a given movie\n",
    "#forward pass through the network to get the predicted rating  #the .meta file holds the saved graph\n",
    "\n",
    "def rating_movie(user_id_val, movie_id_val):#user_id_val: raw UserID; movie_id_val: raw MovieID\n",
    "    loaded_graph = tf.Graph()  #fresh graph for the restored model\n",
    "    with tf.Session(graph=loaded_graph) as sess:  #\n",
    "        # Load saved model\n",
    "        loader = tf.train.import_meta_graph('save.meta')\n",
    "        loader.restore(sess, tf.train.latest_checkpoint(r\"C:\\Users\\abc\\Desktop\\model\"))\n",
    "        #the session now holds the restored parameters\n",
    "        # Get Tensors from loaded model\n",
    "        uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, inference,_, __ = get_tensors(loaded_graph)  #loaded_graph\n",
    "\n",
    "        categories = np.zeros([1, 18])\n",
    "        categories[0] = movies.values[movieid2idx[movie_id_val]][2]#movies is the preprocessed dataframe\n",
    "        #movies columns: id, title (token ids), genres (ids)\n",
    "        \n",
    "        titles = np.zeros([1, sentences_size])\n",
    "        titles[0] = movies.values[movieid2idx[movie_id_val]][1]\n",
    "\n",
    "        feed = {\n",
    "              uid: np.reshape(users.values[user_id_val-1][0], [1, 1]),\n",
    "              user_gender: np.reshape(users.values[user_id_val-1][1], [1, 1]),\n",
    "              user_age: np.reshape(users.values[user_id_val-1][2], [1, 1]),\n",
    "              user_job: np.reshape(users.values[user_id_val-1][3], [1, 1]),\n",
    "              movie_id: np.reshape(movies.values[movieid2idx[movie_id_val]][0], [1, 1]),\n",
    "              movie_categories: categories,  #x.take(6,1)\n",
    "              movie_titles: titles,  #x.take(5,1)\n",
    "              dropout_keep_prob: 1}\n",
    "\n",
    "        # Get Prediction\n",
    "        inference_val = sess.run([inference], feed)  #forward pass; inference is the predicted rating\n",
    "\n",
    "        return (inference_val)\n",
    "#rating_movie(123,1401)#predicted rating of movie 1401 by user 123"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from C:\\Users\\abc\\Desktop\\model\\save\n",
      "INFO:tensorflow:Restoring parameters from C:\\Users\\abc\\Desktop\\model\\save\n"
     ]
    }
   ],
   "source": [
    "#Build the movie feature matrix\n",
    "#combine the trained per-movie features into one matrix and save it locally\n",
    "import tensorflow as tf\n",
    "loaded_graph = tf.Graph()  #\n",
    "movie_matrics = []\n",
    "with tf.Session(graph=loaded_graph) as sess:  #\n",
    "        # Load saved model\n",
    "    loader = tf.train.import_meta_graph('save.meta')\n",
    "    loader.restore(sess, tf.train.latest_checkpoint(r\"C:\\Users\\abc\\Desktop\\model\"))\n",
    "    # Get Tensors from loaded model\n",
    "    uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, _, movie_combine_layer_flat, __ = get_tensors(loaded_graph)  #loaded_graph\n",
    "\n",
    "    for item in movies.values:\n",
    "        categories = np.zeros([1, 18])\n",
    "        categories[0] = item.take(2)\n",
    "\n",
    "        titles = np.zeros([1, sentences_size])\n",
    "        titles[0] = item.take(1)\n",
    "\n",
    "        feed = {\n",
    "            movie_id: np.reshape(item.take(0), [1, 1]),\n",
    "            movie_categories: categories,  #x.take(6,1)\n",
    "            movie_titles: titles,  #x.take(5,1)\n",
    "            dropout_keep_prob: 1}\n",
    "        #run the movie-side sub-network\n",
    "        movie_combine_layer_flat_val = sess.run([movie_combine_layer_flat], feed)  #movie_combine_layer_flat is the output of the movie-side embedding/concat/dense layers\n",
    "        #including the title text convolution, concat and fully-connected layer\n",
    "        movie_matrics.append(movie_combine_layer_flat_val)#one feature vector per (id, genres, title) movie record\n",
    "\n",
    "pickle.dump((np.array(movie_matrics).reshape(-1, 200)), open('movie_matrics.p', 'wb'))\n",
    "movie_matrics = pickle.load(open('movie_matrics.p', mode='rb'))\n",
    "#Build the user feature matrix\n",
    "#combine the trained per-user features into one matrix and save it locally\n",
    "loaded_graph = tf.Graph()  #\n",
    "users_matrics = []\n",
    "with tf.Session(graph=loaded_graph) as sess:  #\n",
    "        # Load saved model\n",
    "    loader = tf.train.import_meta_graph('save.meta')\n",
    "    loader.restore(sess, tf.train.latest_checkpoint(r\"C:\\Users\\abc\\Desktop\\model\"))\n",
    "\n",
    "    # Get Tensors from loaded model\n",
    "    uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, _, __,user_combine_layer_flat = get_tensors(loaded_graph)  #loaded_graph\n",
    "\n",
    "    for item in users.values:\n",
    "\n",
    "        feed = {\n",
    "            uid: np.reshape(item.take(0), [1, 1]),\n",
    "            user_gender: np.reshape(item.take(1), [1, 1]),\n",
    "            user_age: np.reshape(item.take(2), [1, 1]),\n",
    "            user_job: np.reshape(item.take(3), [1, 1]),\n",
    "            dropout_keep_prob: 1}\n",
    "\n",
    "        user_combine_layer_flat_val = sess.run([user_combine_layer_flat], feed)  \n",
    "        users_matrics.append(user_combine_layer_flat_val)\n",
    "\n",
    "pickle.dump((np.array(users_matrics).reshape(-1, 200)), open('users_matrics.p', 'wb'))\n",
    "users_matrics = pickle.load(open('users_matrics.p', mode='rb'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Movie recommendation using the generated user and movie feature matrices\n",
    "\n",
    "#1. Recommend movies of the same type\n",
    "#compute the cosine similarity between the watched movie's feature vector and the whole\n",
    "#movie feature matrix, then take the top_k most similar movies.\n",
    "\n",
    "def recommend_same_type_movie(movie_id_val, top_k = 20):\n",
    "    #movie_id_val: iterable of raw MovieIDs to recommend for\n",
    "    #top_k: number of similar movies written per input movie\n",
    "    #writes one tab-separated line per input movie and returns the results of the last movie\n",
    "    loaded_graph = tf.Graph()  #\n",
    "    file_dir = r'C:\\Users\\abc\\Desktop\\model\\same_type_recommend.txt'\n",
    "    #context managers guarantee the file and the session are closed even on error\n",
    "    with open(file_dir,'w',encoding = 'utf-8') as file:\n",
    "        with tf.Session(graph=loaded_graph) as sess:  #\n",
    "            # Load saved model\n",
    "            loader = tf.train.import_meta_graph('save.meta')\n",
    "            loader.restore(sess, tf.train.latest_checkpoint(r\"C:\\Users\\abc\\Desktop\\model\"))\n",
    "\n",
    "            #loop-invariant: normalize the movie matrix by its row norms once, not per movie\n",
    "            norm_movie_matrics = tf.sqrt(tf.reduce_sum(tf.square(movie_matrics), 1, keep_dims=True))\n",
    "            normalized_movie_matrics = movie_matrics / norm_movie_matrics\n",
    "\n",
    "            for i in movie_id_val :\n",
    "                file.write(str(movies_orig[int(movieid2idx[i])][1])+'\\t')\n",
    "                #similarity of this movie's feature vector against every movie\n",
    "                probs_embeddings = (movie_matrics[movieid2idx[i]]).reshape([1, 200])\n",
    "                probs_similarity = tf.matmul(probs_embeddings, tf.transpose(normalized_movie_matrics))\n",
    "                sim = (probs_similarity.eval())\n",
    "                #indices of the top_k most similar movies, most similar first\n",
    "                results = (-sim[0]).argsort()[0:top_k]\n",
    "                print(results)\n",
    "                for val in results:\n",
    "                    file.write(movies_orig[val][1]+'\\t')\n",
    "                file.write('\\n')\n",
    "        return results\n",
    "#2. Recommend movies this user may like\n",
    "#score every movie by multiplying the user's feature vector with the movie feature matrix,\n",
    "#then take the top_k highest-scoring movies (a random-choice variant is commented out below).\n",
    "def recommend_your_favorite_movie(user_id_val, top_k = 10):\n",
    "\n",
    "    loaded_graph = tf.Graph()  #\n",
    "    file_dir = r'C:\\Users\\abc\\Desktop\\model\\recommend_your_favorite_movies.txt'\n",
    "    file = open(file_dir,'w',encoding='utf-8')\n",
    "    with tf.Session(graph=loaded_graph) as sess:  #\n",
    "        # Load saved model\n",
    "        loader = tf.train.import_meta_graph('save.meta')\n",
    "        loader.restore(sess, tf.train.latest_checkpoint(r\"C:\\Users\\abc\\Desktop\\model\"))\n",
    "        #score all movies for each requested user\n",
    "        for i in user_id_val:\n",
    "            file.write(str(i)+'\\t')      \n",
    "            probs_embeddings = (users_matrics[i-1]).reshape([1, 200])\n",
    "            probs_similarity = tf.matmul(probs_embeddings, tf.transpose(movie_matrics))\n",
    "            sim = (probs_similarity.eval())\n",
    "        #     print(sim.shape)\n",
    "            results = (-sim[0]).argsort()[0:top_k]\n",
    "            print(results)\n",
    "            for val in results:                \n",
    "                file.write(movies_orig[val][1]+'\\t')\n",
    "            file.write('\\n')\n",
    "        #     sim_norm = probs_norm_similarity.eval()\n",
    "        #     print((-sim_norm[0]).argsort()[0:top_k])\n",
    "\n",
    "            #print(\"以下是给您的推荐：\")\n",
    "#             p = np.squeeze(sim)\n",
    "#             p[np.argsort(p)[:-top_k]] = 0\n",
    "#             p = p / np.sum(p)\n",
    "#             results = set()\n",
    "#             while len(results) != 5:\n",
    "#                 c = np.random.choice(3883, 1, p=p)[0]\n",
    "#                 results.add(c)\n",
    "#             for val in (results):\n",
    "#                 #print(val)\n",
    "#                 #print(movies_orig[val])\n",
    "        file.close()\n",
    "        return results  #results of the last user processed\n",
    "#Which other movies do people who watched (liked) this movie also like?\n",
    "#first pick the top_k users most likely to like the movie and take their user feature vectors,\n",
    "#then score every movie for those users,\n",
    "#and recommend the highest-scored movies\n",
    "#(a random-selection variant is commented out below)\n",
    "import random\n",
    "def recommend_other_favorite_movie(movie_id_val, top_k = 20):\n",
    "    loaded_graph = tf.Graph()  #\n",
    "    \n",
    "    file_dir = r'C:\\Users\\abc\\Desktop\\model\\recommend_other_favorite_movie.txt'\n",
    "    with open(file_dir,'w',encoding='utf-8')as file:\n",
    "        with tf.Session(graph=loaded_graph) as sess:  \n",
    "            # Load saved model\n",
    "            loader = tf.train.import_meta_graph('save.meta')\n",
    "            loader.restore(sess, tf.train.latest_checkpoint(r\"C:\\Users\\abc\\Desktop\\model\"))\n",
    "            for i in movie_id_val:\n",
    "\n",
    "                #find the users most likely to like this movie\n",
    "                probs_movie_embeddings = (movie_matrics[movieid2idx[i]]).reshape([1, 200])\n",
    "                probs_user_favorite_similarity = tf.matmul(probs_movie_embeddings, tf.transpose(users_matrics))#vector x matrix -> one similarity score per user\n",
    "                favorite_user_id = np.argsort(probs_user_favorite_similarity.eval())[0][-top_k:]#0-based row indices of the top_k users; NOTE(review): the -1 offsets below may be off by one -- verify\n",
    "            #     print(normalized_users_matrics.eval().shape)\n",
    "            #     print(probs_user_favorite_similarity.eval()[0][favorite_user_id])\n",
    "            #     print(favorite_user_id.shape)\n",
    "\n",
    "                print(\"您看的电影是：{}\".format(movies_orig[movieid2idx[i]]))\n",
    "                #look up the movies those users prefer\n",
    "                print(\"喜欢看这个电影的人是：{}\".format(users_orig[favorite_user_id-1])) \n",
    "                probs_users_embeddings = (users_matrics[favorite_user_id-1]).reshape([-1, 200])\n",
    "                probs_similarity = tf.matmul(probs_users_embeddings, tf.transpose(movie_matrics))#(top_k, 200) user matrix times the transposed movie matrix\n",
    "                sim = (probs_similarity.eval())\n",
    "                results = (-sim[0]).argsort()[0:top_k]#NOTE(review): only sim[0] (the first selected user) is ranked -- confirm this is intended\n",
    "                file.write(str(i)+':'+'\\t')\n",
    "                for val in results:\n",
    "                    file.write(movies_orig[val][1]+'\\t')#write the ranked movie titles\n",
    "                file.write('\\n')\n",
    "            \n",
    "        #     print(results)\n",
    "\n",
    "        #     print(sim.shape)\n",
    "        #     print(np.argmax(sim, 1))\n",
    "#         p = np.argmax(sim, 1)\n",
    "#         print(\"喜欢看这个电影的人还喜欢看：\")\n",
    "\n",
    "#         results = set()\n",
    "#         while len(results) != 5:\n",
    "#             c = p[random.randrange(top_k)]\n",
    "#             results.add(c)\n",
    "#         for val in (results):\n",
    "#             print(val)\n",
    "#             print(movies_orig[val])\n",
    "\n",
    "        return results\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from C:\\Users\\abc\\Desktop\\model\\save\n",
      "您看的电影是：[1 'Toy Story (1995)' \"Animation|Children's|Comedy\"]\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [3015 'M' 56 6]\n",
      " [5622 'M' 35 15]\n",
      " [428 'F' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[2 'Jumanji (1995)' \"Adventure|Children's|Fantasy\"]\n",
      "喜欢看这个电影的人是：[[5918 'M' 25 12]\n",
      " [5622 'M' 35 15]\n",
      " [3015 'M' 56 6]\n",
      " [6022 'M' 25 17]\n",
      " [428 'F' 18 4]]\n",
      "您看的电影是：[3 'Grumpier Old Men (1995)' 'Comedy|Romance']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [3874 'M' 25 7]\n",
      " [1763 'M' 35 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[4 'Waiting to Exhale (1995)' 'Comedy|Drama']\n",
      "喜欢看这个电影的人是：[[5622 'M' 35 15]\n",
      " [3907 'F' 18 14]\n",
      " [1603 'F' 25 0]\n",
      " [3874 'M' 25 7]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[5 'Father of the Bride Part II (1995)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[1763 'M' 35 7]\n",
      " [1603 'F' 25 0]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[6 'Heat (1995)' 'Action|Crime|Thriller']\n",
      "喜欢看这个电影的人是：[[5622 'M' 35 15]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]\n",
      " [428 'F' 18 4]\n",
      " [3031 'M' 18 4]]\n",
      "您看的电影是：[7 'Sabrina (1995)' 'Comedy|Romance']\n",
      "喜欢看这个电影的人是：[[3631 'M' 18 4]\n",
      " [437 'M' 35 17]\n",
      " [1603 'F' 25 0]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[8 'Tom and Huck (1995)' \"Adventure|Children's\"]\n",
      "喜欢看这个电影的人是：[[3631 'M' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [3874 'M' 25 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[9 'Sudden Death (1995)' 'Action']\n",
      "喜欢看这个电影的人是：[[3015 'M' 56 6]\n",
      " [3907 'F' 18 14]\n",
      " [428 'F' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[10 'GoldenEye (1995)' 'Action|Adventure|Thriller']\n",
      "喜欢看这个电影的人是：[[428 'F' 18 4]\n",
      " [1809 'F' 25 14]\n",
      " [437 'M' 35 17]\n",
      " [3031 'M' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[11 'American President, The (1995)' 'Comedy|Drama|Romance']\n",
      "喜欢看这个电影的人是：[[437 'M' 35 17]\n",
      " [1809 'F' 25 14]\n",
      " [3874 'M' 25 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[12 'Dracula: Dead and Loving It (1995)' 'Comedy|Horror']\n",
      "喜欢看这个电影的人是：[[4612 'F' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [1809 'F' 25 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[13 'Balto (1995)' \"Animation|Children's\"]\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [3015 'M' 56 6]\n",
      " [6022 'M' 25 17]\n",
      " [428 'F' 18 4]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[14 'Nixon (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[1603 'F' 25 0]\n",
      " [3874 'M' 25 7]\n",
      " [428 'F' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[15 'Cutthroat Island (1995)' 'Action|Adventure|Romance']\n",
      "喜欢看这个电影的人是：[[1603 'F' 25 0]\n",
      " [3631 'M' 18 4]\n",
      " [6022 'M' 25 17]\n",
      " [437 'M' 35 17]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[16 'Casino (1995)' 'Drama|Thriller']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [1603 'F' 25 0]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[17 'Sense and Sensibility (1995)' 'Drama|Romance']\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [1809 'F' 25 14]\n",
      " [3031 'M' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[18 'Four Rooms (1995)' 'Thriller']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [428 'F' 18 4]\n",
      " [1809 'F' 25 14]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[19 'Ace Ventura: When Nature Calls (1995)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[1603 'F' 25 0]\n",
      " [3015 'M' 56 6]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[20 'Money Train (1995)' 'Action']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [428 'F' 18 4]\n",
      " [3631 'M' 18 4]\n",
      " [6022 'M' 25 17]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[21 'Get Shorty (1995)' 'Action|Comedy|Drama']\n",
      "喜欢看这个电影的人是：[[1603 'F' 25 0]\n",
      " [3907 'F' 18 14]\n",
      " [3031 'M' 18 4]\n",
      " [3874 'M' 25 7]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[22 'Copycat (1995)' 'Crime|Drama|Thriller']\n",
      "喜欢看这个电影的人是：[[428 'F' 18 4]\n",
      " [3031 'M' 18 4]\n",
      " [437 'M' 35 17]\n",
      " [1809 'F' 25 14]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[23 'Assassins (1995)' 'Thriller']\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [428 'F' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [3031 'M' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[24 'Powder (1995)' 'Drama|Sci-Fi']\n",
      "喜欢看这个电影的人是：[[5622 'M' 35 15]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]\n",
      " [3031 'M' 18 4]\n",
      " [428 'F' 18 4]]\n",
      "您看的电影是：[25 'Leaving Las Vegas (1995)' 'Drama|Romance']\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [3031 'M' 18 4]\n",
      " [3874 'M' 25 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[26 'Othello (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[1809 'F' 25 14]\n",
      " [3907 'F' 18 14]\n",
      " [3874 'M' 25 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[27 'Now and Then (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [428 'F' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[28 'Persuasion (1995)' 'Romance']\n",
      "喜欢看这个电影的人是：[[1603 'F' 25 0]\n",
      " [3907 'F' 18 14]\n",
      " [6022 'M' 25 17]\n",
      " [3031 'M' 18 4]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[29 'City of Lost Children, The (1995)' 'Adventure|Sci-Fi']\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [3031 'M' 18 4]\n",
      " [1809 'F' 25 14]\n",
      " [6022 'M' 25 17]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[30 'Shanghai Triad (Yao a yao yao dao waipo qiao) (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [3031 'M' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[31 'Dangerous Minds (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [428 'F' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [3874 'M' 25 7]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[32 'Twelve Monkeys (1995)' 'Drama|Sci-Fi']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [3907 'F' 18 14]\n",
      " [428 'F' 18 4]\n",
      " [6022 'M' 25 17]\n",
      " [3031 'M' 18 4]]\n",
      "您看的电影是：[33 'Wings of Courage (1995)' 'Adventure|Romance']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [3015 'M' 56 6]\n",
      " [6022 'M' 25 17]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[34 'Babe (1995)' \"Children's|Comedy|Drama\"]\n",
      "喜欢看这个电影的人是：[[428 'F' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [1603 'F' 25 0]\n",
      " [3874 'M' 25 7]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[35 'Carrington (1995)' 'Drama|Romance']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [3031 'M' 18 4]\n",
      " [1603 'F' 25 0]\n",
      " [6022 'M' 25 17]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[36 'Dead Man Walking (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[1809 'F' 25 14]\n",
      " [3907 'F' 18 14]\n",
      " [3874 'M' 25 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[37 'Across the Sea of Time (1995)' 'Documentary']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [4612 'F' 18 4]\n",
      " [3015 'M' 56 6]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[38 'It Takes Two (1995)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[1809 'F' 25 14]\n",
      " [428 'F' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [3907 'F' 18 14]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[39 'Clueless (1995)' 'Comedy|Romance']\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [3031 'M' 18 4]\n",
      " [437 'M' 35 17]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[40 'Cry, the Beloved Country (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[1809 'F' 25 14]\n",
      " [3031 'M' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[41 'Richard III (1995)' 'Drama|War']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [3631 'M' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]\n",
      " [428 'F' 18 4]]\n",
      "您看的电影是：[42 'Dead Presidents (1995)' 'Action|Crime|Drama']\n",
      "喜欢看这个电影的人是：[[1967 'M' 50 7]\n",
      " [428 'F' 18 4]\n",
      " [6022 'M' 25 17]\n",
      " [437 'M' 35 17]\n",
      " [3031 'M' 18 4]]\n",
      "您看的电影是：[43 'Restoration (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [428 'F' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[44 'Mortal Kombat (1995)' 'Action|Adventure']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [3015 'M' 56 6]\n",
      " [5622 'M' 35 15]\n",
      " [3907 'F' 18 14]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[45 'To Die For (1995)' 'Comedy|Drama']\n",
      "喜欢看这个电影的人是：[[5622 'M' 35 15]\n",
      " [1603 'F' 25 0]\n",
      " [3907 'F' 18 14]\n",
      " [3874 'M' 25 7]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[46 'How to Make an American Quilt (1995)' 'Drama|Romance']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [1809 'F' 25 14]\n",
      " [437 'M' 35 17]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[47 'Seven (Se7en) (1995)' 'Crime|Thriller']\n",
      "喜欢看这个电影的人是：[[1809 'F' 25 14]\n",
      " [3031 'M' 18 4]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]\n",
      " [428 'F' 18 4]]\n",
      "您看的电影是：[48 'Pocahontas (1995)' \"Animation|Children's|Musical|Romance\"]\n",
      "喜欢看这个电影的人是：[[3015 'M' 56 6]\n",
      " [1603 'F' 25 0]\n",
      " [6022 'M' 25 17]\n",
      " [3031 'M' 18 4]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[49 'When Night Is Falling (1995)' 'Drama|Romance']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [1603 'F' 25 0]\n",
      " [437 'M' 35 17]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[50 'Usual Suspects, The (1995)' 'Crime|Thriller']\n",
      "喜欢看这个电影的人是：[[5622 'M' 35 15]\n",
      " [3031 'M' 18 4]\n",
      " [1809 'F' 25 14]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[51 'Guardian Angel (1994)' 'Action|Drama|Thriller']\n",
      "喜欢看这个电影的人是：[[52 'M' 18 4]\n",
      " [3874 'M' 25 7]\n",
      " [3907 'F' 18 14]\n",
      " [3031 'M' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[52 'Mighty Aphrodite (1995)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [428 'F' 18 4]\n",
      " [1763 'M' 35 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[53 'Lamerica (1994)' 'Drama']\n",
      "喜欢看这个电影的人是：[[1967 'M' 50 7]\n",
      " [3907 'F' 18 14]\n",
      " [3874 'M' 25 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[54 'Big Green, The (1995)' \"Children's|Comedy\"]\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [428 'F' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[55 'Georgia (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[3631 'M' 18 4]\n",
      " [3874 'M' 25 7]\n",
      " [1603 'F' 25 0]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[56 'Kids of the Round Table (1995)' \"Adventure|Children's|Fantasy\"]\n",
      "喜欢看这个电影的人是：[[5622 'M' 35 15]\n",
      " [1809 'F' 25 14]\n",
      " [3015 'M' 56 6]\n",
      " [6022 'M' 25 17]\n",
      " [3907 'F' 18 14]]\n",
      "您看的电影是：[57 'Home for the Holidays (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [1809 'F' 25 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[58 'Postino, Il (The Postman) (1994)' 'Drama|Romance']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [3031 'M' 18 4]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[59 'Confessional, The (Le Confessionnal) (1995)' 'Drama|Mystery']\n",
      "喜欢看这个电影的人是：[[1967 'M' 50 7]\n",
      " [3874 'M' 25 7]\n",
      " [3031 'M' 18 4]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[60 'Indian in the Cupboard, The (1995)' \"Adventure|Children's|Fantasy\"]\n",
      "喜欢看这个电影的人是：[[3015 'M' 56 6]\n",
      " [3031 'M' 18 4]\n",
      " [6022 'M' 25 17]\n",
      " [5622 'M' 35 15]\n",
      " [3907 'F' 18 14]]\n",
      "您看的电影是：[61 'Eye for an Eye (1996)' 'Drama|Thriller']\n",
      "喜欢看这个电影的人是：[[5622 'M' 35 15]\n",
      " [3874 'M' 25 7]\n",
      " [1809 'F' 25 14]\n",
      " [3031 'M' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[62 \"Mr. Holland's Opus (1995)\" 'Drama']\n",
      "喜欢看这个电影的人是：[[1809 'F' 25 14]\n",
      " [3031 'M' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [3907 'F' 18 14]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[63\n",
      " \"Don't Be a Menace to South Central While Drinking Your Juice in the Hood (1996)\"\n",
      " 'Comedy']\n",
      "喜欢看这个电影的人是：[[1809 'F' 25 14]\n",
      " [3031 'M' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[64 'Two if by Sea (1996)' 'Comedy|Romance']\n",
      "喜欢看这个电影的人是：[[437 'M' 35 17]\n",
      " [1809 'F' 25 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]\n",
      " [3031 'M' 18 4]]\n",
      "您看的电影是：[65 'Bio-Dome (1996)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[428 'F' 18 4]\n",
      " [3874 'M' 25 7]\n",
      " [1603 'F' 25 0]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[66 'Lawnmower Man 2: Beyond Cyberspace (1996)' 'Sci-Fi|Thriller']\n",
      "喜欢看这个电影的人是：[[428 'F' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [3031 'M' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[67 'Two Bits (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[1809 'F' 25 14]\n",
      " [3874 'M' 25 7]\n",
      " [3907 'F' 18 14]\n",
      " [3031 'M' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[68 'French Twist (Gazon maudit) (1995)' 'Comedy|Romance']\n",
      "喜欢看这个电影的人是：[[437 'M' 35 17]\n",
      " [3031 'M' 18 4]\n",
      " [1809 'F' 25 14]\n",
      " [6022 'M' 25 17]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[69 'Friday (1995)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[428 'F' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [1603 'F' 25 0]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[70 'From Dusk Till Dawn (1996)' 'Action|Comedy|Crime|Horror|Thriller']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [1809 'F' 25 14]\n",
      " [3031 'M' 18 4]\n",
      " [6022 'M' 25 17]\n",
      " [437 'M' 35 17]]\n",
      "您看的电影是：[71 'Fair Game (1995)' 'Action']\n",
      "喜欢看这个电影的人是：[[428 'F' 18 4]\n",
      " [437 'M' 35 17]\n",
      " [5622 'M' 35 15]\n",
      " [3631 'M' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[72 'Kicking and Screaming (1995)' 'Comedy|Drama']\n",
      "喜欢看这个电影的人是：[[5622 'M' 35 15]\n",
      " [3907 'F' 18 14]\n",
      " [1603 'F' 25 0]\n",
      " [3874 'M' 25 7]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[73 'Mis閞ables, Les (1995)' 'Drama|Musical']\n",
      "喜欢看这个电影的人是：[[428 'F' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [3874 'M' 25 7]\n",
      " [6022 'M' 25 17]\n",
      " [3031 'M' 18 4]]\n",
      "您看的电影是：[74 'Bed of Roses (1996)' 'Drama|Romance']\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [437 'M' 35 17]\n",
      " [3874 'M' 25 7]\n",
      " [6022 'M' 25 17]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[75 'Big Bully (1996)' 'Comedy|Drama']\n",
      "喜欢看这个电影的人是：[[1603 'F' 25 0]\n",
      " [3907 'F' 18 14]\n",
      " [3874 'M' 25 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[76 'Screamers (1995)' 'Sci-Fi|Thriller']\n",
      "喜欢看这个电影的人是：[[437 'M' 35 17]\n",
      " [3631 'M' 18 4]\n",
      " [428 'F' 18 4]\n",
      " [6022 'M' 25 17]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[77 'Nico Icon (1995)' 'Documentary']\n",
      "喜欢看这个电影的人是：[[6022 'M' 25 17]\n",
      " [3031 'M' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [3631 'M' 18 4]\n",
      " [428 'F' 18 4]]\n",
      "您看的电影是：[78 'Crossing Guard, The (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[1809 'F' 25 14]\n",
      " [3907 'F' 18 14]\n",
      " [3031 'M' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[79 'Juror, The (1996)' 'Drama|Thriller']\n",
      "喜欢看这个电影的人是：[[5622 'M' 35 15]\n",
      " [3874 'M' 25 7]\n",
      " [1809 'F' 25 14]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[80 'White Balloon, The (Badkonake Sefid ) (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [1809 'F' 25 14]\n",
      " [437 'M' 35 17]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[81 \"Things to Do in Denver when You're Dead (1995)\" 'Crime|Drama|Romance']\n",
      "喜欢看这个电影的人是：[[732 'M' 25 7]\n",
      " [5622 'M' 35 15]\n",
      " [3031 'M' 18 4]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[82 \"Antonia's Line (Antonia) (1995)\" 'Drama']\n",
      "喜欢看这个电影的人是：[[3015 'M' 56 6]\n",
      " [5622 'M' 35 15]\n",
      " [3874 'M' 25 7]\n",
      " [3907 'F' 18 14]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[83 'Once Upon a Time... When We Were Colored (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [1809 'F' 25 14]\n",
      " [5622 'M' 35 15]\n",
      " [3031 'M' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[84 'Last Summer in the Hamptons (1995)' 'Comedy|Drama']\n",
      "喜欢看这个电影的人是：[[5622 'M' 35 15]\n",
      " [3874 'M' 25 7]\n",
      " [1809 'F' 25 14]\n",
      " [3907 'F' 18 14]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[85 'Angels and Insects (1995)' 'Drama|Romance']\n",
      "喜欢看这个电影的人是：[[1603 'F' 25 0]\n",
      " [3907 'F' 18 14]\n",
      " [3874 'M' 25 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[86 'White Squall (1996)' 'Adventure|Drama']\n",
      "喜欢看这个电影的人是：[[5622 'M' 35 15]\n",
      " [3874 'M' 25 7]\n",
      " [1809 'F' 25 14]\n",
      " [3907 'F' 18 14]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[87 'Dunston Checks In (1996)' \"Children's|Comedy\"]\n",
      "喜欢看这个电影的人是：[[3015 'M' 56 6]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [428 'F' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[88 'Black Sheep (1996)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[437 'M' 35 17]\n",
      " [1763 'M' 35 7]\n",
      " [1603 'F' 25 0]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[89 'Nick of Time (1995)' 'Action|Thriller']\n",
      "喜欢看这个电影的人是：[[3631 'M' 18 4]\n",
      " [437 'M' 35 17]\n",
      " [3031 'M' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[90 'Journey of August King, The (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[1809 'F' 25 14]\n",
      " [3031 'M' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[92 'Mary Reilly (1996)' 'Drama|Thriller']\n",
      "喜欢看这个电影的人是：[[1603 'F' 25 0]\n",
      " [3907 'F' 18 14]\n",
      " [3874 'M' 25 7]\n",
      " [428 'F' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[93 'Vampire in Brooklyn (1995)' 'Comedy|Romance']\n",
      "喜欢看这个电影的人是：[[3015 'M' 56 6]\n",
      " [5622 'M' 35 15]\n",
      " [437 'M' 35 17]\n",
      " [3031 'M' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[94 'Beautiful Girls (1996)' 'Drama']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [3907 'F' 18 14]\n",
      " [1603 'F' 25 0]\n",
      " [3015 'M' 56 6]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[95 'Broken Arrow (1996)' 'Action|Thriller']\n",
      "喜欢看这个电影的人是：[[437 'M' 35 17]\n",
      " [5622 'M' 35 15]\n",
      " [428 'F' 18 4]\n",
      " [6022 'M' 25 17]\n",
      " [3031 'M' 18 4]]\n",
      "您看的电影是：[96 'In the Bleak Midwinter (1995)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[428 'F' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [3031 'M' 18 4]\n",
      " [3907 'F' 18 14]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[97 'Hate (Haine, La) (1995)' 'Drama']\n",
      "喜欢看这个电影的人是：[[428 'F' 18 4]\n",
      " [3874 'M' 25 7]\n",
      " [6022 'M' 25 17]\n",
      " [3907 'F' 18 14]\n",
      " [3031 'M' 18 4]]\n",
      "您看的电影是：[98 'Shopping (1994)' 'Action|Thriller']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]\n",
      " [428 'F' 18 4]]\n",
      "您看的电影是：[99 'Heidi Fleiss: Hollywood Madam (1995)' 'Documentary']\n",
      "喜欢看这个电影的人是：[[4612 'F' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [3907 'F' 18 14]\n",
      " [6022 'M' 25 17]\n",
      " [428 'F' 18 4]]\n",
      "您看的电影是：[100 'City Hall (1996)' 'Drama|Thriller']\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [3874 'M' 25 7]\n",
      " [428 'F' 18 4]\n",
      " [3031 'M' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[101 'Bottle Rocket (1996)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [3907 'F' 18 14]\n",
      " [428 'F' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[102 'Mr. Wrong (1996)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [3015 'M' 56 6]\n",
      " [1763 'M' 35 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[103 'Unforgettable (1996)' 'Thriller']\n",
      "喜欢看这个电影的人是：[[3631 'M' 18 4]\n",
      " [3031 'M' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [428 'F' 18 4]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[104 'Happy Gilmore (1996)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[1809 'F' 25 14]\n",
      " [1763 'M' 35 7]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[105 'Bridges of Madison County, The (1995)' 'Drama|Romance']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [1603 'F' 25 0]\n",
      " [3907 'F' 18 14]\n",
      " [6022 'M' 25 17]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[106 'Nobody Loves Me (Keiner liebt mich) (1994)' 'Comedy|Drama']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [437 'M' 35 17]\n",
      " [1809 'F' 25 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[107 'Muppet Treasure Island (1996)' \"Adventure|Children's|Comedy|Musical\"]\n",
      "喜欢看这个电影的人是：[[1229 'F' 25 4]\n",
      " [3907 'F' 18 14]\n",
      " [1603 'F' 25 0]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[108 'Catwalk (1995)' 'Documentary']\n",
      "喜欢看这个电影的人是：[[1763 'M' 35 7]\n",
      " [2798 'M' 35 20]\n",
      " [4612 'F' 18 4]\n",
      " [5622 'M' 35 15]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " [428 'F' 18 4]]\n",
      "您看的电影是：[109 'Headless Body in Topless Bar (1995)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [1763 'M' 35 7]\n",
      " [437 'M' 35 17]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[110 'Braveheart (1995)' 'Action|Drama|War']\n",
      "喜欢看这个电影的人是：[[2798 'M' 35 20]\n",
      " [6022 'M' 25 17]\n",
      " [5622 'M' 35 15]\n",
      " [3631 'M' 18 4]\n",
      " [428 'F' 18 4]]\n",
      "您看的电影是：[111 'Taxi Driver (1976)' 'Drama|Thriller']\n",
      "喜欢看这个电影的人是：[[1967 'M' 50 7]\n",
      " [3907 'F' 18 14]\n",
      " [3874 'M' 25 7]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[112 'Rumble in the Bronx (1995)' 'Action|Adventure|Crime']\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [3031 'M' 18 4]\n",
      " [1809 'F' 25 14]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[113 'Before and After (1996)' 'Drama|Mystery']\n",
      "喜欢看这个电影的人是：[[3907 'F' 18 14]\n",
      " [3031 'M' 18 4]\n",
      " [3874 'M' 25 7]\n",
      " [437 'M' 35 17]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[114 \"Margaret's Museum (1995)\" 'Drama']\n",
      "喜欢看这个电影的人是：[[1603 'F' 25 0]\n",
      " [1967 'M' 50 7]\n",
      " [3874 'M' 25 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[115 'Happiness Is in the Field (1995)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[3874 'M' 25 7]\n",
      " [1603 'F' 25 0]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[116 'Anne Frank Remembered (1995)' 'Documentary']\n",
      "喜欢看这个电影的人是：[[428 'F' 18 4]\n",
      " [4612 'F' 18 4]\n",
      " [6022 'M' 25 17]\n",
      " [3015 'M' 56 6]\n",
      " [5622 'M' 35 15]]\n",
      "您看的电影是：[117 \"Young Poisoner's Handbook, The (1995)\" 'Crime']\n",
      "喜欢看这个电影的人是：[[437 'M' 35 17]\n",
      " [1809 'F' 25 14]\n",
      " [3031 'M' 18 4]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[118 'If Lucy Fell (1996)' 'Comedy|Romance']\n",
      "喜欢看这个电影的人是：[[1809 'F' 25 14]\n",
      " [1763 'M' 35 7]\n",
      " [437 'M' 35 17]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[119 'Steal Big, Steal Little (1995)' 'Comedy']\n",
      "喜欢看这个电影的人是：[[428 'F' 18 4]\n",
      " [1809 'F' 25 14]\n",
      " [5622 'M' 35 15]\n",
      " [3907 'F' 18 14]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[120 'Race the Sun (1996)' 'Drama']\n",
      "喜欢看这个电影的人是：[[437 'M' 35 17]\n",
      " [1809 'F' 25 14]\n",
      " [3874 'M' 25 7]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[121 'Boys of St. Vincent, The (1993)' 'Drama']\n",
      "喜欢看这个电影的人是：[[3031 'M' 18 4]\n",
      " [3874 'M' 25 7]\n",
      " [3907 'F' 18 14]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n",
      "您看的电影是：[122 'Boomerang (1992)' 'Comedy|Romance']\n",
      "喜欢看这个电影的人是：[[1967 'M' 50 7]\n",
      " [3631 'M' 18 4]\n",
      " [437 'M' 35 17]\n",
      " [5622 'M' 35 15]\n",
      " [6022 'M' 25 17]]\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-25-2fb0e8191556>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m      7\u001b[0m \u001b[1;31m#recommend_same_type_movie(list(movies['MovieID'][0:280]), top_k = 120)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      8\u001b[0m \u001b[1;31m#recommend_your_favorite_movie(list(users['UserID'][0:280]), top_k = 5)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 9\u001b[1;33m \u001b[0mrecommend_other_favorite_movie\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlist\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmovies\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'MovieID'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;36m280\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtop_k\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m5\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;32m<ipython-input-24-c4e9cd7666a6>\u001b[0m in \u001b[0;36mrecommend_other_favorite_movie\u001b[1;34m(movie_id_val, top_k)\u001b[0m\n\u001b[0;32m    114\u001b[0m                 \u001b[0mprobs_users_embeddings\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0musers_matrics\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mfavorite_user_id\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m200\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    115\u001b[0m                 \u001b[0mprobs_similarity\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmatmul\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mprobs_users_embeddings\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtranspose\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmovie_matrics\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;31m#挑选出的top_k*200矩阵和movie_matrics矩阵相乘\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 116\u001b[1;33m                 \u001b[0msim\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mprobs_similarity\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0meval\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    117\u001b[0m                 \u001b[0mresults\u001b[0m \u001b[1;33m=\u001b[0m 
\u001b[1;33m(\u001b[0m\u001b[1;33m-\u001b[0m\u001b[0msim\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0margsort\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mtop_k\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    118\u001b[0m                 \u001b[1;32mfor\u001b[0m \u001b[0mval\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mresults\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\tensorflow\\python\\framework\\ops.py\u001b[0m in \u001b[0;36meval\u001b[1;34m(self, feed_dict, session)\u001b[0m\n\u001b[0;32m    711\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    712\u001b[0m     \"\"\"\n\u001b[1;32m--> 713\u001b[1;33m     \u001b[1;32mreturn\u001b[0m \u001b[0m_eval_using_default_session\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgraph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msession\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    714\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    715\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\tensorflow\\python\\framework\\ops.py\u001b[0m in \u001b[0;36m_eval_using_default_session\u001b[1;34m(tensors, feed_dict, graph, session)\u001b[0m\n\u001b[0;32m   5155\u001b[0m                        \u001b[1;34m\"the tensor's graph is different from the session's \"\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   5156\u001b[0m                        \"graph.\")\n\u001b[1;32m-> 5157\u001b[1;33m   \u001b[1;32mreturn\u001b[0m \u001b[0msession\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   5158\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   5159\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36mrun\u001b[1;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m    927\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    928\u001b[0m       result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[1;32m--> 929\u001b[1;33m                          run_metadata_ptr)\n\u001b[0m\u001b[0;32m    930\u001b[0m       \u001b[1;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    931\u001b[0m         \u001b[0mproto_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_run\u001b[1;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   1150\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[1;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[1;32mor\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1151\u001b[0m       results = self._do_run(handle, final_targets, final_fetches,\n\u001b[1;32m-> 1152\u001b[1;33m                              feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[0;32m   1153\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1154\u001b[0m       \u001b[0mresults\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_do_run\u001b[1;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   1326\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1327\u001b[0m       return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[1;32m-> 1328\u001b[1;33m                            run_metadata)\n\u001b[0m\u001b[0;32m   1329\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1330\u001b[0m       \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_do_call\u001b[1;34m(self, fn, *args)\u001b[0m\n\u001b[0;32m   1332\u001b[0m   \u001b[1;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1333\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1334\u001b[1;33m       \u001b[1;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1335\u001b[0m     \u001b[1;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1336\u001b[0m       \u001b[0mmessage\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[1;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[0;32m   1315\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0m_run_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1316\u001b[0m       \u001b[1;31m# Ensure any changes to the graph are reflected in the runtime.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1317\u001b[1;33m       \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_extend_graph\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1318\u001b[0m       return self._call_tf_sessionrun(\n\u001b[0;32m   1319\u001b[0m           options, feed_dict, fetch_list, target_list, run_metadata)\n",
      "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python36\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_extend_graph\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m   1350\u001b[0m   \u001b[1;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1351\u001b[0m     \u001b[1;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_session_run_lock\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m  \u001b[1;31m# pylint: disable=protected-access\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1352\u001b[1;33m       \u001b[0mtf_session\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mExtendSession\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1353\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1354\u001b[0m   \u001b[1;31m# The threshold to run garbage collection to delete dead tensors.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# To reproduce these recommendation results, run cells 1, 3, 4, 5, 6, 7, 8, 13, 9 and 11 first.\n",
    "\n",
    "# Rate a given movie for a given user:        rating_movie\n",
    "# Recommend movies of the same genre:         recommend_same_type_movie\n",
    "# Recommend movies you may like:              recommend_your_favorite_movie\n",
    "# Viewers of this movie also liked:           recommend_other_favorite_movie\n",
    "#recommend_same_type_movie(list(movies['MovieID'][0:280]), top_k = 12)\n",
    "#recommend_your_favorite_movie(list(users['UserID'][0:280]), top_k = 5)\n",
    "recommend_other_favorite_movie(list(movies['MovieID'][0:280]), top_k = 5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Close the file handle left open by an earlier cell, if any.\n",
    "# Guarded so that a fresh kernel (where `file` was never bound) does not\n",
    "# raise NameError — the previously recorded run failed exactly that way.\n",
    "try:\n",
    "    file.close()\n",
    "except NameError:\n",
    "    pass  # nothing to close on a fresh kernel run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
