{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.5/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "import json\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('trainset_movie.json','r') as fopen:\n",
    "    trainset = json.loads(fopen.read())\n",
    "    \n",
    "with open('testset_movie.json','r') as fopen:\n",
    "    testset = json.loads(fopen.read())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "movie_info = {}\n",
    "with open('movies.dat','r',encoding = \"ISO-8859-1\") as fopen:\n",
    "    for i in list(filter(None,fopen.read().split('\\n'))):\n",
    "        i = i.split('::')\n",
    "        movie_info[int(i[0])]={'title':i[1][:-7],'genre':i[2].split('|')}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def fn(creator):\n",
    "    train_df = {'user_id': [], 'gender_id': [], 'age_id': [], 'job_id': [], 'movie_id': [],\n",
    "                'category_ids': [], 'movie_title': [], 'score': []}\n",
    "    for train_sample in creator:\n",
    "        train_sample = train_sample[1]\n",
    "        uid = train_sample[0]\n",
    "        mov_id = train_sample[4]\n",
    "        mov_dict = movie_info[mov_id]\n",
    "        train_df['user_id'].append(train_sample[0])\n",
    "        train_df['gender_id'].append(train_sample[1])\n",
    "        train_df['age_id'].append(train_sample[2])\n",
    "        train_df['job_id'].append(train_sample[3])\n",
    "        train_df['movie_id'].append(train_sample[4])\n",
    "        category_ids = [str(idx) for idx in train_sample[5]]\n",
    "        train_df['category_ids'].append(' '.join(category_ids))\n",
    "        movie_title_idx = [str(idx+1) for idx in train_sample[6]]\n",
    "        train_df['movie_title'].append(' '.join(movie_title_idx))\n",
    "        train_df['score'].append(train_sample[7][0])\n",
    "    return train_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_df = pd.DataFrame(fn(trainset))\n",
    "test_df = pd.DataFrame(fn(testset))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style>\n",
       "    .dataframe thead tr:only-child th {\n",
       "        text-align: right;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: left;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>age_id</th>\n",
       "      <th>category_ids</th>\n",
       "      <th>gender_id</th>\n",
       "      <th>job_id</th>\n",
       "      <th>movie_id</th>\n",
       "      <th>movie_title</th>\n",
       "      <th>score</th>\n",
       "      <th>user_id</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>10</td>\n",
       "      <td>1193</td>\n",
       "      <td>1829 4270 329 989 1177 2285</td>\n",
       "      <td>5.0</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0</td>\n",
       "      <td>10 8 9</td>\n",
       "      <td>1</td>\n",
       "      <td>10</td>\n",
       "      <td>661</td>\n",
       "      <td>1723 1909 989 3295 3852</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0</td>\n",
       "      <td>9 7</td>\n",
       "      <td>1</td>\n",
       "      <td>10</td>\n",
       "      <td>914</td>\n",
       "      <td>240 742 2373</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>10</td>\n",
       "      <td>3408</td>\n",
       "      <td>4764 3837</td>\n",
       "      <td>3.0</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0</td>\n",
       "      <td>10 8 13</td>\n",
       "      <td>1</td>\n",
       "      <td>10</td>\n",
       "      <td>2355</td>\n",
       "      <td>4869 2236 2942</td>\n",
       "      <td>5.0</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   age_id category_ids  gender_id  job_id  movie_id  \\\n",
       "0       0            1          1      10      1193   \n",
       "1       0       10 8 9          1      10       661   \n",
       "2       0          9 7          1      10       914   \n",
       "3       0            1          1      10      3408   \n",
       "4       0      10 8 13          1      10      2355   \n",
       "\n",
       "                   movie_title  score  user_id  \n",
       "0  1829 4270 329 989 1177 2285    5.0        1  \n",
       "1      1723 1909 989 3295 3852    1.0        1  \n",
       "2                 240 742 2373    1.0        1  \n",
       "3                    4764 3837    3.0        1  \n",
       "4               4869 2236 2942    5.0        1  "
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Quick sanity check of the flattened training frame.\n",
     "train_df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_data(df):\n",
    "\n",
    "    category_ids = []\n",
    "    for category_id in df['category_ids'].values:\n",
    "        temp_li = [0] * 18\n",
    "        category_id_li = category_id.split()\n",
    "        for idx in category_id_li:\n",
    "            temp_li[int(idx)] = 1\n",
    "        category_ids.append(temp_li)\n",
    "    \n",
    "    movie_titles = []\n",
    "    for mov_title in df['movie_title'].values:\n",
    "        temp_li = [0] * 10\n",
    "        mov_title_li = mov_title.split()\n",
    "        for i in range(len(mov_title_li[:10])):\n",
    "            temp_li[i] = int(mov_title_li[i])\n",
    "        movie_titles.append(temp_li)\n",
    "        \n",
    "    return df['score'],df['user_id'].values,df['gender_id'].values,df['gender_id'].values,df['job_id'].values,df['movie_id'].values,np.array(category_ids),np.array(movie_titles)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Unpack the training columns into parallel arrays for mini-batching.\n",
     "Y,user_ids,gender_ids,age_ids,job_ids,movie_ids,category_ids,movie_titles = get_data(train_df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def global_max_pooling(x):\n",
    "    batch_size = tf.shape(x)[0]\n",
    "    num_units = x.get_shape().as_list()[-1]\n",
    "    x = tf.layers.max_pooling1d(x, x.get_shape().as_list()[1], 1)\n",
    "    x = tf.reshape(x, [batch_size, num_units])\n",
    "    return x\n",
    "\n",
    "def cos_sim(x1, x2, scale=1):\n",
    "    x1_norm = tf.nn.l2_normalize(x1, -1)\n",
    "    x2_norm = tf.nn.l2_normalize(x2, -1)\n",
    "    cos_sim = tf.reduce_sum(tf.multiply(x1_norm, x2_norm), -1)\n",
    "    return scale * cos_sim\n",
    "\n",
     "class Model:\n",
     "    \"\"\"Two-tower rating model.\n",
     "\n",
     "    A user tower (user/gender/age/job embeddings) and a movie tower\n",
     "    (movie-id embedding, 18-d genre multi-hot, conv features over 10\n",
     "    title token ids) are each projected to a 200-d tanh vector; the\n",
     "    predicted score is their cosine similarity scaled by 5, trained\n",
     "    against the observed rating with mean-squared error and Adam.\n",
     "    \"\"\"\n",
     "    def __init__(self,movie_id_size,job_id_size,user_id_size,\n",
     "                age_id_size,movie_title_vocab_size,learning_rate):\n",
     "        # Per-example inputs; id placeholders are flat int batches.\n",
     "        self.user_id = tf.placeholder(tf.int32, [None])\n",
     "        self.gender_id = tf.placeholder(tf.int32, [None])\n",
     "        self.age_id = tf.placeholder(tf.int32, [None])\n",
     "        self.job_id = tf.placeholder(tf.int32, [None])\n",
     "        self.movie_id = tf.placeholder(tf.int32, [None])\n",
     "        self.category_ids = tf.placeholder(tf.int32, [None, 18])  # genre multi-hot\n",
     "        self.movie_title = tf.placeholder(tf.int32, [None, 10])  # padded title token ids\n",
     "        self.Y = tf.placeholder(tf.float32, [None])  # target rating\n",
     "        \n",
     "        # --- user tower ---\n",
     "        with tf.variable_scope('user_id'):\n",
     "            user_id_embed = tf.contrib.layers.embed_sequence(\n",
     "                 ids = self.user_id,\n",
     "                 vocab_size = user_id_size,\n",
     "                 embed_dim = 32)\n",
     "            user_id_fc = tf.layers.dense(user_id_embed, 32)\n",
     "            \n",
     "        with tf.variable_scope('gender_id'):\n",
     "            gender_id_embed = tf.contrib.layers.embed_sequence(\n",
     "                ids = self.gender_id,\n",
     "                vocab_size = 2,\n",
     "                embed_dim = 16)\n",
     "            gender_id_fc = tf.layers.dense(gender_id_embed, 16)\n",
     "            \n",
     "        with tf.variable_scope('age_id'):\n",
     "            age_id_embed = tf.contrib.layers.embed_sequence(\n",
     "                ids = self.age_id,\n",
     "                vocab_size = age_id_size,\n",
     "                embed_dim = 16)\n",
     "            age_id_fc = tf.layers.dense(age_id_embed, 16)\n",
     "            \n",
     "        with tf.variable_scope('job_id'):\n",
     "            job_id_embed = tf.contrib.layers.embed_sequence(\n",
     "                ids = self.job_id,\n",
     "                vocab_size = job_id_size,\n",
     "                embed_dim = 16)\n",
     "            job_id_fc = tf.layers.dense(job_id_embed, 16)\n",
     "            \n",
     "        # Concatenate user-side features and project into the shared 200-d space.\n",
     "        user_feats = tf.concat([user_id_fc, gender_id_fc, age_id_fc, job_id_fc], -1)\n",
     "        self.user_feats = tf.layers.dense(user_feats, 200, tf.tanh)\n",
     "        \n",
     "        # --- movie tower ---\n",
     "        with tf.variable_scope('movie_id'):\n",
     "            movie_id_embed = tf.contrib.layers.embed_sequence(\n",
     "                ids = self.movie_id,\n",
     "                vocab_size = movie_id_size,\n",
     "                embed_dim = 32)\n",
     "            movie_id_fc = tf.layers.dense(movie_id_embed, 32)\n",
     "            \n",
     "        with tf.variable_scope('category_ids'):\n",
     "            # Dense projection of the 18-d genre multi-hot vector.\n",
     "            category_fc = tf.layers.dense(tf.to_float(self.category_ids), 32)\n",
     "\n",
     "        with tf.variable_scope('movie_title'):\n",
     "            movie_title_embed = tf.contrib.layers.embed_sequence(\n",
     "                ids = self.movie_title,\n",
     "                vocab_size = movie_title_vocab_size,\n",
     "                embed_dim = 32)\n",
     "            # 1-D conv (kernel width 3) over title tokens, then global max pool.\n",
     "            movie_title_conv = tf.layers.conv1d(movie_title_embed, 32, 3)\n",
     "            movie_title_fc = global_max_pooling(movie_title_conv)\n",
     "            \n",
     "        movie_feats = tf.concat([movie_id_fc, category_fc, movie_title_fc], -1)\n",
     "        self.movie_feats = tf.layers.dense(movie_feats, 200, tf.tanh)\n",
     "        self.global_step = tf.Variable(0, trainable=False)\n",
     "        # scale=5 stretches cosine similarity to span the rating range.\n",
     "        predicted_score = cos_sim(self.user_feats, self.movie_feats, scale=5)\n",
     "        self.cost = tf.reduce_mean(tf.squared_difference(predicted_score, self.Y))\n",
     "        self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost,global_step=self.global_step)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "size_user_ids = np.unique(user_ids).shape[0]\n",
    "size_movie_ids = np.unique(movie_ids).shape[0]\n",
    "size_job_ids = np.unique(job_ids).shape[0]\n",
    "size_age_ids = np.unique(age_ids).shape[0]\n",
    "learning_rate = 1e-4\n",
    "size_movie_titles = 5175\n",
    "epoch = 1\n",
    "batch_size = 128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Build a fresh graph, construct the model, and initialise all variables.\n",
     "tf.reset_default_graph()\n",
     "sess = tf.InteractiveSession()\n",
     "model = Model(size_movie_ids,size_job_ids,size_user_ids,size_age_ids,size_movie_titles,learning_rate)\n",
     "sess.run(tf.global_variables_initializer())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch 1, step 1, loss 9.271559\n",
      "epoch 1, step 500, loss 2.213113\n",
      "epoch 1, step 1000, loss 4.583295\n",
      "epoch 1, step 1500, loss 1.837167\n",
      "epoch 1, step 2000, loss 2.927567\n",
      "epoch 1, step 2500, loss 4.564452\n",
      "epoch 1, step 3000, loss 2.903585\n",
      "epoch 1, step 3500, loss 2.202424\n",
      "epoch 1, step 4000, loss 4.401066\n",
      "epoch 1, step 4500, loss 3.354321\n",
      "epoch 1, step 5000, loss 4.106241\n",
      "epoch 1, step 5500, loss 4.069396\n",
      "epoch 1, step 6000, loss 4.347765\n",
      "epoch 1, step 6500, loss 3.043517\n",
      "epoch 1, step 7000, loss 4.156744\n",
      "epoch 1, avg loss 4.032794\n"
     ]
    }
   ],
   "source": [
     "# Mini-batch training loop; the final partial batch is dropped.\n",
     "for i in range(epoch):\n",
     "    total_cost = 0\n",
     "    for k in range(0, (Y.shape[0] // batch_size)*batch_size,batch_size):\n",
     "        # Slice aligned batches from every feature array.\n",
     "        batch_user = user_ids[k:k+batch_size]\n",
     "        batch_movie = movie_ids[k:k+batch_size]\n",
     "        batch_job = job_ids[k:k+batch_size]\n",
     "        batch_age = age_ids[k:k+batch_size]\n",
     "        batch_gender = gender_ids[k:k+batch_size]\n",
     "        batch_category = category_ids[k:k+batch_size]\n",
     "        batch_title = movie_titles[k:k+batch_size]\n",
     "        batch_y = Y[k:k+batch_size]\n",
     "        # One optimisation step; global_step is read back for logging.\n",
     "        step, loss, _ = sess.run([model.global_step, model.cost, model.optimizer],\n",
     "                                feed_dict={model.user_id:batch_user, model.gender_id:batch_gender,\n",
     "                                           model.age_id:batch_age, model.job_id: batch_job,\n",
     "                                           model.movie_id:batch_movie, model.category_ids:batch_category,\n",
     "                                           model.movie_title: batch_title, model.Y:batch_y})\n",
     "        if step % 500 == 0 or step == 1:\n",
     "            print('epoch %d, step %d, loss %f'%(i+1,step,loss))\n",
     "        total_cost += loss\n",
     "    # Average loss over the full (whole-batch) steps of this epoch.\n",
     "    total_cost /= (Y.shape[0] // batch_size)\n",
     "    print('epoch %d, avg loss %f'%(i+1,total_cost))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
