{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use %pip (not !pip) so the package is installed into the kernel's own\n",
    "# environment; consider pinning a version for reproducibility.\n",
    "%pip install theano"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "Created on Aug 9, 2016\n",
    "Keras Implementation of Neural Matrix Factorization (NeuMF) recommender model in:\n",
    "He Xiangnan et al. Neural Collaborative Filtering. In WWW 2017.  \n",
    "@author: Xiangnan He (xiangnanhe@gmail.com)\n",
    "'''\n",
    "import numpy as np\n",
    "\n",
    "import theano\n",
    "import theano.tensor as T\n",
    "import keras\n",
    "from keras import backend as K\n",
    "from keras import initializers\n",
    "from keras.regularizers import l1, l2, l1_l2\n",
    "from keras.models import Sequential, Model\n",
    "from keras.layers.core import Dense, Lambda, Activation\n",
    "from keras.layers import Embedding, Input, Dense, Concatenate, Reshape, Flatten, Dropout, Multiply\n",
    "from keras.optimizers import Adagrad, Adam, SGD, RMSprop\n",
    "from evaluate import evaluate_model\n",
    "from Dataset import Dataset\n",
    "from time import time\n",
    "import sys\n",
    "import GMF, MLP\n",
    "import argparse\n",
    "\n",
    "\n",
    "#def init_normal(shape, name=None):\n",
    "#    return initializations.RandomNormal(shape, scale=0.01, name=name)\n",
    "\n",
    "def get_model(num_users, num_items, mf_dim=10, layers=[10], reg_layers=[0], reg_mf=0):\n",
    "    '''Build the NeuMF model: a GMF branch and an MLP branch fused by a Dense layer.\n",
    "\n",
    "    Args:\n",
    "        num_users: number of users (embedding vocabulary size).\n",
    "        num_items: number of items (embedding vocabulary size).\n",
    "        mf_dim: latent dimension of the GMF embeddings.\n",
    "        layers: MLP layer sizes; layers[0] is the concatenated embedding size,\n",
    "                so each MLP embedding has size layers[0] // 2.\n",
    "        reg_layers: L2 regularization per MLP layer (same length as layers).\n",
    "        reg_mf: L2 regularization for the GMF embeddings.\n",
    "\n",
    "    Returns:\n",
    "        An uncompiled Keras Model mapping [user_input, item_input] to a sigmoid score.\n",
    "    '''\n",
    "    assert len(layers) == len(reg_layers)\n",
    "    num_layer = len(layers)  # number of layers in the MLP tower\n",
    "\n",
    "    # Input variables: one user id and one item id per example\n",
    "    user_input = Input(shape=(1,), dtype='int32', name='user_input')\n",
    "    item_input = Input(shape=(1,), dtype='int32', name='item_input')\n",
    "\n",
    "    # Embedding tables (the GMF and MLP branches use separate embeddings)\n",
    "    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=mf_dim, name='mf_embedding_user',\n",
    "                                  embeddings_initializer='uniform', embeddings_regularizer=l2(reg_mf), input_length=1)\n",
    "    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=mf_dim, name='mf_embedding_item',\n",
    "                                  embeddings_initializer='uniform', embeddings_regularizer=l2(reg_mf), input_length=1)\n",
    "\n",
    "    # Floor division replaces int(x/2): same result for non-negative sizes, clearer intent\n",
    "    MLP_Embedding_User = Embedding(input_dim=num_users, output_dim=layers[0] // 2, name='mlp_embedding_user',\n",
    "                                   embeddings_initializer='uniform', embeddings_regularizer=l2(reg_layers[0]), input_length=1)\n",
    "    MLP_Embedding_Item = Embedding(input_dim=num_items, output_dim=layers[0] // 2, name='mlp_embedding_item',\n",
    "                                   embeddings_initializer='uniform', embeddings_regularizer=l2(reg_layers[0]), input_length=1)\n",
    "\n",
    "    # GMF part: element-wise product of user and item latent vectors\n",
    "    mf_user_latent = Flatten()(MF_Embedding_User(user_input))\n",
    "    mf_item_latent = Flatten()(MF_Embedding_Item(item_input))\n",
    "    mf_vector = Multiply()([mf_user_latent, mf_item_latent])\n",
    "\n",
    "    # MLP part: concatenate the embeddings, then a tower of ReLU Dense layers\n",
    "    mlp_user_latent = Flatten()(MLP_Embedding_User(user_input))\n",
    "    mlp_item_latent = Flatten()(MLP_Embedding_Item(item_input))\n",
    "    mlp_vector = Concatenate()([mlp_user_latent, mlp_item_latent])\n",
    "    for idx in range(1, num_layer):\n",
    "        layer = Dense(layers[idx], kernel_regularizer=l2(reg_layers[idx]), activation='relu', name=\"layer%d\" % idx)\n",
    "        mlp_vector = layer(mlp_vector)\n",
    "\n",
    "    # Fuse the two branches and score with a single sigmoid unit\n",
    "    predict_vector = Concatenate()([mf_vector, mlp_vector])\n",
    "    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name=\"prediction\")(predict_vector)\n",
    "\n",
    "    model = Model(inputs=[user_input, item_input],\n",
    "                  outputs=prediction)\n",
    "    return model\n",
    "\n",
    "def load_pretrain_model(model, gmf_model, mlp_model, num_layers):\n",
    "    '''Warm-start a NeuMF model from pretrained GMF and MLP models.\n",
    "\n",
    "    Copies both pairs of embedding tables and every MLP hidden layer, then\n",
    "    builds the prediction head by stacking the two pretrained heads and\n",
    "    scaling everything by 0.5. Returns the (mutated) NeuMF model.\n",
    "    '''\n",
    "    # GMF branch embeddings\n",
    "    model.get_layer('mf_embedding_user').set_weights(gmf_model.get_layer('user_embedding').get_weights())\n",
    "    model.get_layer('mf_embedding_item').set_weights(gmf_model.get_layer('item_embedding').get_weights())\n",
    "\n",
    "    # MLP branch embeddings\n",
    "    model.get_layer('mlp_embedding_user').set_weights(mlp_model.get_layer('user_embedding').get_weights())\n",
    "    model.get_layer('mlp_embedding_item').set_weights(mlp_model.get_layer('item_embedding').get_weights())\n",
    "\n",
    "    # Hidden Dense layers of the MLP tower\n",
    "    for idx in range(1, num_layers):\n",
    "        layer_name = 'layer%d' % idx\n",
    "        model.get_layer(layer_name).set_weights(mlp_model.get_layer(layer_name).get_weights())\n",
    "\n",
    "    # Prediction head: stack the two kernels along the input axis, average 0.5/0.5\n",
    "    gmf_kernel, gmf_bias = gmf_model.get_layer('prediction').get_weights()\n",
    "    mlp_kernel, mlp_bias = mlp_model.get_layer('prediction').get_weights()\n",
    "    fused_kernel = np.concatenate((gmf_kernel, mlp_kernel), axis=0)\n",
    "    fused_bias = gmf_bias + mlp_bias\n",
    "    model.get_layer('prediction').set_weights([0.5 * fused_kernel, 0.5 * fused_bias])\n",
    "    return model\n",
    "\n",
    "def get_train_instances(train, num_negatives):\n",
    "    '''Build pointwise training instances from the sparse train matrix.\n",
    "\n",
    "    For every observed (u, i) pair, emits one positive example and\n",
    "    num_negatives random negatives (items u has not interacted with).\n",
    "\n",
    "    Args:\n",
    "        train: dok-style sparse matrix of shape (num_users, num_items);\n",
    "               train.keys() yields the observed (user, item) pairs.\n",
    "        num_negatives: number of negative samples per positive example.\n",
    "\n",
    "    Returns:\n",
    "        (user_input, item_input, labels) as three parallel Python lists.\n",
    "    '''\n",
    "    user_input, item_input, labels = [], [], []\n",
    "    # BUGFIX: derive num_items from the matrix itself; the original read a\n",
    "    # global `num_items` that is only defined in a later cell (hidden state).\n",
    "    num_items = train.shape[1]\n",
    "    for (u, i) in train.keys():\n",
    "        # positive instance\n",
    "        user_input.append(u)\n",
    "        item_input.append(i)\n",
    "        labels.append(1)\n",
    "        # negative instances: resample until we hit an unobserved item\n",
    "        for _ in range(num_negatives):\n",
    "            j = np.random.randint(num_items)\n",
    "            while (u, j) in train:\n",
    "                j = np.random.randint(num_items)\n",
    "            user_input.append(u)\n",
    "            item_input.append(j)\n",
    "            labels.append(0)\n",
    "    return user_input, item_input, labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Load data done [11.3 s]. #user=6040, #item=3706, #train=994169, #test=6040\n",
      "Init: HR = 0.1068, NDCG = 0.0472\n",
      "Iteration 0 [45.8 s]: HR = 0.6003, NDCG = 0.3446, loss = 0.3199 [96.8 s]\n",
      "Iteration 1 [45.1 s]: HR = 0.6373, NDCG = 0.3719, loss = 0.2746 [96.8 s]\n",
      "Iteration 2 [45.1 s]: HR = 0.6575, NDCG = 0.3861, loss = 0.2640 [98.1 s]\n",
      "Iteration 3 [45.3 s]: HR = 0.6623, NDCG = 0.3907, loss = 0.2578 [96.6 s]\n",
      "Iteration 4 [44.9 s]: HR = 0.6712, NDCG = 0.3958, loss = 0.2534 [96.4 s]\n",
      "Iteration 5 [45.0 s]: HR = 0.6783, NDCG = 0.4017, loss = 0.2502 [96.9 s]\n",
      "Iteration 6 [44.8 s]: HR = 0.6808, NDCG = 0.4046, loss = 0.2472 [95.4 s]\n",
      "Iteration 7 [44.9 s]: HR = 0.6816, NDCG = 0.4033, loss = 0.2450 [96.2 s]\n",
      "Iteration 8 [44.5 s]: HR = 0.6829, NDCG = 0.4059, loss = 0.2430 [96.1 s]\n",
      "Iteration 9 [44.9 s]: HR = 0.6798, NDCG = 0.4021, loss = 0.2412 [98.2 s]\n",
      "Iteration 10 [45.0 s]: HR = 0.6846, NDCG = 0.4087, loss = 0.2394 [97.0 s]\n",
      "Iteration 11 [45.0 s]: HR = 0.6801, NDCG = 0.4059, loss = 0.2379 [97.3 s]\n",
      "Iteration 12 [45.0 s]: HR = 0.6793, NDCG = 0.4054, loss = 0.2365 [95.5 s]\n",
      "Iteration 13 [44.8 s]: HR = 0.6772, NDCG = 0.4058, loss = 0.2350 [96.1 s]\n",
      "Iteration 14 [44.8 s]: HR = 0.6849, NDCG = 0.4106, loss = 0.2337 [96.1 s]\n",
      "Iteration 15 [45.0 s]: HR = 0.6849, NDCG = 0.4093, loss = 0.2325 [96.8 s]\n",
      "Iteration 16 [46.6 s]: HR = 0.6747, NDCG = 0.4029, loss = 0.2315 [96.1 s]\n",
      "Iteration 17 [44.9 s]: HR = 0.6798, NDCG = 0.4093, loss = 0.2306 [95.7 s]\n",
      "Iteration 18 [44.8 s]: HR = 0.6772, NDCG = 0.4055, loss = 0.2297 [96.0 s]\n",
      "Iteration 19 [44.8 s]: HR = 0.6810, NDCG = 0.4088, loss = 0.2288 [95.9 s]\n",
      "End. Best Iteration 14:  HR = 0.6849, NDCG = 0.4106. \n",
      "The best NeuMF model is saved to /data/workspace/skinet/Pretrain/ml-1m_NeuMF_8_[64, 32, 16, 8]_1623214404.h5\n"
     ]
    }
   ],
   "source": [
    "# ---------------- Configuration ----------------\n",
    "# NOTE: this cell is now written at top level (no leading indent); the original\n",
    "# was uniformly indented and only ran thanks to IPython's leading-indent\n",
    "# transformer — plain Python would raise IndentationError.\n",
    "num_epochs = 20\n",
    "batch_size = 256\n",
    "mf_dim = 8                # GMF embedding size\n",
    "layers = [64,32,16,8]     # MLP tower; layers[0] is the concatenated embedding size\n",
    "reg_mf = 0\n",
    "reg_layers = [0,0,0,0]\n",
    "num_negatives = 4         # negative samples per positive instance\n",
    "learning_rate = 0.001\n",
    "learner = 'adam'\n",
    "verbose = 1               # evaluate every `verbose` epochs\n",
    "mf_pretrain = ''          # path to pretrained GMF weights ('' = train from scratch)\n",
    "mlp_pretrain = ''         # path to pretrained MLP weights ('' = train from scratch)\n",
    "out = 1                   # save the best weights when > 0\n",
    "dataset_name = 'ml-1m'\n",
    "path = '/data/workspace/skinet/myfiles/data/'  # NOTE(review): hardcoded absolute path\n",
    "\n",
    "topK = 10\n",
    "evaluation_threads = 1  # mp.cpu_count()\n",
    "model_out_file = '/data/workspace/skinet/Pretrain/%s_NeuMF_%d_%s_%d.h5' %('ml-1m', mf_dim, layers, time())\n",
    "\n",
    "# ---------------- Load data ----------------\n",
    "t1 = time()\n",
    "dataset = Dataset(path + dataset_name)\n",
    "train, testRatings, testNegatives = dataset.trainMatrix, dataset.testRatings, dataset.testNegatives\n",
    "num_users, num_items = train.shape\n",
    "print(\"Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d\" \n",
    "      %(time()-t1, num_users, num_items, train.nnz, len(testRatings)))\n",
    "\n",
    "# ---------------- Build and compile the model ----------------\n",
    "model = get_model(num_users, num_items, mf_dim, layers, reg_layers, reg_mf)\n",
    "if learner.lower() == \"adagrad\":\n",
    "    model.compile(optimizer=Adagrad(lr=learning_rate), loss='binary_crossentropy')\n",
    "elif learner.lower() == \"rmsprop\":\n",
    "    model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')\n",
    "elif learner.lower() == \"adam\":\n",
    "    model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy')\n",
    "else:\n",
    "    model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy')\n",
    "\n",
    "# ---------------- Optional warm start from pretrained GMF / MLP ----------------\n",
    "if mf_pretrain != '' and mlp_pretrain != '':\n",
    "    gmf_model = GMF.get_model(num_users,num_items,mf_dim)\n",
    "    gmf_model.load_weights(mf_pretrain)\n",
    "    mlp_model = MLP.get_model(num_users,num_items, layers, reg_layers)\n",
    "    mlp_model.load_weights(mlp_pretrain)\n",
    "    model = load_pretrain_model(model, gmf_model, mlp_model, len(layers))\n",
    "    print(\"Load pretrained GMF (%s) and MLP (%s) models done. \" %(mf_pretrain, mlp_pretrain))\n",
    "\n",
    "# ---------------- Baseline performance before training ----------------\n",
    "(hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)\n",
    "hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()\n",
    "print('Init: HR = %.4f, NDCG = %.4f' % (hr, ndcg))\n",
    "best_hr, best_ndcg, best_iter = hr, ndcg, -1\n",
    "if out > 0:\n",
    "    model.save_weights(model_out_file, overwrite=True)\n",
    "\n",
    "# ---------------- Training loop ----------------\n",
    "for epoch in range(num_epochs):\n",
    "    t1 = time()\n",
    "    # Re-sample negative instances every epoch\n",
    "    user_input, item_input, labels = get_train_instances(train, num_negatives)\n",
    "\n",
    "    hist = model.fit([np.array(user_input), np.array(item_input)],  # inputs\n",
    "                     np.array(labels),  # labels\n",
    "                     batch_size=batch_size, epochs=1, verbose=0, shuffle=True)\n",
    "    t2 = time()\n",
    "\n",
    "    # Periodic evaluation; keep the best checkpoint by HR@topK\n",
    "    if epoch %verbose == 0:\n",
    "        (hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)\n",
    "        hr, ndcg, loss = np.array(hits).mean(), np.array(ndcgs).mean(), hist.history['loss'][0]\n",
    "        print('Iteration %d [%.1f s]: HR = %.4f, NDCG = %.4f, loss = %.4f [%.1f s]' \n",
    "              % (epoch,  t2-t1, hr, ndcg, loss, time()-t2))\n",
    "        if hr > best_hr:\n",
    "            best_hr, best_ndcg, best_iter = hr, ndcg, epoch\n",
    "            if out > 0:\n",
    "                model.save_weights(model_out_file, overwrite=True)\n",
    "\n",
    "print(\"End. Best Iteration %d:  HR = %.4f, NDCG = %.4f. \" %(best_iter, best_hr, best_ndcg))\n",
    "if out > 0:\n",
    "    print(\"The best NeuMF model is saved to %s\" %(model_out_file))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
