{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "Untitled0.ipynb",
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "id": "RasF2Zg66y-o"
      },
      "source": [
        ""
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "eqElwgPd7_fk",
        "outputId": "137b78ac-1586-44ff-8986-73a696c27cee",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 617
        }
      },
      "source": [
        "# Mount Google Drive (Colab-only). NOTE(review): nothing below reads from\n",
        "# Drive -- the data is wget'd locally -- so this cell is optional.\n",
        "from google.colab import drive\n",
        "drive.mount('/content/drive')"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "error",
          "ename": "KeyboardInterrupt",
          "evalue": "ignored",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py\u001b[0m in \u001b[0;36m_input_request\u001b[0;34m(self, prompt, ident, parent, password)\u001b[0m\n\u001b[1;32m    728\u001b[0m             \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 729\u001b[0;31m                 \u001b[0mident\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreply\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msession\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstdin_socket\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    730\u001b[0m             \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/jupyter_client/session.py\u001b[0m in \u001b[0;36mrecv\u001b[0;34m(self, socket, mode, content, copy)\u001b[0m\n\u001b[1;32m    802\u001b[0m         \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 803\u001b[0;31m             \u001b[0mmsg_list\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msocket\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv_multipart\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcopy\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcopy\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    804\u001b[0m         \u001b[0;32mexcept\u001b[0m \u001b[0mzmq\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mZMQError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/zmq/sugar/socket.py\u001b[0m in \u001b[0;36mrecv_multipart\u001b[0;34m(self, flags, copy, track)\u001b[0m\n\u001b[1;32m    490\u001b[0m         \"\"\"\n\u001b[0;32m--> 491\u001b[0;31m         \u001b[0mparts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mflags\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcopy\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcopy\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtrack\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtrack\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    492\u001b[0m         \u001b[0;31m# have first part already, only loop while more to receive\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32mzmq/backend/cython/socket.pyx\u001b[0m in \u001b[0;36mzmq.backend.cython.socket.Socket.recv\u001b[0;34m()\u001b[0m\n",
            "\u001b[0;32mzmq/backend/cython/socket.pyx\u001b[0m in \u001b[0;36mzmq.backend.cython.socket.Socket.recv\u001b[0;34m()\u001b[0m\n",
            "\u001b[0;32mzmq/backend/cython/socket.pyx\u001b[0m in \u001b[0;36mzmq.backend.cython.socket._recv_copy\u001b[0;34m()\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/zmq/backend/cython/checkrc.pxd\u001b[0m in \u001b[0;36mzmq.backend.cython.checkrc._check_rc\u001b[0;34m()\u001b[0m\n",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m: ",
            "\nDuring handling of the above exception, another exception occurred:\n",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
            "\u001b[0;32m<ipython-input-1-d5df0069828e>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mgoogle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolab\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdrive\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mdrive\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmount\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'/content/drive'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/google/colab/drive.py\u001b[0m in \u001b[0;36mmount\u001b[0;34m(mountpoint, force_remount, timeout_ms, use_metadata_server)\u001b[0m\n\u001b[1;32m    249\u001b[0m       \u001b[0;32mwith\u001b[0m \u001b[0m_output\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muse_tags\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'dfs-auth-dance'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    250\u001b[0m         \u001b[0;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfifo\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'w'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mfifo_file\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 251\u001b[0;31m           \u001b[0mfifo_file\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mget_code\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mauth_prompt\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m'\\n'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    252\u001b[0m       \u001b[0mwrote_to_fifo\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    253\u001b[0m     \u001b[0;32melif\u001b[0m \u001b[0mcase\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m5\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py\u001b[0m in \u001b[0;36mraw_input\u001b[0;34m(self, prompt)\u001b[0m\n\u001b[1;32m    702\u001b[0m             \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_parent_ident\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    703\u001b[0m             \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_parent_header\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 704\u001b[0;31m             \u001b[0mpassword\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    705\u001b[0m         )\n\u001b[1;32m    706\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py\u001b[0m in \u001b[0;36m_input_request\u001b[0;34m(self, prompt, ident, parent, password)\u001b[0m\n\u001b[1;32m    732\u001b[0m             \u001b[0;32mexcept\u001b[0m \u001b[0mKeyboardInterrupt\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    733\u001b[0m                 \u001b[0;31m# re-raise KeyboardInterrupt, to truncate traceback\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 734\u001b[0;31m                 \u001b[0;32mraise\u001b[0m \u001b[0mKeyboardInterrupt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    735\u001b[0m             \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    736\u001b[0m                 \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Y78EqqQS8OlN"
      },
      "source": [
        "# Core data-handling and plotting libraries\n",
        "import pandas as pd\n",
        "import numpy as np\n",
        "import matplotlib.pyplot as plt"
      ],
      "execution_count": 1,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "pP8Rjf6T8WiB",
        "outputId": "e2e297fd-0804-4537-acc4-3d29e93a207b",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 646
        }
      },
      "source": [
        "# Download and extract the MovieLens 100k dataset (train/test splits u1..u5, ua, ub)\n",
        "!wget \"http://files.grouplens.org/datasets/movielens/ml-100k.zip\"\n",
        "!unzip ml-100k.zip\n",
        "!ls"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "--2020-10-05 14:51:04--  http://files.grouplens.org/datasets/movielens/ml-100k.zip\n",
            "Resolving files.grouplens.org (files.grouplens.org)... 128.101.65.152\n",
            "Connecting to files.grouplens.org (files.grouplens.org)|128.101.65.152|:80... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 4924029 (4.7M) [application/zip]\n",
            "Saving to: ‘ml-100k.zip’\n",
            "\n",
            "ml-100k.zip         100%[===================>]   4.70M  5.60MB/s    in 0.8s    \n",
            "\n",
            "2020-10-05 14:51:05 (5.60 MB/s) - ‘ml-100k.zip’ saved [4924029/4924029]\n",
            "\n",
            "Archive:  ml-100k.zip\n",
            "   creating: ml-100k/\n",
            "  inflating: ml-100k/allbut.pl       \n",
            "  inflating: ml-100k/mku.sh          \n",
            "  inflating: ml-100k/README          \n",
            "  inflating: ml-100k/u.data          \n",
            "  inflating: ml-100k/u.genre         \n",
            "  inflating: ml-100k/u.info          \n",
            "  inflating: ml-100k/u.item          \n",
            "  inflating: ml-100k/u.occupation    \n",
            "  inflating: ml-100k/u.user          \n",
            "  inflating: ml-100k/u1.base         \n",
            "  inflating: ml-100k/u1.test         \n",
            "  inflating: ml-100k/u2.base         \n",
            "  inflating: ml-100k/u2.test         \n",
            "  inflating: ml-100k/u3.base         \n",
            "  inflating: ml-100k/u3.test         \n",
            "  inflating: ml-100k/u4.base         \n",
            "  inflating: ml-100k/u4.test         \n",
            "  inflating: ml-100k/u5.base         \n",
            "  inflating: ml-100k/u5.test         \n",
            "  inflating: ml-100k/ua.base         \n",
            "  inflating: ml-100k/ua.test         \n",
            "  inflating: ml-100k/ub.base         \n",
            "  inflating: ml-100k/ub.test         \n",
            "ml-100k  ml-100k.zip  sample_data\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "tA5_lDbsPpox",
        "outputId": "89c69985-176c-4a64-cb3e-bd1929c9f264",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 323
        }
      },
      "source": [
        "# Download and extract the MovieLens 1M dataset (movies/users/ratings .dat files)\n",
        "!wget \"http://files.grouplens.org/datasets/movielens/ml-1m.zip\"\n",
        "!unzip ml-1m.zip\n",
        "!ls"
      ],
      "execution_count": 3,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "--2020-10-05 14:51:19--  http://files.grouplens.org/datasets/movielens/ml-1m.zip\n",
            "Resolving files.grouplens.org (files.grouplens.org)... 128.101.65.152\n",
            "Connecting to files.grouplens.org (files.grouplens.org)|128.101.65.152|:80... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 5917549 (5.6M) [application/zip]\n",
            "Saving to: ‘ml-1m.zip’\n",
            "\n",
            "ml-1m.zip           100%[===================>]   5.64M  6.72MB/s    in 0.8s    \n",
            "\n",
            "2020-10-05 14:51:21 (6.72 MB/s) - ‘ml-1m.zip’ saved [5917549/5917549]\n",
            "\n",
            "Archive:  ml-1m.zip\n",
            "   creating: ml-1m/\n",
            "  inflating: ml-1m/movies.dat        \n",
            "  inflating: ml-1m/ratings.dat       \n",
            "  inflating: ml-1m/README            \n",
            "  inflating: ml-1m/users.dat         \n",
            "ml-100k  ml-100k.zip  ml-1m  ml-1m.zip\tsample_data\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "quPn_LWwPtdt"
      },
      "source": [
        "import torch\n",
        "import torch.nn as nn #Neural-network layers and loss functions\n",
        "import torch.nn.parallel #Parallel-computation helpers\n",
        "import torch.optim as optim #Optimizers (RMSprop is used below)\n",
        "import torch.utils.data \n",
        "from torch.autograd import Variable # Legacy autograd wrapper (kept for compatibility)"
      ],
      "execution_count": 6,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "KIGjxK2OP6d5"
      },
      "source": [
        "# We won't be using this dataset. Just for observing.\n",
        "# engine='python' is required because the multi-character separator '::'\n",
        "# is not supported by the default C parser; the files have no header row.\n",
        "movies = pd.read_csv('ml-1m/movies.dat', sep = '::', header = None, engine = 'python', encoding = 'latin-1')\n",
        "users = pd.read_csv('ml-1m/users.dat', sep = '::', header = None, engine = 'python', encoding = 'latin-1')\n",
        "ratings = pd.read_csv('ml-1m/ratings.dat', sep = '::', header = None, engine = 'python', encoding = 'latin-1')"
      ],
      "execution_count": 7,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "GMU7s5oNQLYg"
      },
      "source": [
        "# Load the u1 train/test split. The files are headerless TSV rows of\n",
        "# (user_id, movie_id, rating, timestamp). header=None is required:\n",
        "# without it pandas treats the first rating row as column names and\n",
        "# silently drops one observation from each file.\n",
        "training_set = pd.read_csv('ml-100k/u1.base', delimiter = '\\t', header = None)\n",
        "training_set = np.array(training_set, dtype = 'int')\n",
        "test_set = pd.read_csv('ml-100k/u1.test', delimiter = '\\t', header = None)\n",
        "test_set = np.array(test_set, dtype = 'int')"
      ],
      "execution_count": 44,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "yRURCe6VQNkt",
        "outputId": "6e521572-ef54-47a9-9eb4-340cc758b3a0",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 136
        }
      },
      "source": [
        "training_set\n"
      ],
      "execution_count": 45,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "array([[        1,         2,         3, 876893171],\n",
              "       [        1,         3,         4, 878542960],\n",
              "       [        1,         4,         3, 876893119],\n",
              "       ...,\n",
              "       [      943,      1188,         3, 888640250],\n",
              "       [      943,      1228,         3, 888640275],\n",
              "       [      943,      1330,         3, 888692465]])"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 45
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "BBYFLcxTQgRk"
      },
      "source": [
        "# Highest user/movie ID seen in either split (IDs are 1-based, so this\n",
        "# is also the count of users/movies to allocate for).\n",
        "nb_users = int(max(training_set[:, 0].max(), test_set[:, 0].max()))\n",
        "nb_movies = int(max(training_set[:, 1].max(), test_set[:, 1].max()))"
      ],
      "execution_count": 46,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "qctGASxyUUfS"
      },
      "source": [
        "def convert(data):\n",
        "  new_data = []\n",
        "  for id_users in range(1, nb_users + 1):\n",
        "    id_movies = data[:, 1] [data[:, 0] == id_users]\n",
        "    id_ratings = data[:, 2] [data[:, 0] == id_users]\n",
        "    ratings = np.zeros(nb_movies)\n",
        "    ratings[id_movies - 1] = id_ratings\n",
        "    new_data.append(list(ratings))\n",
        "  return new_data\n",
        "# Materialize the user x movie rating matrices for both splits\n",
        "training_set = convert(training_set)\n",
        "test_set = convert(test_set)"
      ],
      "execution_count": 47,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "K36EvHdbUuGe",
        "outputId": "fb6aa195-d97a-4918-acfd-5e947629e9b3",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 136
        }
      },
      "source": [
        "# Convert to float tensors: rows = users, columns = movies, 0 = unrated\n",
        "training_set= torch.FloatTensor(training_set)\n",
        "test_set= torch.FloatTensor(test_set)\n",
        "training_set"
      ],
      "execution_count": 93,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "tensor([[0., 3., 4.,  ..., 0., 0., 0.],\n",
              "        [4., 0., 0.,  ..., 0., 0., 0.],\n",
              "        [0., 0., 0.,  ..., 0., 0., 0.],\n",
              "        ...,\n",
              "        [5., 0., 0.,  ..., 0., 0., 0.],\n",
              "        [0., 0., 0.,  ..., 0., 0., 0.],\n",
              "        [0., 5., 0.,  ..., 0., 0., 0.]])"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 93
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "XgARQfxUVsKV"
      },
      "source": [
        "#Now we build our model. Used pytorch to build this model.\n",
        "class SAE(nn.Module):\n",
        "  def __init__(self):\n",
        "    super (SAE,self).__init__()\n",
        "    self.fclayer1= nn.Linear(nb_movies,20)\n",
        "    self.fclayer2= nn.Linear(20,10)\n",
        "    self.fclayer3= nn.Linear(10,20)\n",
        "    self.fclayer4= nn.Linear(20, nb_movies)\n",
        "    self.activation= nn.Sigmoid()\n",
        "  def forward(self,x):\n",
        "    x= self.activation(self.fclayer1(x))\n",
        "    x= self.activation(self.fclayer2(x))\n",
        "    x= self.activation(self.fclayer3(x))\n",
        "    x= self.fclayer4(x)\n",
        "    return x\n"
      ],
      "execution_count": 131,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0-6Zcy05XAaL"
      },
      "source": [
        "# Model, loss and optimizer; weight_decay=0.5 applies strong L2 regularization\n",
        "sae= SAE()\n",
        "criterion= nn.MSELoss()\n",
        "opt= optim.RMSprop(sae.parameters(), lr=0.01, weight_decay= 0.5)"
      ],
      "execution_count": 132,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WIyf87ell3d_",
        "outputId": "694415b9-39c8-418f-99e3-b53e5bbd3a18",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "# Train the autoencoder. Only users with at least one rating contribute;\n",
        "# the reported loss is rescaled by mean_corrector so it averages over\n",
        "# rated movies only.\n",
        "epochs=200\n",
        "for i in range(epochs):\n",
        "  train_loss=0\n",
        "  s=0.\n",
        "  for id_user in range(nb_users):\n",
        "    # 'input_vec' avoids shadowing the builtin input()\n",
        "    input_vec= Variable(training_set[id_user]).unsqueeze(0)\n",
        "    target= input_vec.clone()\n",
        "    if torch.sum(target.data>0)>0:\n",
        "      opt.zero_grad()  # clear accumulated gradients (this call was missing)\n",
        "      output= sae(input_vec)\n",
        "      target.requires_grad=False  # fixed typo: 'required_grad' was a silent no-op\n",
        "      output[target==0]=0  # exclude unrated movies from the loss\n",
        "      loss= criterion(output, target)\n",
        "      # rescale so the loss reflects only the movies this user rated\n",
        "      mean_corrector= nb_movies/float(torch.sum(target.data>0)+ 1e-10)\n",
        "      loss.backward()\n",
        "      train_loss += np.sqrt(loss.data*mean_corrector)\n",
        "      opt.step()\n",
        "      s+=1.0\n",
        "  print(f'Epoch:{i}:  Loss is {train_loss/s}')"
      ],
      "execution_count": 133,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Epoch:0:  Loss is 1.7709524631500244\n",
            "Epoch:1:  Loss is 1.0965514183044434\n",
            "Epoch:2:  Loss is 1.053122878074646\n",
            "Epoch:3:  Loss is 1.0382765531539917\n",
            "Epoch:4:  Loss is 1.03093683719635\n",
            "Epoch:5:  Loss is 1.0266313552856445\n",
            "Epoch:6:  Loss is 1.0235675573349\n",
            "Epoch:7:  Loss is 1.0219874382019043\n",
            "Epoch:8:  Loss is 1.0206352472305298\n",
            "Epoch:9:  Loss is 1.0196210145950317\n",
            "Epoch:10:  Loss is 1.0187708139419556\n",
            "Epoch:11:  Loss is 1.0180779695510864\n",
            "Epoch:12:  Loss is 1.01774263381958\n",
            "Epoch:13:  Loss is 1.0174723863601685\n",
            "Epoch:14:  Loss is 1.017245888710022\n",
            "Epoch:15:  Loss is 1.0168769359588623\n",
            "Epoch:16:  Loss is 1.0165348052978516\n",
            "Epoch:17:  Loss is 1.016510248184204\n",
            "Epoch:18:  Loss is 1.0164058208465576\n",
            "Epoch:19:  Loss is 1.0161149501800537\n",
            "Epoch:20:  Loss is 1.015924334526062\n",
            "Epoch:21:  Loss is 1.0158828496932983\n",
            "Epoch:22:  Loss is 1.0157208442687988\n",
            "Epoch:23:  Loss is 1.015690565109253\n",
            "Epoch:24:  Loss is 1.0156025886535645\n",
            "Epoch:25:  Loss is 1.015383243560791\n",
            "Epoch:26:  Loss is 1.0153164863586426\n",
            "Epoch:27:  Loss is 1.0149041414260864\n",
            "Epoch:28:  Loss is 1.013669490814209\n",
            "Epoch:29:  Loss is 1.0117340087890625\n",
            "Epoch:30:  Loss is 1.009655237197876\n",
            "Epoch:31:  Loss is 1.0089211463928223\n",
            "Epoch:32:  Loss is 1.0052491426467896\n",
            "Epoch:33:  Loss is 1.0046114921569824\n",
            "Epoch:34:  Loss is 1.0021896362304688\n",
            "Epoch:35:  Loss is 0.9998666644096375\n",
            "Epoch:36:  Loss is 0.9972663521766663\n",
            "Epoch:37:  Loss is 0.9952451586723328\n",
            "Epoch:38:  Loss is 0.9920201897621155\n",
            "Epoch:39:  Loss is 0.9930633306503296\n",
            "Epoch:40:  Loss is 0.9898573756217957\n",
            "Epoch:41:  Loss is 0.9872559905052185\n",
            "Epoch:42:  Loss is 0.9866359829902649\n",
            "Epoch:43:  Loss is 0.9845989346504211\n",
            "Epoch:44:  Loss is 0.9818873405456543\n",
            "Epoch:45:  Loss is 0.9792037606239319\n",
            "Epoch:46:  Loss is 0.9756717085838318\n",
            "Epoch:47:  Loss is 0.9801311492919922\n",
            "Epoch:48:  Loss is 0.9748210906982422\n",
            "Epoch:49:  Loss is 0.9765682220458984\n",
            "Epoch:50:  Loss is 0.9685265421867371\n",
            "Epoch:51:  Loss is 0.9733534455299377\n",
            "Epoch:52:  Loss is 0.9715100526809692\n",
            "Epoch:53:  Loss is 0.9704853892326355\n",
            "Epoch:54:  Loss is 0.9673969745635986\n",
            "Epoch:55:  Loss is 0.9645488858222961\n",
            "Epoch:56:  Loss is 0.9647038578987122\n",
            "Epoch:57:  Loss is 0.9635347723960876\n",
            "Epoch:58:  Loss is 0.9600067138671875\n",
            "Epoch:59:  Loss is 0.9613845944404602\n",
            "Epoch:60:  Loss is 0.9625730514526367\n",
            "Epoch:61:  Loss is 0.9610227942466736\n",
            "Epoch:62:  Loss is 0.9580524563789368\n",
            "Epoch:63:  Loss is 0.9644810557365417\n",
            "Epoch:64:  Loss is 0.9662528038024902\n",
            "Epoch:65:  Loss is 0.96654212474823\n",
            "Epoch:66:  Loss is 0.9623374342918396\n",
            "Epoch:67:  Loss is 0.9622742533683777\n",
            "Epoch:68:  Loss is 0.9598297476768494\n",
            "Epoch:69:  Loss is 0.9631460905075073\n",
            "Epoch:70:  Loss is 0.965172529220581\n",
            "Epoch:71:  Loss is 0.9602101445198059\n",
            "Epoch:72:  Loss is 0.9589532017707825\n",
            "Epoch:73:  Loss is 0.9554837942123413\n",
            "Epoch:74:  Loss is 0.9523964524269104\n",
            "Epoch:75:  Loss is 0.9537612795829773\n",
            "Epoch:76:  Loss is 0.9516437649726868\n",
            "Epoch:77:  Loss is 0.9515207409858704\n",
            "Epoch:78:  Loss is 0.9498810768127441\n",
            "Epoch:79:  Loss is 0.9542906284332275\n",
            "Epoch:80:  Loss is 0.9471767544746399\n",
            "Epoch:81:  Loss is 0.9482271671295166\n",
            "Epoch:82:  Loss is 0.948264479637146\n",
            "Epoch:83:  Loss is 0.9491567015647888\n",
            "Epoch:84:  Loss is 0.9460679888725281\n",
            "Epoch:85:  Loss is 0.9474534392356873\n",
            "Epoch:86:  Loss is 0.9448832869529724\n",
            "Epoch:87:  Loss is 0.9489898085594177\n",
            "Epoch:88:  Loss is 0.951126217842102\n",
            "Epoch:89:  Loss is 0.9494456052780151\n",
            "Epoch:90:  Loss is 0.9463141560554504\n",
            "Epoch:91:  Loss is 0.9467339515686035\n",
            "Epoch:92:  Loss is 0.9468718767166138\n",
            "Epoch:93:  Loss is 0.9473283886909485\n",
            "Epoch:94:  Loss is 0.9430039525032043\n",
            "Epoch:95:  Loss is 0.9425909519195557\n",
            "Epoch:96:  Loss is 0.9418690800666809\n",
            "Epoch:97:  Loss is 0.9446693062782288\n",
            "Epoch:98:  Loss is 0.9435399174690247\n",
            "Epoch:99:  Loss is 0.9487741589546204\n",
            "Epoch:100:  Loss is 0.9452140927314758\n",
            "Epoch:101:  Loss is 0.9469640254974365\n",
            "Epoch:102:  Loss is 0.9440407752990723\n",
            "Epoch:103:  Loss is 0.9440361261367798\n",
            "Epoch:104:  Loss is 0.9420641660690308\n",
            "Epoch:105:  Loss is 0.9430511593818665\n",
            "Epoch:106:  Loss is 0.9442576766014099\n",
            "Epoch:107:  Loss is 0.9449172019958496\n",
            "Epoch:108:  Loss is 0.944728672504425\n",
            "Epoch:109:  Loss is 0.9429684281349182\n",
            "Epoch:110:  Loss is 0.9420932531356812\n",
            "Epoch:111:  Loss is 0.9401243329048157\n",
            "Epoch:112:  Loss is 0.938877284526825\n",
            "Epoch:113:  Loss is 0.9388121962547302\n",
            "Epoch:114:  Loss is 0.938789963722229\n",
            "Epoch:115:  Loss is 0.9385502338409424\n",
            "Epoch:116:  Loss is 0.9370145201683044\n",
            "Epoch:117:  Loss is 0.9360941648483276\n",
            "Epoch:118:  Loss is 0.9362347722053528\n",
            "Epoch:119:  Loss is 0.9368256330490112\n",
            "Epoch:120:  Loss is 0.9352898597717285\n",
            "Epoch:121:  Loss is 0.9331958293914795\n",
            "Epoch:122:  Loss is 0.9340348839759827\n",
            "Epoch:123:  Loss is 0.9338756203651428\n",
            "Epoch:124:  Loss is 0.9332486391067505\n",
            "Epoch:125:  Loss is 0.9321404099464417\n",
            "Epoch:126:  Loss is 0.9313982725143433\n",
            "Epoch:127:  Loss is 0.9318290948867798\n",
            "Epoch:128:  Loss is 0.9309240579605103\n",
            "Epoch:129:  Loss is 0.9348466992378235\n",
            "Epoch:130:  Loss is 0.9339605569839478\n",
            "Epoch:131:  Loss is 0.9326789975166321\n",
            "Epoch:132:  Loss is 0.9319617748260498\n",
            "Epoch:133:  Loss is 0.9339548945426941\n",
            "Epoch:134:  Loss is 0.9309214949607849\n",
            "Epoch:135:  Loss is 0.9303350448608398\n",
            "Epoch:136:  Loss is 0.9298803210258484\n",
            "Epoch:137:  Loss is 0.929851770401001\n",
            "Epoch:138:  Loss is 0.9289936423301697\n",
            "Epoch:139:  Loss is 0.9284681677818298\n",
            "Epoch:140:  Loss is 0.9276442527770996\n",
            "Epoch:141:  Loss is 0.9279805421829224\n",
            "Epoch:142:  Loss is 0.9276334047317505\n",
            "Epoch:143:  Loss is 0.9325196743011475\n",
            "Epoch:144:  Loss is 0.9273167252540588\n",
            "Epoch:145:  Loss is 0.9272018671035767\n",
            "Epoch:146:  Loss is 0.926393985748291\n",
            "Epoch:147:  Loss is 0.9258806109428406\n",
            "Epoch:148:  Loss is 0.9250397682189941\n",
            "Epoch:149:  Loss is 0.9250313639640808\n",
            "Epoch:150:  Loss is 0.9244098663330078\n",
            "Epoch:151:  Loss is 0.9246236085891724\n",
            "Epoch:152:  Loss is 0.9238674640655518\n",
            "Epoch:153:  Loss is 0.923627495765686\n",
            "Epoch:154:  Loss is 0.9233439564704895\n",
            "Epoch:155:  Loss is 0.9234113693237305\n",
            "Epoch:156:  Loss is 0.9233031868934631\n",
            "Epoch:157:  Loss is 0.9228531718254089\n",
            "Epoch:158:  Loss is 0.922824501991272\n",
            "Epoch:159:  Loss is 0.9221475720405579\n",
            "Epoch:160:  Loss is 0.9225617051124573\n",
            "Epoch:161:  Loss is 0.9223776459693909\n",
            "Epoch:162:  Loss is 0.9216932654380798\n",
            "Epoch:163:  Loss is 0.9217785596847534\n",
            "Epoch:164:  Loss is 0.9217076897621155\n",
            "Epoch:165:  Loss is 0.9212501645088196\n",
            "Epoch:166:  Loss is 0.921280562877655\n",
            "Epoch:167:  Loss is 0.9208245873451233\n",
            "Epoch:168:  Loss is 0.92037034034729\n",
            "Epoch:169:  Loss is 0.9202408790588379\n",
            "Epoch:170:  Loss is 0.9208787083625793\n",
            "Epoch:171:  Loss is 0.9204654097557068\n",
            "Epoch:172:  Loss is 0.9202132821083069\n",
            "Epoch:173:  Loss is 0.9202253222465515\n",
            "Epoch:174:  Loss is 0.9203422665596008\n",
            "Epoch:175:  Loss is 0.9195365309715271\n",
            "Epoch:176:  Loss is 0.9193335771560669\n",
            "Epoch:177:  Loss is 0.9191123247146606\n",
            "Epoch:178:  Loss is 0.9190475344657898\n",
            "Epoch:179:  Loss is 0.9181299209594727\n",
            "Epoch:180:  Loss is 0.9182713627815247\n",
            "Epoch:181:  Loss is 0.9176045656204224\n",
            "Epoch:182:  Loss is 0.91800856590271\n",
            "Epoch:183:  Loss is 0.9179393649101257\n",
            "Epoch:184:  Loss is 0.9177592396736145\n",
            "Epoch:185:  Loss is 0.9171302914619446\n",
            "Epoch:186:  Loss is 0.9176110625267029\n",
            "Epoch:187:  Loss is 0.9172424077987671\n",
            "Epoch:188:  Loss is 0.9175648093223572\n",
            "Epoch:189:  Loss is 0.9162832498550415\n",
            "Epoch:190:  Loss is 0.9164953231811523\n",
            "Epoch:191:  Loss is 0.916439950466156\n",
            "Epoch:192:  Loss is 0.9165518283843994\n",
            "Epoch:193:  Loss is 0.9162784218788147\n",
            "Epoch:194:  Loss is 0.9162324070930481\n",
            "Epoch:195:  Loss is 0.9156748056411743\n",
            "Epoch:196:  Loss is 0.9160659909248352\n",
            "Epoch:197:  Loss is 0.916155993938446\n",
            "Epoch:198:  Loss is 0.9166004061698914\n",
            "Epoch:199:  Loss is 0.9153261780738831\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "SQzwqKGPz39r",
        "outputId": "5322ce03-1c46-46d4-97b9-8c1ab54cecac",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "# Evaluate on the test split: predict from each user's TRAINING ratings\n",
        "# and compare against their held-out TEST ratings only.\n",
        "test_loss=0\n",
        "s=0.\n",
        "with torch.no_grad():  # no gradients are needed during evaluation\n",
        "  for id_user in range(nb_users):\n",
        "    input_vec= Variable(training_set[id_user]).unsqueeze(0)\n",
        "    target= Variable(test_set[id_user]).unsqueeze(0)\n",
        "    if torch.sum(target.data>0)>0:\n",
        "      s+=1.\n",
        "      output= sae(input_vec)\n",
        "      output[target==0]=0  # score only movies present in the test set\n",
        "      loss= criterion(output, target)\n",
        "      # average the error over rated movies only\n",
        "      mean_corrector= nb_movies/float(torch.sum(target.data>0)+ 1e-10)\n",
        "      test_loss += np.sqrt(loss.data*mean_corrector)\n",
        "print(test_loss/s)"
      ],
      "execution_count": 134,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "tensor(0.9581)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "gqUM_5p80BwD"
      },
      "source": [
        ""
      ],
      "execution_count": 80,
      "outputs": []
    }
  ]
}