{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "W11_Tutorial2",
      "provenance": [],
      "collapsed_sections": [],
      "toc_visible": true,
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/CIS-522/course-content/blob/main/tutorials/W11_DeepRL/student/W11_Tutorial2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "-mNeDXY8QlCq"
      },
      "source": [
        "# CIS-522 Week 11 Part 2\n",
        "# Deep Q-Learning\n",
        "\n",
        "__Instructor:__ Dinesh Jayaraman\n",
        "\n",
        "__Content creators:__ Byron Galbraith, Chuning Zhu"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "sgwEiAqU-T1S"
      },
      "source": [
        "---"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "ZX4yY8pP-XxF"
      },
      "source": [
        "#@markdown What is your Pennkey and pod? (text, not numbers, e.g. bfranklin)\n",
        "my_pennkey = '' #@param {type:\"string\"}\n",
        "my_pod = 'Select' #@param ['Select', 'euclidean-wombat', 'sublime-newt', 'buoyant-unicorn', 'lackadaisical-manatee','indelible-stingray','superfluous-lyrebird','discreet-reindeer','quizzical-goldfish','astute-jellyfish','ubiquitous-cheetah','nonchalant-crocodile','fashionable-lemur','spiffy-eagle','electric-emu','quotidian-lion']\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "5N_2B1gR-h9X"
      },
      "source": [
        "## Recap the experience from last tutorial\n",
        "\n",
        "What did you learn in the last tutorial? What questions do you have? [10 min discussion]"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "z7ZEuBOg-aCu",
        "cellView": "form"
      },
      "source": [
        "learning_from_previous_tutorial = '' #@param {type:\"string\"}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "2tATejGg8qY6"
      },
      "source": [
        "---\n",
        "# Setup"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "w3T__fbPQk-J"
      },
      "source": [
        "# imports\n",
        "import time\n",
        "from typing import NamedTuple\n",
        "\n",
        "import numpy as np\n",
        "import seaborn\n",
        "import torch\n",
        "import torch.nn as nn\n",
        "import torch.nn.functional as F\n",
        "\n",
        "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "afVK5IwcCFQ7",
        "cellView": "form"
      },
      "source": [
        "# @title Figure Settings\n",
        "import matplotlib.pyplot as plt\n",
        "import matplotlib.colors as colors\n",
        "import matplotlib.animation as animation\n",
        "import ipywidgets as widgets\n",
        "\n",
        "%matplotlib inline \n",
        "%config InlineBackend.figure_format = 'retina'\n",
        "\n",
        "plt.rcParams.update(plt.rcParamsDefault)\n",
        "plt.rc('animation', html='jshtml')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QspC6YE2QdjT",
        "cellView": "form"
      },
      "source": [
        "# @title Installing the MinAtar environment\n",
        "\n",
        "!pip install -q git+https://github.com/kenjyoung/MinAtar\n",
        "\n",
        "# we do this because sometimes the initial attempt to import the\n",
        "# environment fails, but then succeeds the 2nd time.\n",
        "try:\n",
        "  from minatar.environment import Environment\n",
        "except:\n",
        "  from minatar.environment import Environment"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "cZiqYLzZmxjt",
        "cellView": "form"
      },
      "source": [
        "# @title Helper Methods\n",
        "\n",
        "def epsilon_greedy(n_actions, epsilon):\n",
        "  def policy_fn(q_net, state):\n",
        "    if torch.rand(1) < epsilon:\n",
        "      return torch.randint(n_actions, size=(1,), device=device)\n",
        "    else:\n",
        "      with torch.no_grad():\n",
        "        q_pred = q_net(state)\n",
        "        return torch.argmax(q_pred).view(1,)\n",
        "  return policy_fn\n",
        "\n",
        "\n",
        "def phi(x):\n",
        "  return torch.from_numpy(x).type(torch.float32).permute(2, 0, 1).unsqueeze(0).to(device)\n",
        "\n",
        "\n",
        "def simulate(env, agent, max_steps=500): \n",
        "    # Simulate the agent for one episode in the environment\n",
        "    env.reset()\n",
        "    state = env.state()\n",
        "    ep_states = [state]\n",
        "    ep_reward = 0\n",
        "    term = False\n",
        "    step = 0\n",
        "    while (not term) and (step < max_steps):\n",
        "        action = agent.act(phi(state))\n",
        "        rew, term = env.act(action)\n",
        "        state = env.state()\n",
        "        ep_states.append(state)\n",
        "        ep_reward += rew\n",
        "        step += 1\n",
        "    return ep_states, ep_reward\n",
        "\n",
        "\n",
        "def plot_minatar_state(state, ax=None):\n",
        "    if ax is None:\n",
        "        fig, ax = plt.subplots(1, 1)\n",
        "        fig.tight_layout(pad=0)\n",
        "        ax.axis('off')\n",
        "    \n",
        "    # Plotting configuration\n",
        "    nc = env.n_channels\n",
        "    cmap = seaborn.color_palette(\"cubehelix\", nc)\n",
        "    cmap.insert(0, (0,0,0))\n",
        "    cmap = colors.ListedColormap(cmap)\n",
        "    norm = colors.BoundaryNorm([i for i in range(nc+2)], nc+1)\n",
        "    # Convert state to numerical state and plot with colormap\n",
        "    numerical_state = np.amax(state*np.reshape(np.arange(nc)+1,(1,1,-1)),2)+0.5\n",
        "    plot = ax.imshow(numerical_state, cmap=cmap, norm=norm, interpolation='none')\n",
        "    return plot\n",
        "\n",
        "\n",
        "def generate_video(states):\n",
        "    fig, ax = plt.subplots(1, 1)\n",
        "    fig.tight_layout(pad=0)\n",
        "    ax.axis('off')\n",
        "\n",
        "    def frame(i):\n",
        "        ax.clear()\n",
        "        state = states[i]\n",
        "        return plot_minatar_state(state, ax)\n",
        "    \n",
        "    anim = animation.FuncAnimation(fig, frame, frames=range(len(states)), \n",
        "                                   blit=False, repeat=False, repeat_delay=10000)\n",
        "    plt.close() # avoid showing extra plots\n",
        "    return anim\n",
        "\n",
        "\n",
        "def learn_env(env, agent, gamma, n_steps):\n",
        "  env.reset()\n",
        "  state = phi(env.state())\n",
        "\n",
        "  ep_reward = []\n",
        "  ep_steps = []\n",
        "  reward = 0\n",
        "  t = 0\n",
        "  tic = time.time()\n",
        "  for frame in range(n_steps):\n",
        "    act = agent.act(state)\n",
        "    rew, term = env.act(act)\n",
        "    next_state = phi(env.state())\n",
        "    discount = gamma*(1-term)\n",
        "  \n",
        "    agent.train(state, act.unsqueeze(0), rew, discount, next_state, frame)\n",
        "    reward += rew\n",
        "    \n",
        "    if term:\n",
        "      env.reset()\n",
        "      state = phi(env.state())\n",
        "      ep_reward.append(reward)\n",
        "      reward = 0\n",
        "      ep_steps.append(t)\n",
        "      t = 0\n",
        "    else:\n",
        "      state = next_state\n",
        "      t += 1\n",
        "\n",
        "    if (frame+1) % 10000 == 0:\n",
        "      toc = time.time()      \n",
        "      print(f\"Frame: {frame+1}, reward: {ep_reward[-1:]}, steps: {ep_steps[-1:]}, time:{toc-tic}\")\n",
        "      tic = toc\n",
        "\n",
        "  ep_reward.append(reward)  \n",
        "  ep_steps.append(t)\n",
        "  return ep_reward, ep_steps"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "tdWyw69GYsvo"
      },
      "source": [
        "---\n",
        "# Section 1: From Q-Learning to Deep Q-Learning\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "K_V07gH34nNM"
      },
      "source": [
        "#@title Video: From Tabular to Deep Q Learning\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"ECV5yeigZIg\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "import time\n",
        "try: t0;\n",
        "except NameError: t0=time.time()\n",
        "\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "WQgb5QyZ5YUO"
      },
      "source": [
        "In this tutorial, we will study our first \"deep\" reinforcement learning algorithm: Deep Q-learning. Deep Q-learning is a natural extension of classic Q-learning, which we saw in the last tutorial. Recall that classic Q-learning is a \"tabular\" method. That is, we keep a table of Q-values for all state and action pairs. What happens if the state space is extremely large or continuous? Well, if the state is represented by a 64x64 image, then the Q table will have $256^{64 * 64} * \\text{num_actions}$ entries. This is definitely intractable.\n",
        "\n",
        "So what do we do? Here's where deep learning comes into play! We can use a neural network to approximate the Q values. Let's call this neural net the Q network. The Q network maps a state and action pair to its Q value. In practice, it takes in a state as input and outputs a vector of Q-values for all actions. We can then create the Q target by taking the max or derive a policy by taking the argmax. Mathematically, let $Q_{\\phi}$ be a Q network parametrized by $\\phi$. Given a transition tuple, the Q network minimizes the following objective: \n",
        "\n",
        "$$\\ell(\\phi, s, a, r, s') = \\left(Q_{\\phi}(s, a) - \\left(r + \\gamma \\max_{a'}Q_{\\phi}(s', a')\\right)\\right)^2$$\n",
        "\n",
        "\n",
        "In other words, we minimize the squared error between the Q estimate and the Q target constructed from the Bellman equation. Convince yourself that an optimal Q network would attain zero loss on all transition tuples. We train the Q network using stochastic gradient descent: \n",
        "\n",
        "$$\\phi \\leftarrow \\phi - \\alpha \\nabla\\ell(\\phi, s, a, r, s')$$\n",
        "\n",
        "The \"vanilla\" DQN algorithm looks very similar to Q-learning: \n",
        "```\n",
        "for n episodes:\n",
        "    for T steps:\n",
        "        Forward current state s_t through the Q network to get the Q-values for all actions\n",
        "        Select an action a_t using epsilon-greedy policy\n",
        "        Execute a_t in the environment to get reward r and next state s_{t+1}\n",
        "        Update the Q network by one step of gradient descent\n",
        "```\n",
        "\n",
        "In practice, two standard tricks are essential for DQN to work: replay buffer and target network. We will study them in detail. We will also introduce Double DQN, a simple extension that brings significant performance gain. \n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "hSC9xHSKJcSh"
      },
      "source": [
        "Before moving further, let's see the running example for this tutorial. [MinAtar](https://github.com/kenjyoung/MinAtar) is a reduced implementation of five games from the Arcade Learning Environment (ALE). The state of each game is provided as a 10x10xN array, where N is the number of relevant game objects. For instance, in the game Breakout, there are four objects - the paddle, where the ball is now, where the ball was last frame, and the remaining blocks. Thus the state for the Breakout environment will have the shape 10x10x4. Each game object only occupies a single element in the cell, as can be seen by running the code below."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "TnGpMybZEX_c"
      },
      "source": [
        "# create the Breakout environment\n",
        "env = Environment('breakout', random_seed=522)\n",
        "# reset the environment and advance state by two frames to illustrate all channels\n",
        "env.reset()\n",
        "env.act(0)\n",
        "env.act(0)\n",
        "state = env.state()\n",
        "plot_minatar_state(state)\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "AlI3mQ7mRTT3"
      },
      "source": [
        "While this greatly reduces the complexity of the environment, the range of possible states is still large enough to be intractable for tabular methods. We can no longer represent our Q function as a lookup table, so we instead turn to function approximation methods. And, since the MinAtar state is multiple channels arranged in a grid, we can turn to deep learning solutions and our old friend convolutional neural networks."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ai9Zu82YUmDV",
        "cellView": "form"
      },
      "source": [
        "#@markdown What is the number of possible states in the MinAtar Breakout environment?\n",
        "num_states = \"\" #@param {type:\"string\"}\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "IguCLO6WJMnQ"
      },
      "source": [
        "[*Click for solution*](https://github.com/CIS-522/course-content/blob/main/tutorials/W11_DeepRL/solutions/num_states.md)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "H-CSUdaGTxui"
      },
      "source": [
        "## Exercise 1: Creating the Q network\n",
        "In this exercise you will be implementing the forward method of a CNN-based PyTorch module for approximating the Q function of a MinAtar environment. The network needs to take in the state of the environment and produce a value prediction for each possible action. The network itself is given as a 2D convolutional layer followed by two fully connected linear layers with ReLU activation functions on all but the last layer.\n",
        "\n",
        "Note: You will need to flatten the output of the convolutional layer before passing it to the first linear layer."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WaNWFyrojGlC"
      },
      "source": [
        "class QNetwork(nn.Module):\n",
        "  def __init__(self, n_channels, n_actions):\n",
        "    super().__init__()\n",
        "    self.conv = nn.Conv2d(in_channels=n_channels, out_channels=16,\n",
        "                          kernel_size=3, stride=1)\n",
        "    self.fc1 = nn.Linear(in_features=1024, out_features=128)\n",
        "    self.fc2 = nn.Linear(in_features=128, out_features=n_actions)\n",
        "\n",
        "  def forward(self, x):\n",
        "    ####################################################################\n",
        "    # Fill in missing code below (...),\n",
        "    # then remove or comment the line below to test your function\n",
        "    raise NotImplementedError(\"Q network\")\n",
        "    ####################################################################\n",
        "\n",
        "    # Pass the input through the convnet layer with ReLU activation\n",
        "    x = ...\n",
        "    # Flatten the result while preserving the batch dimension\n",
        "    x = ...\n",
        "    # Pass the result through the first linear layer with ReLU activation\n",
        "    x = ...\n",
        "    # Finally pass the result through the second linear layer and return\n",
        "    x = ...\n",
        "    return x\n",
        "\n",
        "# Uncomment below to test your module\n",
        "# env = Environment('breakout', random_seed=522)\n",
        "# q_net = QNetwork(env.n_channels, env.num_actions()).to(device)\n",
        "# env.reset()\n",
        "# state = env.state()\n",
        "# # note: phi() transforms the state to make it compatible with PyTorch\n",
        "# q_net(phi(state))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "eB4fjYB1KvKi"
      },
      "source": [
        "[*Click for solution*](https://github.com/CIS-522/course-content/blob/main/tutorials/W11_DeepRL/solutions/W11_Tutorial2_Solution_Ex01.py)\n",
        "\n",
        "*Example output:*  \n",
        "\n",
        "```python\n",
        "tensor([[0.1021, 0.0603, 0.0767, 0.0273, 0.0022, 0.1477]], device='cuda:0',\n",
        "       grad_fn=<AddmmBackward>)\n",
        "```"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "iOFDtKA3X6LX"
      },
      "source": [
        "If all went well you should see the Q function output tensor with 6 values, one for each of the 6 actions MinAtar environments allow. Now that you have a working deep neural network approximating your Q function, let's see how well it works on the Breakout task.\n",
        "\n",
        "First, we are going to define an agent object that is initialized with a policy (always epsilon greedy in these examples), our Q function approximator, and an optimizer for updating our network (always Adam in these examples). On each time step, the `act` method will get called to produce the agent's action given the current state. Once the environment responds to that action, the agent's `train` method gets called with the state, action, reward, discount, next state, and current time step. Just like in the tabular setting, we will start by updating the Q values on every time step."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "5E7P4JrzZ9qQ"
      },
      "source": [
        "class QNetworkAgent:\n",
        "  def __init__(self, policy, q_net, optimizer):\n",
        "    self.policy = policy\n",
        "    self.q_net = q_net\n",
        "    self.optimizer = optimizer\n",
        "  \n",
        "  def act(self, state):\n",
        "    # we never need to compute gradients on action selection, so we disable\n",
        "    # autograd to speed up performance\n",
        "    with torch.no_grad():\n",
        "      return self.policy(self.q_net, state)\n",
        "  \n",
        "  def train(self, state, action, reward, discount, next_state, frame):\n",
        "    # Compute our predicted q-value given the state and action from our batch\n",
        "    q_pred = self.q_net(state).gather(1, action)\n",
        "    # Now compute the q-value target (also called td target or bellman backup)\n",
        "    # we don't need to compute gradients on the q-value target, just the q-value\n",
        "    # prediction, so we disable autograd here to speed up performance    \n",
        "    with torch.no_grad():\n",
        "      # First get the best q-value from the next state\n",
        "      q_target = self.q_net(next_state).max(dim=1)[0].view(-1, 1)\n",
        "      # Next apply the reward and discount to get the q-value target\n",
        "      q_target = reward + discount * q_target\n",
        "    # Compute the MSE loss between the predicted and target values\n",
        "    loss = F.mse_loss(q_pred, q_target)\n",
        "\n",
        "# backpropagation to update the q network\n",
        "    self.optimizer.zero_grad()\n",
        "    loss.backward()\n",
        "    self.optimizer.step()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "GoBaaQuXg5Yy"
      },
      "source": [
        "Now that we have our agent, we are going to have it try to learn the MinAtar Breakout environment. The following code will initialize everything needed to do this, along with your QNetwork module. It will then run the helper function `learn_env` which runs the simulation loop of the agent and environment. `learn_env` will automatically start a new episode whenever the current one terminates and will run for a total of `n_steps` before ending the evaluation. It should take less than 2 minutes to run 50k steps.\n",
        "\n",
        "*Note:* 50k steps was chosen to be enough to ideally see some real behavior improvement and differences across algorithms while not taking *too* long to train. To achieve a high-performing agent, you would have to train for much longer.\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "_fLGzYhiejBz"
      },
      "source": [
        "n_steps = 50000\n",
        "gamma = 0.99\n",
        "epsilon = 0.1\n",
        "\n",
        "env = Environment('breakout')\n",
        "q_net = QNetwork(env.n_channels, env.num_actions()).to(device)\n",
        "policy = epsilon_greedy(env.num_actions(), epsilon)\n",
        "optimizer = torch.optim.Adam(q_net.parameters(), lr=1e-3)\n",
        "agent = QNetworkAgent(policy, q_net, optimizer)\n",
        "eps_b_qn = learn_env(env, agent, gamma, n_steps)\n",
        "\n",
        "plt.figure(figsize=(8, 4))\n",
        "plt.plot(eps_b_qn[0])\n",
        "plt.title('breakout reward curve')\n",
        "plt.xlabel('episode')\n",
        "plt.ylabel('return')\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "FcPXURGugm7w"
      },
      "source": [
        "If your QNetwork module is working properly, you should see the agent starting to get a bit better after a few hundred episodes. We can also visualize how our trained agent is actually playing the game by running the following code."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "8cVrZ0vCl2XZ"
      },
      "source": [
        "# Generate video\n",
        "ep_states, ep_rew = simulate(env, agent)\n",
        "generate_video(ep_states)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "iprQ3XL9l3fA"
      },
      "source": [
        "While this shows some progress on one of the tasks in the greatly simplified MinAtar setting, this approach would fare quite poorly on the ALE tasks. The objective of the original Deep Q-Network (DQN) paper was to have a general learning solution for all of the games in the ALE. That approach added two key enhancements that enabled this success: using replay buffers and target networks. We will go over each of these in the following sections."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "m9LemcJ2YxR_"
      },
      "source": [
        "---\n",
        "# Section 2: Replay Buffer"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "1IDteCZd5gEL"
      },
      "source": [
        "#@title Video: Experience Replay\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"BBBa2mte1Ls\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "import time\n",
        "try: t0;\n",
        "except NameError: t0=time.time()\n",
        "\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ZL-nIx8x5r9v"
      },
      "source": [
        "Deep learning likes to use SGD w/ mini-batches to speed up training. How do we do this in the RL setting? A replay buffer lets us do that by keeping track of past experience which we can then sample. After each environment interaction, we add the transition tuple $(s, a, r, s')$ to the replay buffer. During training, we randomly sample a mini-batch of transition tuples from the replay buffer and use it to update the Q-network. With a replay buffer, the loss of the Q network becomes\n",
        "\n",
        "$$\\ell(\\phi, B) = \\frac{1}{|B|}\\sum_{(s, a, r, s') \\in B}\\left(Q_{\\phi}(s, a) - \\left(r + \\gamma \\max_{a'}Q_{\\phi}(s', a')\\right)\\right)^2$$\n",
        "\n",
        "where $B$ is a mini-batch of transition tuples. How large should the replay buffer be? If we use only the most recent data, the Q network will overfit; if we use too much experience, the learning can be very slow. This may take some tuning to get right."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "qe-rWbchRDCT",
        "cellView": "form"
      },
      "source": [
        "#@markdown What is a key characteristic of Q-learning that allows us to use a replay buffer? Does it make sense to use a replay buffer for SARSA?\n",
        "characteristic = \"\" #@param {type:\"string\"}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "OFAwOvofJnoy"
      },
      "source": [
        "[*Click for solution*](https://github.com/CIS-522/course-content/blob/main/tutorials/W11_DeepRL/solutions/characteristic.md)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "dquRg2w6PtnF"
      },
      "source": [
        "## Exercise 2: Creating the replay buffer\n",
        "\n",
        "In this exercise, you will implement the sampling part of the replay buffer. Since we want to sample randomly from the buffer, you may want to use [numpy.random.choice](https://numpy.org/doc/stable/reference/random/generated/numpy.random.choice.html). We are also making use of a `Batch` convenience object to act as a container for the state, action, reward, discount, and next state tensors. This object extends the Python type `NamedTuple` and just allows us to refer to the tensors by name rather than by index (e.g. `batch.state` vs `batch[0]`).\n",
        "\n",
        "Note that in this implementation we are also storing the discount factor (i.e. $\\gamma$) associated with each time step in the replay buffer as well. $\\gamma$ is usually fixed so why do this? The answer is that we are actually storing the value $\\gamma * (1 - terminal)$ which will be $\\gamma$ if the episode is continuing and 0 if it terminated on that step."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "5k3eaxjGPNUo"
      },
      "source": [
        "class Batch(NamedTuple):\n",
        "  state: torch.Tensor\n",
        "  action: torch.Tensor\n",
        "  reward: torch.Tensor\n",
        "  discount: torch.Tensor\n",
        "  next_state: torch.Tensor"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "UVDT6NGmjcT3"
      },
      "source": [
        "class ReplayBuffer:\n",
        "  def __init__(self, state_dim, act_dim, buffer_size):\n",
        "    self.buffer_size = buffer_size\n",
        "    self.ptr = 0\n",
        "    self.n_samples = 0\n",
        "\n",
        "    self.state = torch.zeros(buffer_size, *state_dim, dtype=torch.float32, device=device)\n",
        "    self.action = torch.zeros(buffer_size, act_dim, dtype=torch.int64, device=device)\n",
        "    self.reward = torch.zeros(buffer_size, 1, dtype=torch.float32, device=device)\n",
        "    self.discount = torch.zeros(buffer_size, 1, dtype=torch.float32, device=device)\n",
        "    self.next_state = torch.zeros(buffer_size, *state_dim, dtype=torch.float32, device=device)\n",
        "\n",
        "  def add(self, state, action, reward, discount, next_state):\n",
        "    self.state[self.ptr] = state\n",
        "    self.action[self.ptr] = action\n",
        "    self.reward[self.ptr] = reward\n",
        "    self.discount[self.ptr] = discount\n",
        "    self.next_state[self.ptr] = next_state\n",
        "    \n",
        "    if self.n_samples < self.buffer_size:\n",
        "      self.n_samples += 1\n",
        "\n",
        "    self.ptr = (self.ptr + 1) % self.buffer_size\n",
        "\n",
        "  def sample(self, batch_size):\n",
        "    ####################################################################\n",
        "    # Fill in missing code below (...),\n",
        "    # then remove or comment the line below to test your function\n",
        "    raise NotImplementedError(\"Replay buffer\")\n",
        "    ####################################################################\n",
        "\n",
        "    # Select batch_size number of sample indices at random from the buffer\n",
        "    idx = ...\n",
        "    # Using the random indices, assign the corresponding state, action, reward,\n",
        "    # discount, and next state samples.\n",
        "    state = ...\n",
        "    action = ...\n",
        "    reward = ...\n",
        "    discount = ...\n",
        "    next_state = ...\n",
        "\n",
        "    return Batch(state, action, reward, discount, next_state)\n",
        "\n",
        "# Uncomment below to test your code\n",
        "# np.random.seed(522)\n",
        "# torch.manual_seed(522)\n",
        "# state_dim = (1,2,2)\n",
        "# act_dim = 1\n",
        "# buffer_size = 10\n",
        "# buffer = ReplayBuffer(state_dim, act_dim, buffer_size)\n",
        "# for _ in range(3):\n",
        "#   buffer.add(torch.rand(state_dim, dtype=torch.float32),\n",
        "#              torch.randint(6, (1,1)),\n",
        "#              1, 0.99, \n",
        "#              torch.rand(state_dim, dtype=torch.float32))\n",
        "# batch = buffer.sample(2)\n",
        "# batch.state"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "8z5z1EkJLPn5"
      },
      "source": [
        "If all went well, you should see the following, a (2x1x2x2) tensor representing a batch of state values:\n",
        "```\n",
        "tensor([[[[0.5818, 0.7804],\n",
        "          [0.3947, 0.7742]]],\n",
        "\n",
        "\n",
        "        [[[0.5818, 0.7804],\n",
        "          [0.3947, 0.7742]]]], device='cuda:0')\n",
        "```\n",
        "\n",
        "[*Click for solution*](https://github.com/CIS-522/course-content/blob/main/tutorials/W11_DeepRL/solutions/W11_Tutorial2_Solution_Ex02.py)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ahTYjm0dY1HY"
      },
      "source": [
        "---\n",
        "# Section 3: Target Network\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "Veekw62E5vLI"
      },
      "source": [
        "#@title Video: Target Networks\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"RaIcFiNqP-0\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "import time\n",
        "try: t0;\n",
        "except NameError: t0=time.time()\n",
        "\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "U8OcMeZR51tm"
      },
      "source": [
        "Observe that the Q target is computed using the current Q estimates. Since we update the Q estimates in each training step, we are essentially trying to hit a moving target. This could be mitigated by introducing a target network. Target network is a copy of the Q-network that we update slowly to provide some stability for training. Formally, the Q network loss becomes $$\\ell(\\phi, B) = \\frac{1}{|B|}\\sum_{(s, a, r, s') \\in B}\\left(Q_{\\phi}(s, a) - \\left(r + \\gamma \\max_{a'}Q_{\\phi_{\\text{targ}}}(s', a')\\right)\\right)^2$$\n",
        "\n",
        "There are two ways to maintain the target network. One option is to freeze and hard update (full swap) with some training interval. This was the approach used in the original DQN paper. Another way is to use a soft update (polyak averaging), which updates the target every time step with a small weighted average. This was introduced later in the Deep Deterministic Policy Gradient (DDPG) paper to improve training stability, and is the approach we use here.\n",
        "\n",
        "The following cell defines a soft update function you will use in exercise 3."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "z0YqEyzZt7ak"
      },
      "source": [
        "def soft_update_from_to(source, target, tau):\n",
        "  \"\"\"Polyak-average the source network's parameters into the target network.\n",
        "\n",
        "  Performs, parameter-wise and in place:\n",
        "      target <- (1 - tau) * target + tau * source\n",
        "  so the target network slowly tracks the source (q) network.\n",
        "  \"\"\"\n",
        "  for target_param, source_param in zip(target.parameters(), source.parameters()):\n",
        "    target_param.data.copy_(\n",
        "        target_param.data * (1.0 - tau) + source_param.data * tau\n",
        "    )"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "AF3n3Pnrt0XS"
      },
      "source": [
        "## Exercise 3: Putting everything together\n",
        "\n",
        "In this exercise you will adapt the previous QNetworkAgent to take advantage of both the replay buffer for generating batches of samples to train on and the target network for improving training stability. In particular you will focus on implementing most of the `train` method. Feel free to refer back to the original QNetworkAgent `train` method for hints on how to proceed. The [torch.gather](https://pytorch.org/docs/stable/generated/torch.gather.html) method will be useful for selecting action indices across a batch.\n",
        "\n",
        "The two key changes you will be making from the previous agent are 1) using batches from the replay buffer for training instead of just the direct input and 2) incorporating the target network into the loss function calculation. Recall that the replay buffer returns a `Batch` object, which has state, action, reward, discount, and next_state properties.\n",
        "\n",
        "There are also other changes to the agent's logic related to the replay buffer. The agent now has a parameter for when it is to start training. If the time step hasn't reached that point, the agent just takes a random action and stores the value in the buffer but doesn't update the q or target networks."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1J3RYobXt7Yd"
      },
      "source": [
        "class DQNAgent:\n",
        "  \"\"\"Q-learning agent with an experience replay buffer and a soft-updated target network.\"\"\"\n",
        "  def __init__(self, policy, q_net, target_net, optimizer, tau, replay_buffer,\n",
        "               batch_size, train_start):\n",
        "    self.policy = policy\n",
        "    self.q_net = q_net\n",
        "    self.target_net = target_net\n",
        "    # we never need to compute gradients on the target network, so we disable\n",
        "    # autograd to speed up performance\n",
        "    for p in self.target_net.parameters():\n",
        "      p.requires_grad = False\n",
        "    self.optimizer = optimizer\n",
        "    self.tau = tau  # soft-update rate for the target network\n",
        "    self.replay_buffer = replay_buffer\n",
        "    self.batch_size = batch_size\n",
        "    self.train_start = train_start  # frame at which training begins\n",
        "    self.is_waiting = True  # while True, act() returns random actions\n",
        "\n",
        "  def act(self, state):\n",
        "    # we never need to compute gradients on action selection, so we disable\n",
        "    # autograd to speed up performance\n",
        "    with torch.no_grad():\n",
        "      if self.is_waiting:\n",
        "        # NOTE(review): 6 hard-codes the action-space size; consider passing\n",
        "        # env.num_actions() to the constructor instead.\n",
        "        return torch.randint(6, (1,1))\n",
        "      return self.policy(self.q_net, state)\n",
        "\n",
        "  def train(self, state, action, reward, discount, next_state, frame):\n",
        "    # Add the step to our replay buffer. Use the agent's own buffer rather\n",
        "    # than relying on a module-level `replay_buffer` global.\n",
        "    self.replay_buffer.add(state, action, reward, discount, next_state)\n",
        "    # Don't train if we aren't ready\n",
        "    if frame < self.train_start:\n",
        "      return\n",
        "    elif frame == self.train_start:\n",
        "      self.is_waiting = False\n",
        "\n",
        "    ####################################################################\n",
        "    # Fill in missing code below (...),\n",
        "    # then remove or comment the line below to test your function\n",
        "    raise NotImplementedError(\"DQN train\")\n",
        "    ####################################################################\n",
        "\n",
        "    # Using the Replay Buffer you made in exercise 2, sample a batch of steps\n",
        "    # for training\n",
        "    batch = ...\n",
        "\n",
        "    # First let's compute our predicted q-values\n",
        "    # We need to pass our batch of states (batch.state) to our q_net\n",
        "    q_actions = ...\n",
        "    # Then we select the q-values that correspond to the actions in our batch\n",
        "    # (batch.action) to get our predictions (hint: use the gather method)\n",
        "    q_pred = ...\n",
        "\n",
        "    # Now compute the q-value target (also known as the td target or bellman\n",
        "    # backup) using our target network. Since we don't need gradients for this,\n",
        "    # we disable autograd here to speed up performance\n",
        "    with torch.no_grad():\n",
        "      # First get the q-values from our target_net using the batch of next\n",
        "      # states.\n",
        "      q_target_actions = ...\n",
        "      # Get the values that correspond to the best action by taking the max along\n",
        "      # the value dimension (dim=1)\n",
        "      q_target = ...\n",
        "      # Next multiply by batch.discount and add batch.reward\n",
        "      q_target = ...\n",
        "    # Compute the MSE loss between the predicted and target values, then average\n",
        "    # over the batch\n",
        "    loss = ...\n",
        "\n",
        "    # backpropagation to update the q-network\n",
        "    self.optimizer.zero_grad()\n",
        "    loss.backward()\n",
        "    self.optimizer.step()\n",
        "\n",
        "    # soft update target network with the updated q-network\n",
        "    soft_update_from_to(self.q_net, self.target_net, self.tau)\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "YQ2a1mZ7LvZE"
      },
      "source": [
        "[*Click for solution*](https://github.com/CIS-522/course-content/blob/main/tutorials/W11_DeepRL/solutions/W11_Tutorial2_Solution_Ex03.py)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Yko00xeJvU0h"
      },
      "source": [
        "Now that we have our DQN agent, let's set everything up and see how well it learns the Breakout environment. While doing initial testing, set `n_steps` to 10k and ensure everything appears to be running. Once it looks good, set `n_steps` to 50k, which should run in around 2 minutes or so."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "sCLZiDbZlVhA"
      },
      "source": [
        "n_steps = 50000\n",
        "gamma = 0.99\n",
        "epsilon = 0.1\n",
        "tau = 1e-2  # how slowly we update the target network \n",
        "\n",
        "env = Environment('breakout', random_seed=522)\n",
        "state_dim = (env.n_channels, 10, 10)\n",
        "act_dim = 1\n",
        "buffer_size = 100000\n",
        "batch_size = 64\n",
        "train_start = 5000\n",
        "\n",
        "q_net = QNetwork(env.n_channels, env.num_actions()).to(device)\n",
        "target_net = QNetwork(env.n_channels, env.num_actions()).to(device)\n",
        "policy = epsilon_greedy(env.num_actions(), epsilon)\n",
        "optimizer = torch.optim.Adam(q_net.parameters(), lr=1e-3)\n",
        "replay_buffer = ReplayBuffer(state_dim, act_dim, buffer_size)\n",
        "agent = DQNAgent(policy, q_net, target_net, optimizer, tau, replay_buffer,\n",
        "                 batch_size, train_start)\n",
        "eps_b_dqn = learn_env(env, agent, gamma, n_steps)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "s7pM9TAX2HaE",
        "cellView": "form"
      },
      "source": [
        "#@markdown Run cell to plot reward curve\n",
        "plt.figure(figsize=(8, 4))\n",
        "plt.plot(eps_b_dqn[0])\n",
        "plt.title('breakout reward curve')\n",
        "plt.xlabel('episode')\n",
        "plt.ylabel('return')\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "A6AfcOWj6mu1",
        "cellView": "form"
      },
      "source": [
        "#@markdown Run cell to generate video\n",
        "ep_states, ep_rew = simulate(env, agent)\n",
        "generate_video(ep_states)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "FX9fmL9PAF-v"
      },
      "source": [
        "You should see more consistent and stable returns over time with the DQNAgent vs the QNetworkAgent, except at the beginning, where the agent doesn't really learn much until around 500 episodes or so — this is related to the `train_start` parameter."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "QGb07a4DAz4n"
      },
      "source": [
        "#@markdown What is the purpose of delaying the start of training while choosing random actions?\n",
        "purpose = \"\" #@param {type:\"string\"}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "DKtw4kOOKFk2"
      },
      "source": [
        "[*Click for solution*](https://github.com/CIS-522/course-content/blob/main/tutorials/W11_DeepRL/solutions/purpose.md)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "fBQ-qCWZzYy0"
      },
      "source": [
        "---\n",
        "# Section 4: Double Q-Learning\n",
        "\n",
        "\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "apKsIWQm6A5A"
      },
      "source": [
        "#@title Video: Double DQN\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"Lb5ADHnRQV8\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "import time\n",
        "try: t0;\n",
        "except NameError: t0=time.time()\n",
        "\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "SJaxjzcF6J3h"
      },
      "source": [
        "A problem with DQN is that the max operator on the target network can result in overestimated values because we are mixing the selection and evaluation of actions in the same network. Double DQN decouples them by making the action selection and evaluation based on two Q-networks updated in parallel: \n",
        "\n",
        "\\begin{align*}\n",
        "Q_{\\phi_1} (s, a) &\\leftarrow r + \\gamma Q_{\\phi_2}\\left(s', \\arg\\max_{a'} Q_{\\phi_1}(s', a')\\right) \\\\\n",
        "Q_{\\phi_2} (s, a) &\\leftarrow r + \\gamma Q_{\\phi_1}\\left(s', \\arg\\max_{a'} Q_{\\phi_2}(s', a')\\right)\n",
        "\\end{align*}\n",
        "\n",
        "If we apply double DQN on top of the target network trick, then in theory, we would have 4 networks: two Q networks and two target Q networks. In practice, however, we can simply use the target network as the second Q network. Since the target network is updated with a lag, it can be used as the second Q network to decouple action selection and evaluation, thus mitigating the overestimation issue. This boils down to a very simple change to the target DQN loss:\n",
        "\n",
        "$$\\ell(\\phi, B) = \\frac{1}{|B|}\\sum_{(s, a, r, s') \\in B}\\left(Q_{\\phi}(s, a) - \\left(r + \\gamma Q_{\\phi_{\\text{targ}}}\\left(s', \\arg\\max_{a'}Q_{\\phi}(s', a')\\right)\\right)\\right)^2$$\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "p4TxiT-hQAQ5"
      },
      "source": [
        "## Exercise 4: Double DQN\n",
        "\n",
        "In the following exercise, you will create the training method for the DoubleDQN agent. Since we are only updating the `train` from the original DQNAgent, you will work from a subclass of that agent. This is a helpful design pattern if you want to only change one part of an existing agent while keeping the rest of its behavior intact. In the case of DoubleDQN, you are only changing how the target Q value is determined."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "7JmLX1_gzqTa"
      },
      "source": [
        "class DoubleDQNAgent(DQNAgent):\n",
        "  \"\"\"Double DQN: q_net selects the next action, target_net evaluates it.\"\"\"\n",
        "  def train(self, state, action, reward, discount, next_state, frame):\n",
        "    # Add the step to our replay buffer. Use the agent's own buffer rather\n",
        "    # than relying on a module-level `replay_buffer` global (consistent with\n",
        "    # the self.replay_buffer.sample call below).\n",
        "    self.replay_buffer.add(state, action, reward, discount, next_state)\n",
        "    # Don't train if we aren't ready\n",
        "    if frame < self.train_start:\n",
        "      return\n",
        "    elif frame == self.train_start:\n",
        "      self.is_waiting = False\n",
        "\n",
        "    # Sample a batch of steps for training\n",
        "    batch = self.replay_buffer.sample(self.batch_size)\n",
        "\n",
        "    ####################################################################\n",
        "    # Fill in missing code below (...),\n",
        "    # then remove or comment the line below to test your function\n",
        "    raise NotImplementedError(\"Double DQN\")\n",
        "    ####################################################################\n",
        "\n",
        "    # First let's compute our predicted q-values\n",
        "    # We need to pass our batch of states (batch.state) to our q_net\n",
        "    q_actions = ...\n",
        "    # Then we select the q-values that correspond to the actions in our batch\n",
        "    # (batch.action) to get our predictions (hint: use the gather method)\n",
        "    q_pred = ...\n",
        "\n",
        "    # Now compute the q-value target (also known as the td target or bellman\n",
        "    # backup) using our target network. Since we don't need gradients for this,\n",
        "    # we disable autograd here to speed up performance\n",
        "    with torch.no_grad():\n",
        "      # Compute the action values from our q_net using a batch.next_state\n",
        "      q_next_actions = ...\n",
        "      # Use this to find the actions that correspond to the largest values\n",
        "      # (i.e. argmax)\n",
        "      max_acts = ...\n",
        "      # Next get the action values using our target_net and batch.next_state\n",
        "      q_target_actions = ...\n",
        "      # Then we select the q-values that correspond to the actions we just found\n",
        "      # (hint: use the gather method)\n",
        "      q_target = ...\n",
        "      # Next multiply by batch.discount and add batch.reward\n",
        "      q_target = ...\n",
        "\n",
        "    # Compute the MSE loss between the predicted and target values, then average\n",
        "    # over the batch\n",
        "    loss = ...\n",
        "\n",
        "    # backpropagation to update the q network\n",
        "    self.optimizer.zero_grad()\n",
        "    loss.backward()\n",
        "    self.optimizer.step()\n",
        "\n",
        "    # soft update target network with the updated q-network\n",
        "    soft_update_from_to(self.q_net, self.target_net, self.tau)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "SwVOPfnhMJNY"
      },
      "source": [
        "[*Click for solution*](https://github.com/CIS-522/course-content/blob/main/tutorials/W11_DeepRL/solutions/W11_Tutorial2_Solution_Ex04.py)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "JC48M9IdE-bW"
      },
      "source": [
        "Now that we have our DoubleDQNAgent, let's see how it fares on the Breakout environment. Again, set `n_steps` to something small but larger than the `train_start` value (e.g. 10k) to ensure everything is running smoothly. Once satisfied, set `n_steps` to 50k steps and run, which should take around 2.5 minutes."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "fFaxhbIu1fKt"
      },
      "source": [
        "n_steps = 50000\n",
        "gamma = 0.99\n",
        "epsilon = 0.1\n",
        "tau = 1e-2\n",
        "\n",
        "env = Environment('breakout', random_seed=522)\n",
        "state_dim = (env.n_channels, 10, 10)\n",
        "act_dim = 1\n",
        "buffer_size = 100000\n",
        "batch_size = 64\n",
        "train_start = 5000\n",
        "\n",
        "q_net = QNetwork(env.n_channels, env.num_actions()).to(device)\n",
        "target_net = QNetwork(env.n_channels, env.num_actions()).to(device)\n",
        "policy = epsilon_greedy(env.num_actions(), epsilon)\n",
        "optimizer = torch.optim.Adam(q_net.parameters(), lr=1e-3)\n",
        "replay_buffer = ReplayBuffer(state_dim, act_dim, buffer_size)\n",
        "\n",
        "agent = DoubleDQNAgent(policy, q_net, target_net, optimizer, tau, replay_buffer,\n",
        "                       batch_size, train_start)\n",
        "eps_b_ddqn = learn_env(env, agent, gamma, n_steps)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "B4SGySWG2jmG",
        "cellView": "form"
      },
      "source": [
        "#@markdown Run cell to plot reward curve\n",
        "plt.figure(figsize=(8, 4))\n",
        "plt.plot(eps_b_ddqn[0])\n",
        "plt.title('breakout reward curve')\n",
        "plt.xlabel('episode')\n",
        "plt.ylabel('return')\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xTBc4-Hh8EQU",
        "cellView": "form"
      },
      "source": [
        "#@markdown Run cell to generate video\n",
        "ep_states, ep_rew = simulate(env, agent)\n",
        "generate_video(ep_states)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "lSKx58QaFkVM"
      },
      "source": [
        "We should see similar performance initially to that of the DQNAgent, but a jump in some of the high scores after around 1000 episodes and a slightly better average result within the first 50k steps. You may have noticed however that this came at a performance hit in terms of training speed over the DQNAgent, which in turn was slower to train than the QNetworkAgent."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "3YIvtNpuGD5B"
      },
      "source": [
        "#@markdown What is the source of the slowdown per 10,000 steps across the three different agents?\n",
        "slowdown = \"\" #@param {type:\"string\"}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "YAQ5XEg1Kbvk"
      },
      "source": [
        "[*Click for solution*](https://github.com/CIS-522/course-content/blob/main/tutorials/W11_DeepRL/solutions/slowdown.md)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "YIOcTfE0Y6c6"
      },
      "source": [
        "---\n",
        "# Section 5: Extending to Continuous Actions\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "pabtBGHk6PM3"
      },
      "source": [
        "#@title Video: Deep Deterministic Policy Gradients\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"ljHwbY9QrJU\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "import time\n",
        "try: t0;\n",
        "except NameError: t0=time.time()\n",
        "\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "veJ0eiOl6Qh9"
      },
      "source": [
        "So far, we've seen a few successes of DQN on the MinAtar environment. Can we use DQN to solve all RL problems? Unfortunately, one glaring assumption of DQN is discrete actions. When computing the Q target or deriving the greedy policy, we need to take the argmax of Q-values over actions. If the action space is continuous, then searching for the argmax is a nontrivial optimization problem. Since we need to perform this search in each step of training and inference, the computation time becomes intractable.\n",
        "\n",
        "[Deep Deterministic Policy Gradient](https://arxiv.org/abs/1509.02971) extends DQN to continuous actions by introducing an actor network. The goal of the actor network is to predict the action that maximizes the Q-value given the current state. This effectively amortizes the argmax search into training. Concretely, let $\\mu_{\\theta}$ be an actor network parameterized by $\\theta$. The actor's objective is to output an action that maximizes its Q-value: $$\\max_{\\theta} E_{s \\sim D}\\left[Q_{\\phi}(s, \\mu_{\\theta}(s))\\right]$$ and the Q network's loss becomes $$L(\\phi, D) = E_{(s, a, r, s') \\sim D}\\left[\\left(Q_\\phi(s, a) - (r + \\gamma Q(s', \\mu_{\\theta}(s')))\\right)^2\\right] $$ where we have replaced $\\max_{a'}Q(s', a')$ with $Q(s', \\mu_\\theta(s'))$. During training, we optimize the actor network and the Q network jointly using gradient descent.\n",
        "\n",
        "You may have noticed that the actor network looks like a policy. Indeed, DDPG belongs to a family of RL algorithms called actor-critic algorithms. An actor-critic algorithm typically consists of a policy and a value function that are jointly optimized. The value function guides policy learning, and inference is done with the policy alone. We will discuss policy optimization and actor-critic algorithms in greater detail next week. \n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Zz4XQ10F9EgM"
      },
      "source": [
        "---\n",
        "# Response and Feedback forms"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "PZyBVdLw9I72",
        "cellView": "form"
      },
      "source": [
        "import time\n",
        "import numpy as np\n",
        "import urllib.parse\n",
        "from IPython.display import IFrame\n",
        "\n",
        "#@markdown #Run Cell to Show Airtable Form\n",
        "#@markdown ##**Confirm your answers and then click \"Submit\"**\n",
        "\n",
        "\n",
        "def prefill_form(src, fields: dict):\n",
        "  '''\n",
        "  src: the original src url to embed the form\n",
        "  fields: a dictionary of field:value pairs,\n",
        "  e.g. {\"pennkey\": my_pennkey, \"location\": my_location}\n",
        "  '''\n",
        "  prefill_fields = {}\n",
        "  for key in fields:\n",
        "      new_key = 'prefill_' + key\n",
        "      prefill_fields[new_key] = fields[key]\n",
        "  prefills = urllib.parse.urlencode(prefill_fields)\n",
        "  src = src + prefills\n",
        "  return src\n",
        "\n",
        "\n",
        "#autofill time if it is not present\n",
        "try: t0;\n",
        "except NameError: t0 = time.time()\n",
        "try: t1;\n",
        "except NameError: t1 = time.time()\n",
        "try: t2;\n",
        "except NameError: t2 = time.time()\n",
        "try: t3;\n",
        "except NameError: t3 = time.time()\n",
        "try: t4;\n",
        "except NameError: t4 = time.time()\n",
        "try: t5;\n",
        "except NameError: t5 = time.time()\n",
        "try: t6;\n",
        "except NameError: t6 = time.time()\n",
        "\n",
        "#autofill fields if they are not present\n",
        "#a missing pennkey and pod will result in an Airtable warning\n",
        "#which is easily fixed user-side.\n",
        "try: my_pennkey;\n",
        "except NameError: my_pennkey = \"\"\n",
        "try: my_pod;\n",
        "except NameError: my_pod = \"Select\"\n",
        "try: learning_from_previous_tutorial;\n",
        "except NameError: learning_from_previous_tutorial = \"\"\n",
        "try: num_states;\n",
        "except NameError: num_states = \"\"\n",
        "try: characteristic;\n",
        "except NameError: characteristic = \"\"\n",
        "try: purpose;\n",
        "except NameError: purpose = \"\"\n",
        "try: slowdown;\n",
        "except NameError: slowdown = \"\"\n",
        "\n",
        "times = np.array([t1,t2,t3,t4,t5,t6])-t0\n",
        "\n",
        "fields = {\"pennkey\": my_pennkey,\n",
        "          \"pod\": my_pod,\n",
        "          \"learning_from_previous_tutorial\": learning_from_previous_tutorial,\n",
        "          \"num_states\": num_states,\n",
        "          \"characteristic\": characteristic,\n",
        "          \"purpose\": purpose,\n",
        "          \"slowdown\": slowdown,\n",
        "          \"cumulative_times\": times}\n",
        "\n",
        "src = \"https://airtable.com/embed/shr9rdodIrMvW4OuG?\"\n",
        "\n",
        "\n",
        "# now instead of the original source url, we do: src = prefill_form(src, fields)\n",
        "display(IFrame(src = prefill_form(src, fields), width = 800, height = 400))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "cnnPT7659Or5"
      },
      "source": [
        "## Feedback\n",
        "How could this session have been better? How happy are you in your group? How do you feel right now?\n",
        "\n",
        "Feel free to use the embeded form below or use this link:\n",
        "<a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://airtable.com/shrNSJ5ECXhNhsYss\">https://airtable.com/shrNSJ5ECXhNhsYss</a>"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "sVen1xmg9YiI"
      },
      "source": [
        "display(IFrame(src=\"https://airtable.com/embed/shrNSJ5ECXhNhsYss?backgroundColor=red\", width = 800, height = 400))"
      ],
      "execution_count": null,
      "outputs": []
    }
  ]
}