{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "cYLZzIbjk62B"
      },
      "source": [
        "# Waymo Open Dataset Motion Tutorial\n",
        "\n",
        "- Website: https://waymo.com/open\n",
        "- GitHub: https://github.com/waymo-research/waymo-open-dataset\n",
        "\n",
        "This tutorial demonstrates:\n",
        "- How to decode and interpret the data.\n",
        "- How to train a simple model with Tensorflow.\n",
        "\n",
        "Visit the [Waymo Open Dataset Website](https://waymo.com/open) to download the full dataset.\n",
        "\n",
        "To use, open this notebook in [Colab](https://colab.research.google.com).\n",
        "\n",
        "Uncheck the box \"Reset all runtimes before running\" if you run this colab directly from the remote kernel. Alternatively, you can make a copy before trying to run it by following \"File \u003e Save copy in Drive ...\"."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "5ez4Nsk06Sqd"
      },
      "source": [
        "# Package installation\n",
        "\n",
        "Please follow the instructions in [tutorial.ipynb](https://github.com/waymo-research/waymo-open-dataset/blob/master/tutorial/tutorial.ipynb)."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "wjT3Rdd4lSqC"
      },
      "source": [
        "# Imports and global definitions"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "xdEcN6WilcBn"
      },
      "outputs": [],
      "source": [
        "# Data location. Please edit.\n",
        "\n",
        "# A tfrecord containing tf.Example protos as downloaded from the Waymo dataset\n",
        "# webpage.\n",
        "\n",
        "# Replace this path with your own tfrecords.\n",
        "FILENAME = '/content/waymo-od-motion/tutorial/.../tfexample.tfrecord'"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "M5gzSlBTlTiS"
      },
      "outputs": [],
      "source": [
        "import math\n",
        "import os\n",
        "import uuid\n",
        "import time\n",
        "\n",
        "from matplotlib import cm\n",
        "import matplotlib.animation as animation\n",
        "import matplotlib.pyplot as plt\n",
        "\n",
        "import numpy as np\n",
        "from IPython.display import HTML\n",
        "import itertools\n",
        "import tensorflow as tf\n",
        "\n",
        "from google.protobuf import text_format\n",
        "from waymo_open_dataset.metrics.ops import py_metrics_ops\n",
        "from waymo_open_dataset.metrics.python import config_util_py as config_util\n",
        "from waymo_open_dataset.protos import motion_metrics_pb2\n",
        "\n",
        "# Example field definition\n",
        "roadgraph_features = {\n",
        "    'roadgraph_samples/dir':\n",
        "        tf.io.FixedLenFeature([20000, 3], tf.float32, default_value=None),\n",
        "    'roadgraph_samples/id':\n",
        "        tf.io.FixedLenFeature([20000, 1], tf.int64, default_value=None),\n",
        "    'roadgraph_samples/type':\n",
        "        tf.io.FixedLenFeature([20000, 1], tf.int64, default_value=None),\n",
        "    'roadgraph_samples/valid':\n",
        "        tf.io.FixedLenFeature([20000, 1], tf.int64, default_value=None),\n",
        "    'roadgraph_samples/xyz':\n",
        "        tf.io.FixedLenFeature([20000, 3], tf.float32, default_value=None),\n",
        "}\n",
        "\n",
        "# Features of other agents.\n",
        "state_features = {\n",
        "    'state/id':\n",
        "        tf.io.FixedLenFeature([128], tf.float32, default_value=None),\n",
        "    'state/type':\n",
        "        tf.io.FixedLenFeature([128], tf.float32, default_value=None),\n",
        "    'state/is_sdc':\n",
        "        tf.io.FixedLenFeature([128], tf.int64, default_value=None),\n",
        "    'state/tracks_to_predict':\n",
        "        tf.io.FixedLenFeature([128], tf.int64, default_value=None),\n",
        "    'state/current/bbox_yaw':\n",
        "        tf.io.FixedLenFeature([128, 1], tf.float32, default_value=None),\n",
        "    'state/current/height':\n",
        "        tf.io.FixedLenFeature([128, 1], tf.float32, default_value=None),\n",
        "    'state/current/length':\n",
        "        tf.io.FixedLenFeature([128, 1], tf.float32, default_value=None),\n",
        "    'state/current/timestamp_micros':\n",
        "        tf.io.FixedLenFeature([128, 1], tf.int64, default_value=None),\n",
        "    'state/current/valid':\n",
        "        tf.io.FixedLenFeature([128, 1], tf.int64, default_value=None),\n",
        "    'state/current/vel_yaw':\n",
        "        tf.io.FixedLenFeature([128, 1], tf.float32, default_value=None),\n",
        "    'state/current/velocity_x':\n",
        "        tf.io.FixedLenFeature([128, 1], tf.float32, default_value=None),\n",
        "    'state/current/velocity_y':\n",
        "        tf.io.FixedLenFeature([128, 1], tf.float32, default_value=None),\n",
        "    'state/current/width':\n",
        "        tf.io.FixedLenFeature([128, 1], tf.float32, default_value=None),\n",
        "    'state/current/x':\n",
        "        tf.io.FixedLenFeature([128, 1], tf.float32, default_value=None),\n",
        "    'state/current/y':\n",
        "        tf.io.FixedLenFeature([128, 1], tf.float32, default_value=None),\n",
        "    'state/current/z':\n",
        "        tf.io.FixedLenFeature([128, 1], tf.float32, default_value=None),\n",
        "    'state/future/bbox_yaw':\n",
        "        tf.io.FixedLenFeature([128, 80], tf.float32, default_value=None),\n",
        "    'state/future/height':\n",
        "        tf.io.FixedLenFeature([128, 80], tf.float32, default_value=None),\n",
        "    'state/future/length':\n",
        "        tf.io.FixedLenFeature([128, 80], tf.float32, default_value=None),\n",
        "    'state/future/timestamp_micros':\n",
        "        tf.io.FixedLenFeature([128, 80], tf.int64, default_value=None),\n",
        "    'state/future/valid':\n",
        "        tf.io.FixedLenFeature([128, 80], tf.int64, default_value=None),\n",
        "    'state/future/vel_yaw':\n",
        "        tf.io.FixedLenFeature([128, 80], tf.float32, default_value=None),\n",
        "    'state/future/velocity_x':\n",
        "        tf.io.FixedLenFeature([128, 80], tf.float32, default_value=None),\n",
        "    'state/future/velocity_y':\n",
        "        tf.io.FixedLenFeature([128, 80], tf.float32, default_value=None),\n",
        "    'state/future/width':\n",
        "        tf.io.FixedLenFeature([128, 80], tf.float32, default_value=None),\n",
        "    'state/future/x':\n",
        "        tf.io.FixedLenFeature([128, 80], tf.float32, default_value=None),\n",
        "    'state/future/y':\n",
        "        tf.io.FixedLenFeature([128, 80], tf.float32, default_value=None),\n",
        "    'state/future/z':\n",
        "        tf.io.FixedLenFeature([128, 80], tf.float32, default_value=None),\n",
        "    'state/past/bbox_yaw':\n",
        "        tf.io.FixedLenFeature([128, 10], tf.float32, default_value=None),\n",
        "    'state/past/height':\n",
        "        tf.io.FixedLenFeature([128, 10], tf.float32, default_value=None),\n",
        "    'state/past/length':\n",
        "        tf.io.FixedLenFeature([128, 10], tf.float32, default_value=None),\n",
        "    'state/past/timestamp_micros':\n",
        "        tf.io.FixedLenFeature([128, 10], tf.int64, default_value=None),\n",
        "    'state/past/valid':\n",
        "        tf.io.FixedLenFeature([128, 10], tf.int64, default_value=None),\n",
        "    'state/past/vel_yaw':\n",
        "        tf.io.FixedLenFeature([128, 10], tf.float32, default_value=None),\n",
        "    'state/past/velocity_x':\n",
        "        tf.io.FixedLenFeature([128, 10], tf.float32, default_value=None),\n",
        "    'state/past/velocity_y':\n",
        "        tf.io.FixedLenFeature([128, 10], tf.float32, default_value=None),\n",
        "    'state/past/width':\n",
        "        tf.io.FixedLenFeature([128, 10], tf.float32, default_value=None),\n",
        "    'state/past/x':\n",
        "        tf.io.FixedLenFeature([128, 10], tf.float32, default_value=None),\n",
        "    'state/past/y':\n",
        "        tf.io.FixedLenFeature([128, 10], tf.float32, default_value=None),\n",
        "    'state/past/z':\n",
        "        tf.io.FixedLenFeature([128, 10], tf.float32, default_value=None),\n",
        "}\n",
        "\n",
        "traffic_light_features = {\n",
        "    'traffic_light_state/current/state':\n",
        "        tf.io.FixedLenFeature([1, 16], tf.int64, default_value=None),\n",
        "    'traffic_light_state/current/valid':\n",
        "        tf.io.FixedLenFeature([1, 16], tf.int64, default_value=None),\n",
        "    'traffic_light_state/current/x':\n",
        "        tf.io.FixedLenFeature([1, 16], tf.float32, default_value=None),\n",
        "    'traffic_light_state/current/y':\n",
        "        tf.io.FixedLenFeature([1, 16], tf.float32, default_value=None),\n",
        "    'traffic_light_state/current/z':\n",
        "        tf.io.FixedLenFeature([1, 16], tf.float32, default_value=None),\n",
        "    'traffic_light_state/past/state':\n",
        "        tf.io.FixedLenFeature([10, 16], tf.int64, default_value=None),\n",
        "    'traffic_light_state/past/valid':\n",
        "        tf.io.FixedLenFeature([10, 16], tf.int64, default_value=None),\n",
        "    'traffic_light_state/past/x':\n",
        "        tf.io.FixedLenFeature([10, 16], tf.float32, default_value=None),\n",
        "    'traffic_light_state/past/y':\n",
        "        tf.io.FixedLenFeature([10, 16], tf.float32, default_value=None),\n",
        "    'traffic_light_state/past/z':\n",
        "        tf.io.FixedLenFeature([10, 16], tf.float32, default_value=None),\n",
        "}\n",
        "\n",
        "features_description = {}\n",
        "features_description.update(roadgraph_features)\n",
        "features_description.update(state_features)\n",
        "features_description.update(traffic_light_features)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "trAv9YGrvYnc"
      },
      "source": [
        "# Visualize TF Example sample"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "iWnysu4X7Wkt"
      },
      "source": [
        "## Create Dataset."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "TpEZq1EMtXV9"
      },
      "outputs": [],
      "source": [
        "dataset = tf.data.TFRecordDataset(FILENAME, compression_type='')\n",
        "data = next(dataset.as_numpy_iterator())\n",
        "parsed = tf.io.parse_single_example(data, features_description)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Zdc8CBg27dtn"
      },
      "source": [
        "## Generate visualization images."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "utTE9Mtgx3Fq"
      },
      "outputs": [],
      "source": [
        "def create_figure_and_axes(size_pixels):\n",
        "  \"\"\"Initializes a unique figure and axes for plotting.\"\"\"\n",
        "  fig, ax = plt.subplots(1, 1, num=uuid.uuid4())\n",
        "\n",
        "  # Sets output image to pixel resolution.\n",
        "  dpi = 100\n",
        "  size_inches = size_pixels / dpi\n",
        "  fig.set_size_inches([size_inches, size_inches])\n",
        "  fig.set_dpi(dpi)\n",
        "  fig.set_facecolor('white')\n",
        "  ax.set_facecolor('white')\n",
        "  ax.xaxis.label.set_color('black')\n",
        "  ax.tick_params(axis='x', colors='black')\n",
        "  ax.yaxis.label.set_color('black')\n",
        "  ax.tick_params(axis='y', colors='black')\n",
        "  fig.set_tight_layout(True)\n",
        "  ax.grid(False)\n",
        "  return fig, ax\n",
        "\n",
        "\n",
        "def fig_canvas_image(fig):\n",
        "  \"\"\"Returns a [H, W, 3] uint8 np.array image from fig.canvas.tostring_rgb().\"\"\"\n",
        "  # Just enough margin in the figure to display xticks and yticks.\n",
        "  fig.subplots_adjust(\n",
        "      left=0.08, bottom=0.08, right=0.98, top=0.98, wspace=0.0, hspace=0.0)\n",
        "  fig.canvas.draw()\n",
        "  # NOTE(review): FigureCanvasAgg.tostring_rgb() is deprecated/removed in\n",
        "  # recent Matplotlib (buffer_rgba() is the replacement) — confirm the\n",
        "  # pinned matplotlib version before running.\n",
        "  data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n",
        "  return data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n",
        "\n",
        "\n",
        "def get_colormap(num_agents):\n",
        "  \"\"\"Compute a color map array of shape [num_agents, 4].\"\"\"\n",
        "  # NOTE(review): cm.get_cmap() is removed in Matplotlib 3.9+ — confirm the\n",
        "  # pinned version or switch to matplotlib.colormaps / plt.get_cmap.\n",
        "  colors = cm.get_cmap('jet', num_agents)\n",
        "  colors = colors(range(num_agents))\n",
        "  np.random.shuffle(colors)\n",
        "  return colors\n",
        "\n",
        "\n",
        "def get_viewport(all_states, all_states_mask):\n",
        "  \"\"\"Gets the region containing the data.\n",
        "\n",
        "  Args:\n",
        "    all_states: states of agents as an array of shape [num_agents, num_steps,\n",
        "      2].\n",
        "    all_states_mask: binary mask of shape [num_agents, num_steps] for\n",
        "      `all_states`.\n",
        "\n",
        "  Returns:\n",
        "    center_y: float. y coordinate for center of data.\n",
        "    center_x: float. x coordinate for center of data.\n",
        "    width: float. Width of data.\n",
        "  \"\"\"\n",
        "  valid_states = all_states[all_states_mask]\n",
        "  all_y = valid_states[..., 1]\n",
        "  all_x = valid_states[..., 0]\n",
        "\n",
        "  center_y = (np.max(all_y) + np.min(all_y)) / 2\n",
        "  center_x = (np.max(all_x) + np.min(all_x)) / 2\n",
        "\n",
        "  range_y = np.ptp(all_y)\n",
        "  range_x = np.ptp(all_x)\n",
        "\n",
        "  width = max(range_y, range_x)\n",
        "\n",
        "  return center_y, center_x, width\n",
        "\n",
        "\n",
        "def visualize_one_step(states,\n",
        "                       mask,\n",
        "                       roadgraph,\n",
        "                       title,\n",
        "                       center_y,\n",
        "                       center_x,\n",
        "                       width,\n",
        "                       color_map,\n",
        "                       size_pixels=1000):\n",
        "  \"\"\"Generate visualization for a single step.\"\"\"\n",
        "\n",
        "  # Create figure and axes.\n",
        "  fig, ax = create_figure_and_axes(size_pixels=size_pixels)\n",
        "\n",
        "  # Plot roadgraph.\n",
        "  rg_pts = roadgraph[:, :2].T\n",
        "  ax.plot(rg_pts[0, :], rg_pts[1, :], 'k.', alpha=1, ms=2)\n",
        "\n",
        "  masked_x = states[:, 0][mask]\n",
        "  masked_y = states[:, 1][mask]\n",
        "  colors = color_map[mask]\n",
        "\n",
        "  # Plot agent current position.\n",
        "  ax.scatter(\n",
        "      masked_x,\n",
        "      masked_y,\n",
        "      marker='o',\n",
        "      linewidths=3,\n",
        "      color=colors,\n",
        "  )\n",
        "\n",
        "  # Title.\n",
        "  ax.set_title(title)\n",
        "\n",
        "  # Set axes.  Should be at least 10m on a side and cover the full extent\n",
        "  # of the agents' trajectories (size = max(10, width * 1.0)).\n",
        "  size = max(10, width * 1.0)\n",
        "  ax.axis([\n",
        "      -size / 2 + center_x, size / 2 + center_x, -size / 2 + center_y,\n",
        "      size / 2 + center_y\n",
        "  ])\n",
        "  ax.set_aspect('equal')\n",
        "\n",
        "  image = fig_canvas_image(fig)\n",
        "  plt.close(fig)\n",
        "  return image\n",
        "\n",
        "\n",
        "def visualize_all_agents_smooth(\n",
        "    decoded_example,\n",
        "    size_pixels=1000,\n",
        "):\n",
        "  \"\"\"Visualizes all agent predicted trajectories in a series of images.\n",
        "\n",
        "  Args:\n",
        "    decoded_example: Dictionary containing agent info about all modeled agents.\n",
        "    size_pixels: The size in pixels of the output image.\n",
        "\n",
        "  Returns:\n",
        "    T of [H, W, 3] uint8 np.arrays of the drawn matplotlib's figure canvas.\n",
        "  \"\"\"\n",
        "  # [num_agents, num_past_steps, 2] float32.\n",
        "  past_states = tf.stack(\n",
        "      [decoded_example['state/past/x'], decoded_example['state/past/y']],\n",
        "      -1).numpy()\n",
        "  past_states_mask = decoded_example['state/past/valid'].numpy() \u003e 0.0\n",
        "\n",
        "  # [num_agents, 1, 2] float32.\n",
        "  current_states = tf.stack(\n",
        "      [decoded_example['state/current/x'], decoded_example['state/current/y']],\n",
        "      -1).numpy()\n",
        "  current_states_mask = decoded_example['state/current/valid'].numpy() \u003e 0.0\n",
        "\n",
        "  # [num_agents, num_future_steps, 2] float32.\n",
        "  future_states = tf.stack(\n",
        "      [decoded_example['state/future/x'], decoded_example['state/future/y']],\n",
        "      -1).numpy()\n",
        "  future_states_mask = decoded_example['state/future/valid'].numpy() \u003e 0.0\n",
        "\n",
        "  # [num_points, 3] float32.\n",
        "  roadgraph_xyz = decoded_example['roadgraph_samples/xyz'].numpy()\n",
        "\n",
        "  num_agents, num_past_steps, _ = past_states.shape\n",
        "  num_future_steps = future_states.shape[1]\n",
        "\n",
        "  color_map = get_colormap(num_agents)\n",
        "\n",
        "  # [num_agents, num_past_steps + 1 + num_future_steps, depth] float32.\n",
        "  all_states = np.concatenate([past_states, current_states, future_states], 1)\n",
        "\n",
        "  # [num_agents, num_past_steps + 1 + num_future_steps] bool (each mask\n",
        "  # comes from a `valid \u003e 0.0` comparison above).\n",
        "  all_states_mask = np.concatenate(\n",
        "      [past_states_mask, current_states_mask, future_states_mask], 1)\n",
        "\n",
        "  center_y, center_x, width = get_viewport(all_states, all_states_mask)\n",
        "\n",
        "  images = []\n",
        "\n",
        "  # Generate images from past time steps.\n",
        "  for i, (s, m) in enumerate(\n",
        "      zip(\n",
        "          np.split(past_states, num_past_steps, 1),\n",
        "          np.split(past_states_mask, num_past_steps, 1))):\n",
        "    im = visualize_one_step(s[:, 0], m[:, 0], roadgraph_xyz,\n",
        "                            'past: %d' % (num_past_steps - i), center_y,\n",
        "                            center_x, width, color_map, size_pixels)\n",
        "    images.append(im)\n",
        "\n",
        "  # Generate one image for the current time step.\n",
        "  s = current_states\n",
        "  m = current_states_mask\n",
        "\n",
        "  im = visualize_one_step(s[:, 0], m[:, 0], roadgraph_xyz, 'current', center_y,\n",
        "                          center_x, width, color_map, size_pixels)\n",
        "  images.append(im)\n",
        "\n",
        "  # Generate images from future time steps.\n",
        "  for i, (s, m) in enumerate(\n",
        "      zip(\n",
        "          np.split(future_states, num_future_steps, 1),\n",
        "          np.split(future_states_mask, num_future_steps, 1))):\n",
        "    im = visualize_one_step(s[:, 0], m[:, 0], roadgraph_xyz,\n",
        "                            'future: %d' % (i + 1), center_y, center_x, width,\n",
        "                            color_map, size_pixels)\n",
        "    images.append(im)\n",
        "\n",
        "  return images\n",
        "\n",
        "\n",
        "images = visualize_all_agents_smooth(parsed)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "OrIZjUHG7hM3"
      },
      "source": [
        "## Display animation."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "tt2IeGiG0eny"
      },
      "outputs": [],
      "source": [
        "def create_animation(images):\n",
        "  \"\"\" Creates a Matplotlib animation of the given images.\n",
        "\n",
        "  Args:\n",
        "    images: A list of numpy arrays representing the images.\n",
        "\n",
        "  Returns:\n",
        "    A matplotlib.animation.Animation.\n",
        "\n",
        "  Usage:\n",
        "    anim = create_animation(images)\n",
        "    anim.save('/tmp/animation.avi')\n",
        "    HTML(anim.to_html5_video())\n",
        "  \"\"\"\n",
        "\n",
        "  plt.ioff()\n",
        "  fig, ax = plt.subplots()\n",
        "  dpi = 100\n",
        "  size_inches = 1000 / dpi\n",
        "  fig.set_size_inches([size_inches, size_inches])\n",
        "  plt.ion()\n",
        "\n",
        "  def animate_func(i):\n",
        "    ax.imshow(images[i])\n",
        "    ax.set_xticks([])\n",
        "    ax.set_yticks([])\n",
        "    ax.grid('off')\n",
        "\n",
        "  # NOTE(review): frames=len(images) // 2 animates only the first half of\n",
        "  # `images` — confirm this truncation is intentional.\n",
        "  anim = animation.FuncAnimation(\n",
        "      fig, animate_func, frames=len(images) // 2, interval=100)\n",
        "  plt.close(fig)\n",
        "  return anim\n",
        "\n",
        "\n",
        "anim = create_animation(images[::5])\n",
        "HTML(anim.to_html5_video())"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "wdOQTZAiuKdQ"
      },
      "source": [
        "# Simple MLP model with TF\n",
        "\n",
        "Note that this is a very simple example model to demonstrate inputs parsing and metrics computation. Not at all competitive."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "b_5G9lx9uK9B"
      },
      "outputs": [],
      "source": [
        "def _parse(value):\n",
        "  decoded_example = tf.io.parse_single_example(value, features_description)\n",
        "\n",
        "  past_states = tf.stack([\n",
        "      decoded_example['state/past/x'], decoded_example['state/past/y'],\n",
        "      decoded_example['state/past/length'], decoded_example['state/past/width'],\n",
        "      decoded_example['state/past/bbox_yaw'],\n",
        "      decoded_example['state/past/velocity_x'],\n",
        "      decoded_example['state/past/velocity_y']\n",
        "  ], -1)\n",
        "\n",
        "  cur_states = tf.stack([\n",
        "      decoded_example['state/current/x'], decoded_example['state/current/y'],\n",
        "      decoded_example['state/current/length'],\n",
        "      decoded_example['state/current/width'],\n",
        "      decoded_example['state/current/bbox_yaw'],\n",
        "      decoded_example['state/current/velocity_x'],\n",
        "      decoded_example['state/current/velocity_y']\n",
        "  ], -1)\n",
        "\n",
        "  input_states = tf.concat([past_states, cur_states], 1)[..., :2]\n",
        "\n",
        "  future_states = tf.stack([\n",
        "      decoded_example['state/future/x'], decoded_example['state/future/y'],\n",
        "      decoded_example['state/future/length'],\n",
        "      decoded_example['state/future/width'],\n",
        "      decoded_example['state/future/bbox_yaw'],\n",
        "      decoded_example['state/future/velocity_x'],\n",
        "      decoded_example['state/future/velocity_y']\n",
        "  ], -1)\n",
        "\n",
        "  gt_future_states = tf.concat([past_states, cur_states, future_states], 1)\n",
        "\n",
        "  past_is_valid = decoded_example['state/past/valid'] \u003e 0\n",
        "  current_is_valid = decoded_example['state/current/valid'] \u003e 0\n",
        "  future_is_valid = decoded_example['state/future/valid'] \u003e 0\n",
        "  gt_future_is_valid = tf.concat(\n",
        "      [past_is_valid, current_is_valid, future_is_valid], 1)\n",
        "\n",
        "  # If a sample was not seen at all in the past, we declare the sample as\n",
        "  # invalid.\n",
        "  sample_is_valid = tf.reduce_any(\n",
        "      tf.concat([past_is_valid, current_is_valid], 1), 1)\n",
        "\n",
        "  inputs = {\n",
        "      'input_states': input_states,\n",
        "      'gt_future_states': gt_future_states,\n",
        "      'gt_future_is_valid': gt_future_is_valid,\n",
        "      'object_type': decoded_example['state/type'],\n",
        "      'tracks_to_predict': decoded_example['state/tracks_to_predict'] \u003e 0,\n",
        "      'sample_is_valid': sample_is_valid,\n",
        "  }\n",
        "  return inputs\n",
        "\n",
        "\n",
        "def _default_metrics_config():\n",
        "  config = motion_metrics_pb2.MotionMetricsConfig()\n",
        "  config_text = \"\"\"\n",
        "  track_steps_per_second: 10\n",
        "  prediction_steps_per_second: 2\n",
        "  track_history_samples: 10\n",
        "  track_future_samples: 80\n",
        "  speed_lower_bound: 1.4\n",
        "  speed_upper_bound: 11.0\n",
        "  speed_scale_lower: 0.5\n",
        "  speed_scale_upper: 1.0\n",
        "  step_configurations {\n",
        "    measurement_step: 5\n",
        "    lateral_miss_threshold: 1.0\n",
        "    longitudinal_miss_threshold: 2.0\n",
        "  }\n",
        "  step_configurations {\n",
        "    measurement_step: 9\n",
        "    lateral_miss_threshold: 1.8\n",
        "    longitudinal_miss_threshold: 3.6\n",
        "  }\n",
        "  step_configurations {\n",
        "    measurement_step: 15\n",
        "    lateral_miss_threshold: 3.0\n",
        "    longitudinal_miss_threshold: 6.0\n",
        "  }\n",
        "  max_predictions: 6\n",
        "  \"\"\"\n",
        "  text_format.Parse(config_text, config)\n",
        "  return config\n",
        "\n",
        "\n",
        "class SimpleModel(tf.keras.Model):\n",
        "  \"\"\"A simple one-layer regressor.\"\"\"\n",
        "\n",
        "  def __init__(self, num_agents_per_scenario, num_states_steps,\n",
        "               num_future_steps):\n",
        "    super(SimpleModel, self).__init__()\n",
        "    self._num_agents_per_scenario = num_agents_per_scenario\n",
        "    self._num_states_steps = num_states_steps\n",
        "    self._num_future_steps = num_future_steps\n",
        "    self.regressor = tf.keras.layers.Dense(num_future_steps * 2)\n",
        "\n",
        "  def call(self, states):\n",
        "    states = tf.reshape(states, (-1, self._num_states_steps * 2))\n",
        "    pred = self.regressor(states)\n",
        "    pred = tf.reshape(\n",
        "        pred, [-1, self._num_agents_per_scenario, self._num_future_steps, 2])\n",
        "    return pred\n",
        "\n",
        "\n",
        "class MotionMetrics(tf.keras.metrics.Metric):\n",
        "  \"\"\"Wrapper for motion metrics computation.\"\"\"\n",
        "\n",
        "  def __init__(self, config):\n",
        "    super().__init__()\n",
        "    self._prediction_trajectory = []\n",
        "    self._prediction_score = []\n",
        "    self._ground_truth_trajectory = []\n",
        "    self._ground_truth_is_valid = []\n",
        "    self._prediction_ground_truth_indices = []\n",
        "    self._prediction_ground_truth_indices_mask = []\n",
        "    self._object_type = []\n",
        "    self._metrics_config = config\n",
        "\n",
        "  def reset_state(self):\n",
        "    # Bug fix: the method previously lacked the `self` parameter, so any\n",
        "    # call (including Keras' own epoch-boundary reset) raised TypeError\n",
        "    # and every `self.` reference below was an unresolved name.\n",
        "    # Clears all accumulated batches so the metric can be reused.\n",
        "    self._prediction_trajectory = []\n",
        "    self._prediction_score = []\n",
        "    self._ground_truth_trajectory = []\n",
        "    self._ground_truth_is_valid = []\n",
        "    self._prediction_ground_truth_indices = []\n",
        "    self._prediction_ground_truth_indices_mask = []\n",
        "    self._object_type = []\n",
        "\n",
        "  def update_state(self, prediction_trajectory, prediction_score,\n",
        "                   ground_truth_trajectory, ground_truth_is_valid,\n",
        "                   prediction_ground_truth_indices,\n",
        "                   prediction_ground_truth_indices_mask, object_type):\n",
        "    self._prediction_trajectory.append(prediction_trajectory)\n",
        "    self._prediction_score.append(prediction_score)\n",
        "    self._ground_truth_trajectory.append(ground_truth_trajectory)\n",
        "    self._ground_truth_is_valid.append(ground_truth_is_valid)\n",
        "    self._prediction_ground_truth_indices.append(\n",
        "        prediction_ground_truth_indices)\n",
        "    self._prediction_ground_truth_indices_mask.append(\n",
        "        prediction_ground_truth_indices_mask)\n",
        "    self._object_type.append(object_type)\n",
        "\n",
        "  def result(self):\n",
        "    # [batch_size, num_preds, 1, 1, steps, 2].\n",
        "    # The ones indicate top_k = 1, num_agents_per_joint_prediction = 1.\n",
        "    prediction_trajectory = tf.concat(self._prediction_trajectory, 0)\n",
        "    # [batch_size, num_preds, 1].\n",
        "    prediction_score = tf.concat(self._prediction_score, 0)\n",
        "    # [batch_size, num_agents, gt_steps, 7].\n",
        "    ground_truth_trajectory = tf.concat(self._ground_truth_trajectory, 0)\n",
        "    # [batch_size, num_agents, gt_steps].\n",
        "    ground_truth_is_valid = tf.concat(self._ground_truth_is_valid, 0)\n",
        "    # [batch_size, num_preds, 1].\n",
        "    prediction_ground_truth_indices = tf.concat(\n",
        "        self._prediction_ground_truth_indices, 0)\n",
        "    # [batch_size, num_preds, 1].\n",
        "    prediction_ground_truth_indices_mask = tf.concat(\n",
        "        self._prediction_ground_truth_indices_mask, 0)\n",
        "    # [batch_size, num_agents].\n",
        "    object_type = tf.cast(tf.concat(self._object_type, 0), tf.int64)\n",
        "\n",
        "    # We are predicting more steps than needed by the eval code. Subsample.\n",
        "    interval = (\n",
        "        self._metrics_config.track_steps_per_second //\n",
        "        self._metrics_config.prediction_steps_per_second)\n",
        "    prediction_trajectory = prediction_trajectory[...,\n",
        "                                                  (interval - 1)::interval, :]\n",
        "\n",
        "    return py_metrics_ops.motion_metrics(\n",
        "        config=self._metrics_config.SerializeToString(),\n",
        "        prediction_trajectory=prediction_trajectory,\n",
        "        prediction_score=prediction_score,\n",
        "        ground_truth_trajectory=ground_truth_trajectory,\n",
        "        ground_truth_is_valid=ground_truth_is_valid,\n",
        "        prediction_ground_truth_indices=prediction_ground_truth_indices,\n",
        "        prediction_ground_truth_indices_mask=prediction_ground_truth_indices_mask,\n",
        "        object_type=object_type)\n",
        "\n",
        "\n",
        "model = SimpleModel(128, 11, 80)\n",
        "optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)\n",
        "loss_fn = tf.keras.losses.MeanSquaredError()\n",
        "metrics_config = _default_metrics_config()\n",
        "motion_metrics = MotionMetrics(metrics_config)\n",
        "metric_names = config_util.get_breakdown_names_from_motion_config(\n",
        "    metrics_config)\n",
        "\n",
        "\n",
        "def train_step(inputs):\n",
        "  \"\"\"Runs one optimization step and accumulates motion metrics.\n",
        "\n",
        "  Args:\n",
        "    inputs: Dict of parsed tensors for one batch. Reads 'input_states',\n",
        "      'gt_future_states', 'gt_future_is_valid', 'tracks_to_predict' and\n",
        "      'object_type'.\n",
        "\n",
        "  Returns:\n",
        "    The scalar loss value for this batch.\n",
        "  \"\"\"\n",
        "  with tf.GradientTape() as tape:\n",
        "    # [batch_size, num_agents, D]\n",
        "    agent_states = inputs['input_states']\n",
        "\n",
        "    # Predict. [batch_size, num_agents, steps, 2].\n",
        "    predictions = model(agent_states, training=True)\n",
        "\n",
        "    # The regression target starts right after the history window.\n",
        "    first_future_step = metrics_config.track_history_samples + 1\n",
        "\n",
        "    # [batch_size, num_agents, steps, 7]\n",
        "    future_states = inputs['gt_future_states']\n",
        "    # Only the (x, y) coordinates of the future steps are regressed.\n",
        "    targets = future_states[..., first_future_step:, :2]\n",
        "\n",
        "    # [batch_size, num_agents, steps]\n",
        "    future_is_valid = inputs['gt_future_is_valid']\n",
        "    # Restrict the loss to valid future steps of the agents selected for\n",
        "    # prediction. [batch_size, num_agents, steps].\n",
        "    valid_mask = tf.cast(\n",
        "        future_is_valid[..., first_future_step:], tf.float32)\n",
        "    predict_mask = tf.cast(\n",
        "        inputs['tracks_to_predict'][..., tf.newaxis], tf.float32)\n",
        "    sample_weights = valid_mask * predict_mask\n",
        "\n",
        "    loss_value = loss_fn(targets, predictions,\n",
        "                         sample_weight=sample_weights)\n",
        "  gradients = tape.gradient(loss_value, model.trainable_weights)\n",
        "  optimizer.apply_gradients(zip(gradients, model.trainable_weights))\n",
        "\n",
        "  # Insert top_k = 1 and num_agents_per_joint_prediction = 1 axes:\n",
        "  # [batch_size, num_agents, steps, 2] becomes\n",
        "  # [batch_size, num_agents, 1, 1, steps, 2].\n",
        "  predictions = predictions[:, :, tf.newaxis, tf.newaxis]\n",
        "\n",
        "  # This model produces no score per predicted trajectory, so fake a\n",
        "  # constant one. [batch_size, num_agents, 1].\n",
        "  scores = tf.ones(shape=tf.shape(predictions)[:3])\n",
        "\n",
        "  # [batch_size, num_agents].\n",
        "  object_type = inputs['object_type']\n",
        "\n",
        "  batch_size = tf.shape(inputs['tracks_to_predict'])[0]\n",
        "  num_agents = tf.shape(inputs['tracks_to_predict'])[1]\n",
        "\n",
        "  # Prediction slot i corresponds to ground-truth agent i.\n",
        "  # [batch_size, num_agents, 1].\n",
        "  gt_indices = tf.tile(\n",
        "      tf.range(num_agents, dtype=tf.int64)[tf.newaxis, :, tf.newaxis],\n",
        "      (batch_size, 1, 1))\n",
        "  # [batch_size, num_agents, 1].\n",
        "  gt_indices_mask = inputs['tracks_to_predict'][..., tf.newaxis]\n",
        "\n",
        "  motion_metrics.update_state(predictions, scores, future_states,\n",
        "                              future_is_valid, gt_indices,\n",
        "                              gt_indices_mask, object_type)\n",
        "\n",
        "  return loss_value\n",
        "\n",
        "\n",
        "dataset = tf.data.TFRecordDataset(FILENAME)\n",
        "dataset = dataset.map(_parse)\n",
        "\n",
        "# Keep the batch size in one place so the 'seen so far' log below stays in\n",
        "# sync with it (the original hardcoded 64 in the log while batching by 32).\n",
        "BATCH_SIZE = 32\n",
        "dataset = dataset.batch(BATCH_SIZE)\n",
        "\n",
        "epochs = 2\n",
        "num_batches_per_epoch = 10\n",
        "\n",
        "for epoch in range(epochs):\n",
        "  print('\\nStart of epoch %d' % (epoch,))\n",
        "  start_time = time.time()\n",
        "\n",
        "  # Iterate over the batches of the dataset.\n",
        "  for step, batch in enumerate(dataset):\n",
        "    loss_value = train_step(batch)\n",
        "\n",
        "    # Log every 10 batches.\n",
        "    if step % 10 == 0:\n",
        "      print('Training loss (for one batch) at step %d: %.4f' %\n",
        "            (step, float(loss_value)))\n",
        "      print('Seen so far: %d samples' % ((step + 1) * BATCH_SIZE))\n",
        "\n",
        "    # Stop after exactly num_batches_per_epoch batches. The original\n",
        "    # compared `step \u003e= num_batches_per_epoch`, which ran one extra batch\n",
        "    # per epoch.\n",
        "    if step + 1 \u003e= num_batches_per_epoch:\n",
        "      break\n",
        "\n",
        "  # Report the epoch duration (start_time was previously assigned but\n",
        "  # never used).\n",
        "  print('Epoch took %.2fs' % (time.time() - start_time))\n",
        "\n",
        "  # Display metrics at the end of each epoch.\n",
        "  train_metric_values = motion_metrics.result()\n",
        "  for i, m in enumerate(\n",
        "      ['min_ade', 'min_fde', 'miss_rate', 'overlap_rate', 'map']):\n",
        "    for j, n in enumerate(metric_names):\n",
        "      print('{}/{}: {}'.format(m, n, train_metric_values[i, j]))"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "collapsed_sections": [],
      "last_runtime": {
        "build_target": "",
        "kind": "local"
      },
      "name": "Waymo Open Dataset Motion Tutorial",
      "provenance": [
        {
          "file_id": "1VrSkEvjqNaShhQS1i3GlNqegLwqmqlcM",
          "timestamp": 1615354811513
        },
        {
          "file_id": "redacted",
          "timestamp": 1615333360862
        },
        {
          "file_id": "1FS9qXkF5DBPVobGCMwk_7ZgPUuf3YyWp",
          "timestamp": 1613686002912
        }
      ],
      "toc_visible": true
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
