{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "cYLZzIbjk62B"
      },
      "source": [
        "# Waymo Open Dataset Occupancy and Flow Tutorial\n",
        "\n",
        "- Website: https://waymo.com/open/challenges/2022/occupancy-flow-prediction-challenge/\n",
        "- GitHub: https://github.com/waymo-research/waymo-open-dataset\n",
        "\n",
        "This tutorial demonstrates:\n",
        "\n",
        "- How to decode and interpret the data in the WOD Motion dataset.\n",
        "- How to construct ground-truth occupancy grids and flow fields.\n",
        "- How to train a simple model with Tensorflow.\n",
        "- How to run inference on the test set and store the results into a protobuf for submission."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "9xJSIyk7woTT"
      },
      "source": [
        "# Installation\n",
        "\n",
        "To run a Jupyter kernel locally, run:\n",
        "\n",
        "```\n",
        "$ pip install \"waymo_open_dataset_tf_2_6_0==1.4.5\"\n",
        "$ pip install \"notebook\u003e=5.3\"\n",
        "$ jupyter notebook\n",
        "```"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "M5gzSlBTlTiS"
      },
      "outputs": [],
      "source": [
        "import pathlib\n",
        "import os\n",
        "from typing import Dict, List, Mapping, Optional, Sequence, Tuple, Union\n",
        "import uuid\n",
        "import zlib\n",
        "\n",
        "from IPython.display import HTML\n",
        "import matplotlib.animation as animation\n",
        "import matplotlib.pyplot as plt\n",
        "import numpy as np\n",
        "import tensorflow as tf\n",
        "import tensorflow_graphics.image.transformer as tfg_transformer\n",
        "\n",
        "from google.protobuf import text_format\n",
        "from waymo_open_dataset.protos import occupancy_flow_metrics_pb2\n",
        "from waymo_open_dataset.protos import occupancy_flow_submission_pb2\n",
        "from waymo_open_dataset.protos import scenario_pb2\n",
        "from waymo_open_dataset.utils import occupancy_flow_data\n",
        "from waymo_open_dataset.utils import occupancy_flow_grids\n",
        "from waymo_open_dataset.utils import occupancy_flow_metrics\n",
        "from waymo_open_dataset.utils import occupancy_flow_renderer\n",
        "from waymo_open_dataset.utils import occupancy_flow_vis"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "4Y-PWAeJpTlL"
      },
      "source": [
        "# Data location\n",
        "\n",
        "Visit the [Waymo Open Dataset Website](https://waymo.com/open) to download the full dataset."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "xdEcN6WilcBn"
      },
      "outputs": [],
      "source": [
        "# PLEASE EDIT.\n",
        "\n",
        "# A tfrecord containing tf.Example protos as downloaded from the Waymo Open\n",
        "# Dataset (motion) webpage.\n",
        "\n",
        "# Replace this path with your own tfrecords.\n",
        "DATASET_FOLDER = '/path/to/waymo_open_dataset_motion_v_1_1_0/uncompressed'\n",
        "\n",
        "# TFRecord dataset.\n",
        "TRAIN_FILES = f'{DATASET_FOLDER}/tf_example/training/training_tfexample.tfrecord*'\n",
        "VAL_FILES = f'{DATASET_FOLDER}/tf_example/validation/validation_tfexample.tfrecord*'\n",
        "TEST_FILES = f'{DATASET_FOLDER}/tf_example/testing/testing_tfexample.tfrecord*'\n",
        "\n",
        "# Text files containing validation and test scenario IDs for this challenge.\n",
        "VAL_SCENARIO_IDS_FILE = f'{DATASET_FOLDER}/occupancy_flow_challenge/validation_scenario_ids.txt'\n",
        "TEST_SCENARIO_IDS_FILE = f'{DATASET_FOLDER}/occupancy_flow_challenge/testing_scenario_ids.txt'"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Uq_3uf6veILl"
      },
      "source": [
        "# Create dataset"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "aHBTtPC_NOtw"
      },
      "outputs": [],
      "source": [
        "filenames = tf.io.matching_files(TRAIN_FILES)\n",
        "dataset = tf.data.TFRecordDataset(filenames)\n",
        "dataset = dataset.repeat()\n",
        "dataset = dataset.map(occupancy_flow_data.parse_tf_example)\n",
        "dataset = dataset.batch(16)\n",
        "it = iter(dataset)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "10ssl6g0epfM"
      },
      "source": [
        "## Load one example"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ZYZezoUI54FH"
      },
      "outputs": [],
      "source": [
        "inputs = next(it)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "trAv9YGrvYnc"
      },
      "source": [
        "# Visualize TF Example"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Zdc8CBg27dtn"
      },
      "source": [
        "## Generate visualization images"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "utTE9Mtgx3Fq"
      },
      "outputs": [],
      "source": [
        "def create_figure_and_axes(size_pixels):\n",
        "  \"\"\"Initializes a unique figure and axes for plotting.\"\"\"\n",
        "  fig, ax = plt.subplots(1, 1, num=uuid.uuid4())\n",
        "\n",
        "  # Sets output image to pixel resolution.\n",
        "  dpi = 100\n",
        "  size_inches = size_pixels / dpi\n",
        "  fig.set_size_inches([size_inches, size_inches])\n",
        "  fig.set_dpi(dpi)\n",
        "  fig.set_facecolor('white')\n",
        "  ax.set_facecolor('white')\n",
        "  ax.xaxis.label.set_color('black')\n",
        "  ax.tick_params(axis='x', colors='black')\n",
        "  ax.yaxis.label.set_color('black')\n",
        "  ax.tick_params(axis='y', colors='black')\n",
        "  fig.set_tight_layout(True)\n",
        "  ax.grid(False)\n",
        "  return fig, ax\n",
        "\n",
        "\n",
        "def fig_canvas_image(fig):\n",
        "  \"\"\"Returns a [H, W, 3] uint8 np.array image from fig.canvas.tostring_rgb().\"\"\"\n",
        "  # Just enough margin in the figure to display xticks and yticks.\n",
        "  fig.subplots_adjust(\n",
        "      left=0.08, bottom=0.08, right=0.98, top=0.98, wspace=0.0, hspace=0.0)\n",
        "  fig.canvas.draw()\n",
        "  data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n",
        "  return data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n",
        "\n",
        "\n",
        "def get_colormap(num_agents):\n",
        "  \"\"\"Compute a color map array of shape [num_agents, 4].\"\"\"\n",
        "  colors = plt.cm.get_cmap('jet', num_agents)\n",
        "  colors = colors(range(num_agents))\n",
        "  np.random.shuffle(colors)\n",
        "  return colors\n",
        "\n",
        "\n",
        "def get_viewport(all_states, all_states_mask):\n",
        "  \"\"\"Gets the region containing the data.\n",
        "\n",
        "  Args:\n",
        "    all_states: states of agents as an array of shape [num_agents, num_steps,\n",
        "      2].\n",
        "    all_states_mask: binary mask of shape [num_agents, num_steps] for\n",
        "      `all_states`.\n",
        "\n",
        "  Returns:\n",
        "    center_y: float. y coordinate for center of data.\n",
        "    center_x: float. x coordinate for center of data.\n",
        "    width: float. Width of data.\n",
        "  \"\"\"\n",
        "  valid_states = all_states[all_states_mask]\n",
        "  all_y = valid_states[..., 1]\n",
        "  all_x = valid_states[..., 0]\n",
        "\n",
        "  center_y = (np.max(all_y) + np.min(all_y)) / 2\n",
        "  center_x = (np.max(all_x) + np.min(all_x)) / 2\n",
        "\n",
        "  range_y = np.ptp(all_y)\n",
        "  range_x = np.ptp(all_x)\n",
        "\n",
        "  width = max(range_y, range_x)\n",
        "\n",
        "  return center_y, center_x, width\n",
        "\n",
        "\n",
        "def visualize_one_step(\n",
        "    states,\n",
        "    mask,\n",
        "    roadgraph,\n",
        "    title,\n",
        "    center_y,\n",
        "    center_x,\n",
        "    width,\n",
        "    color_map,\n",
        "    size_pixels=1000,\n",
        "):\n",
        "  \"\"\"Generate visualization for a single step.\"\"\"\n",
        "\n",
        "  # Create figure and axes.\n",
        "  fig, ax = create_figure_and_axes(size_pixels=size_pixels)\n",
        "\n",
        "  # Plot roadgraph.\n",
        "  rg_pts = roadgraph[:, :2].T\n",
        "  ax.plot(rg_pts[0, :], rg_pts[1, :], 'k.', alpha=1, ms=2)\n",
        "\n",
        "  masked_x = states[:, 0][mask]\n",
        "  masked_y = states[:, 1][mask]\n",
        "  colors = color_map[mask]\n",
        "\n",
        "  # Plot agent current position.\n",
        "  ax.scatter(\n",
        "      masked_x,\n",
        "      masked_y,\n",
        "      marker='o',\n",
        "      linewidths=3,\n",
        "      color=colors,\n",
        "  )\n",
        "\n",
        "  # Title.\n",
        "  ax.set_title(title)\n",
        "\n",
        "  # Set axes.  Should be at least 10m on a side.\n",
        "  size = max(10, width * 1.0)\n",
        "  ax.axis([\n",
        "      -size / 2 + center_x, size / 2 + center_x, -size / 2 + center_y,\n",
        "      size / 2 + center_y\n",
        "  ])\n",
        "  ax.set_aspect('equal')\n",
        "\n",
        "  image = fig_canvas_image(fig)\n",
        "  plt.close(fig)\n",
        "  return image\n",
        "\n",
        "\n",
        "def visualize_all_agents_smooth(\n",
        "    decoded_example,\n",
        "    size_pixels=1000,\n",
        "):\n",
        "  \"\"\"Visualizes all agent predicted trajectories in a series of images.\n",
        "\n",
        "  Args:\n",
        "    decoded_example: Dictionary containing agent info about all modeled agents.\n",
        "    size_pixels: The size in pixels of the output image.\n",
        "\n",
        "  Returns:\n",
        "    A list of T [H, W, 3] uint8 np.arrays of the drawn matplotlib's figure canvas.\n",
        "  \"\"\"\n",
        "  # [num_agents, num_past_steps, 2] float32.\n",
        "  past_states = tf.stack(\n",
        "      [decoded_example['state/past/x'], decoded_example['state/past/y']],\n",
        "      -1).numpy()\n",
        "  past_states_mask = decoded_example['state/past/valid'].numpy() \u003e 0.0\n",
        "\n",
        "  # [num_agents, 1, 2] float32.\n",
        "  current_states = tf.stack(\n",
        "      [decoded_example['state/current/x'], decoded_example['state/current/y']],\n",
        "      -1).numpy()\n",
        "  current_states_mask = decoded_example['state/current/valid'].numpy() \u003e 0.0\n",
        "\n",
        "  # [num_agents, num_future_steps, 2] float32.\n",
        "  future_states = tf.stack(\n",
        "      [decoded_example['state/future/x'], decoded_example['state/future/y']],\n",
        "      -1).numpy()\n",
        "  future_states_mask = decoded_example['state/future/valid'].numpy() \u003e 0.0\n",
        "\n",
        "  # [num_points, 3] float32.\n",
        "  roadgraph_xyz = decoded_example['roadgraph_samples/xyz'].numpy()\n",
        "\n",
        "  num_agents, num_past_steps, _ = past_states.shape\n",
        "  num_future_steps = future_states.shape[1]\n",
        "\n",
        "  color_map = get_colormap(num_agents)\n",
        "\n",
        "  # [num_agents, num_past_steps + 1 + num_future_steps, depth] float32.\n",
        "  all_states = np.concatenate([past_states, current_states, future_states], 1)\n",
        "\n",
        "  # [num_agents, num_past_steps + 1 + num_future_steps] float32.\n",
        "  all_states_mask = np.concatenate(\n",
        "      [past_states_mask, current_states_mask, future_states_mask], 1)\n",
        "\n",
        "  center_y, center_x, width = get_viewport(all_states, all_states_mask)\n",
        "\n",
        "  images = []\n",
        "\n",
        "  # Generate images from past time steps.\n",
        "  for i, (s, m) in enumerate(\n",
        "      zip(\n",
        "          np.split(past_states, num_past_steps, 1),\n",
        "          np.split(past_states_mask, num_past_steps, 1))):\n",
        "    im = visualize_one_step(s[:, 0], m[:, 0], roadgraph_xyz,\n",
        "                            'past: %d' % (num_past_steps - i), center_y,\n",
        "                            center_x, width, color_map, size_pixels)\n",
        "    images.append(im)\n",
        "\n",
        "  # Generate one image for the current time step.\n",
        "  s = current_states\n",
        "  m = current_states_mask\n",
        "\n",
        "  im = visualize_one_step(s[:, 0], m[:, 0], roadgraph_xyz, 'current', center_y,\n",
        "                          center_x, width, color_map, size_pixels)\n",
        "  images.append(im)\n",
        "\n",
        "  # Generate images from future time steps.\n",
        "  for i, (s, m) in enumerate(\n",
        "      zip(\n",
        "          np.split(future_states, num_future_steps, 1),\n",
        "          np.split(future_states_mask, num_future_steps, 1))):\n",
        "    im = visualize_one_step(s[:, 0], m[:, 0], roadgraph_xyz,\n",
        "                            'future: %d' % (i + 1), center_y, center_x, width,\n",
        "                            color_map, size_pixels)\n",
        "    images.append(im)\n",
        "\n",
        "  return images\n",
        "\n",
        "inputs_no_batch = {k: v[0] for k, v in inputs.items()}\n",
        "images = visualize_all_agents_smooth(inputs_no_batch)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "OrIZjUHG7hM3"
      },
      "source": [
        "## Display animation"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "tt2IeGiG0eny"
      },
      "outputs": [],
      "source": [
        "def create_animation(images, interval=100):\n",
        "  \"\"\"Creates a Matplotlib animation of the given images.\n",
        "\n",
        "  Args:\n",
        "    images: A list of numpy arrays representing the images.\n",
        "    interval: Delay between frames in milliseconds.\n",
        "\n",
        "  Returns:\n",
        "    A matplotlib.animation.Animation.\n",
        "\n",
        "  Usage:\n",
        "    anim = create_animation(images)\n",
        "    anim.save('/tmp/animation.avi')\n",
        "    HTML(anim.to_html5_video())\n",
        "  \"\"\"\n",
        "\n",
        "  plt.ioff()\n",
        "  fig, ax = plt.subplots()\n",
        "  dpi = 100\n",
        "  size_inches = 1000 / dpi\n",
        "  fig.set_size_inches([size_inches, size_inches])\n",
        "  plt.ion()\n",
        "\n",
        "  def animate_func(i):\n",
        "    ax.imshow(images[i])\n",
        "    ax.set_xticks([])\n",
        "    ax.set_yticks([])\n",
        "    ax.grid('off')\n",
        "\n",
        "  anim = animation.FuncAnimation(\n",
        "      fig, animate_func, frames=len(images), interval=interval)\n",
        "  plt.close(fig)\n",
        "  return anim\n",
        "\n",
        "\n",
        "anim = create_animation(images[::5])\n",
        "HTML(anim.to_html5_video())"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "gCjWmrQTBR0n"
      },
      "source": [
        "# Config"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "bKIxOTyOcxQl"
      },
      "outputs": [],
      "source": [
        "config = occupancy_flow_metrics_pb2.OccupancyFlowTaskConfig()\n",
        "config_text = \"\"\"\n",
        "num_past_steps: 10\n",
        "num_future_steps: 80\n",
        "num_waypoints: 8\n",
        "cumulative_waypoints: false\n",
        "normalize_sdc_yaw: true\n",
        "grid_height_cells: 256\n",
        "grid_width_cells: 256\n",
        "sdc_y_in_grid: 192\n",
        "sdc_x_in_grid: 128\n",
        "pixels_per_meter: 3.2\n",
        "agent_points_per_side_length: 48\n",
        "agent_points_per_side_width: 16\n",
        "\"\"\"\n",
        "text_format.Parse(config_text, config)\n",
        "config"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "mJNOa2IGff_j"
      },
      "source": [
        "# Occupancy flow ground truth"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "7bRjML3UuPff"
      },
      "source": [
        "## Generate"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "TC8za-J1b3Q3"
      },
      "outputs": [],
      "source": [
        "inputs = occupancy_flow_data.add_sdc_fields(inputs)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "oh2XHMaJ9pG2"
      },
      "outputs": [],
      "source": [
        "timestep_grids = occupancy_flow_grids.create_ground_truth_timestep_grids(\n",
        "    inputs=inputs, config=config)\n",
        "print(timestep_grids.vehicles.future_observed_occupancy.shape)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "9GoiXN1O-H00"
      },
      "outputs": [],
      "source": [
        "true_waypoints = occupancy_flow_grids.create_ground_truth_waypoint_grids(\n",
        "    timestep_grids=timestep_grids, config=config)\n",
        "print(true_waypoints.vehicles.observed_occupancy[0].shape)\n",
        "print(true_waypoints.vehicles.occluded_occupancy[0].shape)\n",
        "print(true_waypoints.vehicles.flow[0].shape)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "34eaArSv-WjX"
      },
      "outputs": [],
      "source": [
        "vis_grids = occupancy_flow_grids.create_ground_truth_vis_grids(\n",
        "    inputs=inputs, timestep_grids=timestep_grids, config=config)\n",
        "print(vis_grids.roadgraph.shape)\n",
        "print(vis_grids.agent_trails.shape)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Cvw1FZQIBUvN"
      },
      "source": [
        "## Visualize"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "YejgDsuP8l-u"
      },
      "outputs": [],
      "source": [
        "# Visualize waypoint 4 out of 8.\n",
        "k = 3\n",
        "observed_occupancy_grids = true_waypoints.get_observed_occupancy_at_waypoint(k)\n",
        "observed_occupancy_rgb = occupancy_flow_vis.occupancy_rgb_image(\n",
        "    agent_grids=observed_occupancy_grids,\n",
        "    roadgraph_image=vis_grids.roadgraph,\n",
        "    gamma=1.6,\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "48Z5QjWb9NVW"
      },
      "outputs": [],
      "source": [
        "plt.imshow(observed_occupancy_rgb[0])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "-7wKA0lP_bcP"
      },
      "outputs": [],
      "source": [
        "occluded_occupancy_grids = true_waypoints.get_occluded_occupancy_at_waypoint(k)\n",
        "occluded_occupancy_rgb = occupancy_flow_vis.occupancy_rgb_image(\n",
        "    agent_grids=occluded_occupancy_grids,\n",
        "    roadgraph_image=vis_grids.roadgraph,\n",
        "    gamma=1.6,\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "voCCSRMa_xkn"
      },
      "outputs": [],
      "source": [
        "plt.imshow(occluded_occupancy_rgb[0])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Sa-l47iAANmc"
      },
      "outputs": [],
      "source": [
        "flow_rgb = occupancy_flow_vis.flow_rgb_image(\n",
        "    flow=true_waypoints.vehicles.flow[k],\n",
        "    roadgraph_image=vis_grids.roadgraph,\n",
        "    agent_trails=vis_grids.agent_trails,\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "inZWrLkLAwfc"
      },
      "outputs": [],
      "source": [
        "plt.imshow(flow_rgb[0])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "rgUQxgvFBQ9F"
      },
      "outputs": [],
      "source": [
        "images = []\n",
        "for k in range(config.num_waypoints):\n",
        "  observed_occupancy_grids = true_waypoints.get_observed_occupancy_at_waypoint(\n",
        "      k)\n",
        "  observed_occupancy_rgb = occupancy_flow_vis.occupancy_rgb_image(\n",
        "      agent_grids=observed_occupancy_grids,\n",
        "      roadgraph_image=vis_grids.roadgraph,\n",
        "      gamma=1.6,\n",
        "  )\n",
        "  images.append(observed_occupancy_rgb[0])\n",
        "\n",
        "anim = create_animation(images, interval=200)\n",
        "HTML(anim.to_html5_video())"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "OoA3N5YMGEQU"
      },
      "outputs": [],
      "source": [
        "images = []\n",
        "for k in range(config.num_waypoints):\n",
        "  occluded_occupancy_grids = true_waypoints.get_occluded_occupancy_at_waypoint(\n",
        "      k)\n",
        "  occluded_occupancy_rgb = occupancy_flow_vis.occupancy_rgb_image(\n",
        "      agent_grids=occluded_occupancy_grids,\n",
        "      roadgraph_image=vis_grids.roadgraph,\n",
        "      gamma=1.6,\n",
        "  )\n",
        "  images.append(occluded_occupancy_rgb[0])\n",
        "\n",
        "anim = create_animation(images, interval=200)\n",
        "HTML(anim.to_html5_video())"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "PNs2IyOHGWWM"
      },
      "outputs": [],
      "source": [
        "images = []\n",
        "for k in range(config.num_waypoints):\n",
        "  flow_rgb = occupancy_flow_vis.flow_rgb_image(\n",
        "      flow=true_waypoints.vehicles.flow[k],\n",
        "      roadgraph_image=vis_grids.roadgraph,\n",
        "      agent_trails=vis_grids.agent_trails,\n",
        "  )\n",
        "  images.append(flow_rgb[0])\n",
        "\n",
        "anim = create_animation(images, interval=200)\n",
        "HTML(anim.to_html5_video())"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "7QGukTtwft6Q"
      },
      "source": [
        "# Baseline model\n",
        "\n",
        "Note that this is a very simple model to demonstrate training.\n",
        "\n",
        "Suggestion for a stronger model:  Replace the convnet encoder with a point-based encoder like Transformer or Point Pillars and feed it points from the scene with rich features.  See function `_sample_agent_points()` in [occupancy_flow_renderer.py](https://github.com/waymo-research/waymo-open-dataset/blob/master/utils/occupancy_flow_renderer.py) for ideas on how to obtain such points and features.\n",
        "\n",
        "Relevant publications:\n",
        "\n",
        "- [Scene Transformer](https://arxiv.org/pdf/2106.08417.pdf)\n",
        "- [Occupancy Flow Fields](https://arxiv.org/pdf/2203.03875.pdf)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ilzbfAzm13cr"
      },
      "outputs": [],
      "source": [
        "# Number of channels output by the model.\n",
        "# Occupancy of currently-observed vehicles: 1 channel.\n",
        "# Occupancy of currently-occluded vehicles: 1 channel.\n",
        "# Flow of all vehicles: 2 channels.\n",
        "NUM_PRED_CHANNELS = 4"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "1pghpB7PlNwU"
      },
      "outputs": [],
      "source": [
        "def _make_model_inputs(\n",
        "    timestep_grids: occupancy_flow_grids.TimestepGrids,\n",
        "    vis_grids: occupancy_flow_grids.VisGrids,\n",
        ") -\u003e tf.Tensor:\n",
        "  \"\"\"Concatenates all occupancy grids over past, current to a single tensor.\"\"\"\n",
        "  model_inputs = tf.concat(\n",
        "      [\n",
        "          vis_grids.roadgraph,\n",
        "          timestep_grids.vehicles.past_occupancy,\n",
        "          timestep_grids.vehicles.current_occupancy,\n",
        "          tf.clip_by_value(\n",
        "              timestep_grids.pedestrians.past_occupancy +\n",
        "              timestep_grids.cyclists.past_occupancy, 0, 1),\n",
        "          tf.clip_by_value(\n",
        "              timestep_grids.pedestrians.current_occupancy +\n",
        "              timestep_grids.cyclists.current_occupancy, 0, 1),\n",
        "      ],\n",
        "      axis=-1,\n",
        "  )\n",
        "  return model_inputs"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "TgwPZhi7fwwI"
      },
      "outputs": [],
      "source": [
        "def _make_model(\n",
        "    model_inputs: tf.Tensor,\n",
        "    config: occupancy_flow_metrics_pb2.OccupancyFlowTaskConfig,\n",
        ") -\u003e tf.keras.Model:\n",
        "  \"\"\"Simple convolutional model.\"\"\"\n",
        "  inputs = tf.keras.Input(tensor=model_inputs)\n",
        "\n",
        "  encoder = tf.keras.applications.ResNet50V2(\n",
        "      include_top=False, weights=None, input_tensor=inputs)\n",
        "\n",
        "  num_output_channels = NUM_PRED_CHANNELS * config.num_waypoints\n",
        "  decoder_channels = [32, 64, 128, 256, 512]\n",
        "\n",
        "  conv2d_kwargs = {\n",
        "      'kernel_size': 3,\n",
        "      'strides': 1,\n",
        "      'padding': 'same',\n",
        "  }\n",
        "\n",
        "  x = encoder(inputs)\n",
        "\n",
        "  for i in [4, 3, 2, 1, 0]:\n",
        "    x = tf.keras.layers.Conv2D(\n",
        "        filters=decoder_channels[i],\n",
        "        activation='relu',\n",
        "        name=f'upconv_{i}_0',\n",
        "        **conv2d_kwargs)(\n",
        "            x)\n",
        "    x = tf.keras.layers.UpSampling2D(name=f'upsample_{i}')(x)\n",
        "    x = tf.keras.layers.Conv2D(\n",
        "        filters=decoder_channels[i],\n",
        "        activation='relu',\n",
        "        name=f'upconv_{i}_1',\n",
        "        **conv2d_kwargs)(\n",
        "            x)\n",
        "\n",
        "  outputs = tf.keras.layers.Conv2D(\n",
        "      filters=num_output_channels,\n",
        "      activation=None,\n",
        "      name=f'outconv',\n",
        "      **conv2d_kwargs)(\n",
        "          x)\n",
        "\n",
        "  return tf.keras.Model(\n",
        "      inputs=inputs, outputs=outputs, name='occupancy_flow_model')"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "13X8l9lPTM5_"
      },
      "outputs": [],
      "source": [
        "model_inputs = _make_model_inputs(timestep_grids, vis_grids)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "-L0GYuY4ul2M"
      },
      "outputs": [],
      "source": [
        "model = _make_model(model_inputs=model_inputs, config=config)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "vQ0m6nRaqrh7"
      },
      "outputs": [],
      "source": [
        "model.summary()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ko52_WcHtDmf"
      },
      "outputs": [],
      "source": [
        "{v.name: v.shape for v in model.variables}"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Y6PqOua2y0Gq"
      },
      "outputs": [],
      "source": [
        "model_outputs = model(model_inputs)\n",
        "model_outputs.shape"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "JOBjxBqn4hC_"
      },
      "outputs": [],
      "source": [
        "def _get_pred_waypoint_logits(\n",
        "    model_outputs: tf.Tensor) -\u003e occupancy_flow_grids.WaypointGrids:\n",
        "  \"\"\"Slices model predictions into occupancy and flow grids.\"\"\"\n",
        "  pred_waypoint_logits = occupancy_flow_grids.WaypointGrids()\n",
        "\n",
        "  # Slice channels into output predictions.\n",
        "  for k in range(config.num_waypoints):\n",
        "    index = k * NUM_PRED_CHANNELS\n",
        "    waypoint_channels = model_outputs[:, :, :, index:index + NUM_PRED_CHANNELS]\n",
        "    pred_observed_occupancy = waypoint_channels[:, :, :, :1]\n",
        "    pred_occluded_occupancy = waypoint_channels[:, :, :, 1:2]\n",
        "    pred_flow = waypoint_channels[:, :, :, 2:]\n",
        "    pred_waypoint_logits.vehicles.observed_occupancy.append(\n",
        "        pred_observed_occupancy)\n",
        "    pred_waypoint_logits.vehicles.occluded_occupancy.append(\n",
        "        pred_occluded_occupancy)\n",
        "    pred_waypoint_logits.vehicles.flow.append(pred_flow)\n",
        "\n",
        "  return pred_waypoint_logits"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Le8DM6w96kdM"
      },
      "outputs": [],
      "source": [
        "pred_waypoint_logits = _get_pred_waypoint_logits(model_outputs)\n",
        "vehicle_grids = pred_waypoint_logits.vehicles\n",
        "print(len(vehicle_grids.observed_occupancy), 'observed occupancy grids.')\n",
        "print(len(vehicle_grids.occluded_occupancy), 'occluded occupancy grids.')\n",
        "print(len(vehicle_grids.flow), 'flow fields.')"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "QrBFOs4g6vde"
      },
      "source": [
        "## Loss"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "kKfjmsVU6wvQ"
      },
      "outputs": [],
      "source": [
        "def _occupancy_flow_loss(\n",
        "    config: occupancy_flow_metrics_pb2.OccupancyFlowTaskConfig,\n",
        "    true_waypoints: occupancy_flow_grids.WaypointGrids,\n",
        "    pred_waypoint_logits: occupancy_flow_grids.WaypointGrids,\n",
        ") -\u003e Dict[str, tf.Tensor]:\n",
        "  \"\"\"Loss function.\n",
        "\n",
        "  Args:\n",
        "    config: OccupancyFlowTaskConfig proto message.\n",
        "    true_waypoints: Ground truth labels.\n",
        "    pred_waypoint_logits: Predicted occupancy logits and flows.\n",
        "\n",
        "  Returns:\n",
        "    A dict containing different loss tensors:\n",
        "      observed_xe: Observed occupancy cross-entropy loss.\n",
        "      occluded_xe: Occluded occupancy cross-entropy loss.\n",
        "      flow: Flow loss.\n",
        "  \"\"\"\n",
        "  loss_dict = {}\n",
        "  # Store loss tensors for each waypoint and average at the end.\n",
        "  loss_dict['observed_xe'] = []\n",
        "  loss_dict['occluded_xe'] = []\n",
        "  loss_dict['flow'] = []\n",
        "\n",
        "  # Iterate over waypoints.\n",
        "  for k in range(config.num_waypoints):\n",
        "    # Occupancy cross-entropy loss.\n",
        "    pred_observed_occupancy_logit = (\n",
        "        pred_waypoint_logits.vehicles.observed_occupancy[k])\n",
        "    pred_occluded_occupancy_logit = (\n",
        "        pred_waypoint_logits.vehicles.occluded_occupancy[k])\n",
        "    true_observed_occupancy = true_waypoints.vehicles.observed_occupancy[k]\n",
        "    true_occluded_occupancy = true_waypoints.vehicles.occluded_occupancy[k]\n",
        "\n",
        "    # Accumulate over waypoints.\n",
        "    loss_dict['observed_xe'].append(\n",
        "        _sigmoid_xe_loss(\n",
        "            true_occupancy=true_observed_occupancy,\n",
        "            pred_occupancy=pred_observed_occupancy_logit))\n",
        "    loss_dict['occluded_xe'].append(\n",
        "        _sigmoid_xe_loss(\n",
        "            true_occupancy=true_occluded_occupancy,\n",
        "            pred_occupancy=pred_occluded_occupancy_logit))\n",
        "\n",
        "    # Flow loss. Pass by keyword: _flow_loss is declared as\n",
        "    # (true_flow, pred_flow) and the valid-cell mask must come from the\n",
        "    # *ground-truth* flow, so positionally swapped arguments would silently\n",
        "    # mask the loss by the predicted flow instead.\n",
        "    pred_flow = pred_waypoint_logits.vehicles.flow[k]\n",
        "    true_flow = true_waypoints.vehicles.flow[k]\n",
        "    loss_dict['flow'].append(\n",
        "        _flow_loss(true_flow=true_flow, pred_flow=pred_flow))\n",
        "\n",
        "  # Mean over waypoints.\n",
        "  loss_dict['observed_xe'] = (\n",
        "      tf.math.add_n(loss_dict['observed_xe']) / config.num_waypoints)\n",
        "  loss_dict['occluded_xe'] = (\n",
        "      tf.math.add_n(loss_dict['occluded_xe']) / config.num_waypoints)\n",
        "  loss_dict['flow'] = tf.math.add_n(loss_dict['flow']) / config.num_waypoints\n",
        "\n",
        "  return loss_dict\n",
        "\n",
        "\n",
        "def _sigmoid_xe_loss(\n",
        "    true_occupancy: tf.Tensor,\n",
        "    pred_occupancy: tf.Tensor,\n",
        "    loss_weight: float = 1000,\n",
        ") -\u003e Tf.Tensor if False else tf.Tensor:\n",
        "  \"\"\"Computes sigmoid cross-entropy loss over all grid cells.\"\"\"\n",
        "  # Since the mean over per-pixel cross-entropy values can get very small,\n",
        "  # we compute the sum and multiply it by the loss weight before computing\n",
        "  # the mean.\n",
        "  xe_sum = tf.reduce_sum(\n",
        "      tf.nn.sigmoid_cross_entropy_with_logits(\n",
        "          labels=_batch_flatten(true_occupancy),\n",
        "          logits=_batch_flatten(pred_occupancy),\n",
        "      ))\n",
        "  # Return mean.\n",
        "  return loss_weight * xe_sum / tf.size(pred_occupancy, out_type=tf.float32)\n",
        "\n",
        "\n",
        "def _flow_loss(\n",
        "    true_flow: tf.Tensor,\n",
        "    pred_flow: tf.Tensor,\n",
        "    loss_weight: float = 1,\n",
        ") -\u003e tf.Tensor:\n",
        "  \"\"\"Computes L1 flow loss, masked to cells with nonzero true flow.\"\"\"\n",
        "  diff = true_flow - pred_flow\n",
        "  # Ignore predictions in areas where ground-truth flow is zero.\n",
        "  # [batch_size, height, width, 1], [batch_size, height, width, 1]\n",
        "  true_flow_dx, true_flow_dy = tf.split(true_flow, 2, axis=-1)\n",
        "  # [batch_size, height, width, 1]\n",
        "  flow_exists = tf.logical_or(\n",
        "      tf.not_equal(true_flow_dx, 0.0),\n",
        "      tf.not_equal(true_flow_dy, 0.0),\n",
        "  )\n",
        "  flow_exists = tf.cast(flow_exists, tf.float32)\n",
        "  diff = diff * flow_exists\n",
        "  diff_norm = tf.linalg.norm(diff, ord=1, axis=-1)  # L1 norm.\n",
        "  mean_diff = tf.math.divide_no_nan(\n",
        "      tf.reduce_sum(diff_norm),\n",
        "      tf.reduce_sum(flow_exists) / 2)  # / 2 since (dx, dy) is counted twice.\n",
        "  return loss_weight * mean_diff\n",
        "\n",
        "\n",
        "def _batch_flatten(input_tensor: tf.Tensor) -\u003e tf.Tensor:\n",
        "  \"\"\"Flatten tensor to a shape [batch_size, -1].\"\"\"\n",
        "  image_shape = tf.shape(input_tensor)\n",
        "  return tf.reshape(input_tensor, tf.concat([image_shape[0:1], [-1]], 0))"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "wdOQTZAiuKdQ"
      },
      "source": [
        "# Sample training loop"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "zu6Cf0pw0L4t"
      },
      "outputs": [],
      "source": [
        "def _run_model_on_inputs(\n",
        "    inputs: Dict[str, tf.Tensor],\n",
        "    training: bool,\n",
        ") -\u003e occupancy_flow_grids.WaypointGrids:\n",
        "  \"\"\"Preprocesses inputs and runs model on one batch.\n",
        "\n",
        "  Args:\n",
        "    inputs: Dict of parsed input tensors for one batch.\n",
        "    training: Whether to run the model in training mode.\n",
        "\n",
        "  Returns:\n",
        "    Predicted occupancy logits and flow fields per waypoint.\n",
        "  \"\"\"\n",
        "  inputs = occupancy_flow_data.add_sdc_fields(inputs)\n",
        "  timestep_grids = occupancy_flow_grids.create_ground_truth_timestep_grids(\n",
        "      inputs, config)\n",
        "  # Note: ground-truth waypoint grids are deliberately not built here; this\n",
        "  # function only produces predictions, so that computation would be dead.\n",
        "  vis_grids = occupancy_flow_grids.create_ground_truth_vis_grids(\n",
        "      inputs, timestep_grids, config)\n",
        "\n",
        "  # [batch_size, grid_height_cells, grid_width_cells, 23]\n",
        "  model_inputs = _make_model_inputs(timestep_grids, vis_grids)\n",
        "  # [batch_size, grid_height_cells, grid_width_cells, 32]\n",
        "  model_outputs = model(model_inputs, training=training)\n",
        "\n",
        "  return _get_pred_waypoint_logits(model_outputs)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "b_5G9lx9uK9B"
      },
      "outputs": [],
      "source": [
        "optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)\n",
        "\n",
        "def train_step(inputs: Dict[str, tf.Tensor]) -\u003e tf.Tensor:\n",
        "  \"\"\"Runs one optimization step on a batch and returns the total loss.\"\"\"\n",
        "  # Build ground-truth waypoint grids for *this* batch. The global\n",
        "  # `true_waypoints` computed in an earlier cell belongs to a different\n",
        "  # batch, so reusing it here would pair predictions with mismatched labels.\n",
        "  inputs_with_sdc = occupancy_flow_data.add_sdc_fields(inputs)\n",
        "  timestep_grids = occupancy_flow_grids.create_ground_truth_timestep_grids(\n",
        "      inputs_with_sdc, config)\n",
        "  batch_true_waypoints = (\n",
        "      occupancy_flow_grids.create_ground_truth_waypoint_grids(\n",
        "          timestep_grids, config))\n",
        "\n",
        "  with tf.GradientTape() as tape:\n",
        "    # Run model.\n",
        "    pred_waypoint_logits = _run_model_on_inputs(inputs=inputs, training=True)\n",
        "    # Compute loss.\n",
        "    loss_dict = _occupancy_flow_loss(\n",
        "        config=config,\n",
        "        true_waypoints=batch_true_waypoints,\n",
        "        pred_waypoint_logits=pred_waypoint_logits)\n",
        "    # tf.math.add_n requires a list of tensors, not a dict view.\n",
        "    total_loss = tf.math.add_n(list(loss_dict.values()))\n",
        "\n",
        "  grads = tape.gradient(total_loss, model.trainable_weights)\n",
        "  optimizer.apply_gradients(zip(grads, model.trainable_weights))\n",
        "  return total_loss\n",
        "\n",
        "\n",
        "num_steps_to_train = 11\n",
        "step = 0\n",
        "while step \u003c num_steps_to_train:\n",
        "  # Iterate over batches of the dataset.\n",
        "  inputs = next(it)\n",
        "  loss_value = train_step(inputs)\n",
        "\n",
        "  # Log every 10 batches.\n",
        "  if step % 10 == 0:\n",
        "    float_loss = float(loss_value)\n",
        "    print(f'Training loss after step {step}: {float_loss:.4f}')\n",
        "\n",
        "  step += 1"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "m55jnj1Gjdgm"
      },
      "source": [
        "# Sample inference"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "kblvY1tTehNn"
      },
      "outputs": [],
      "source": [
        "def _apply_sigmoid_to_occupancy_logits(\n",
        "    pred_waypoint_logits: occupancy_flow_grids.WaypointGrids\n",
        ") -\u003e occupancy_flow_grids.WaypointGrids:\n",
        "  \"\"\"Converts occupancy logits to probabilities; flow is passed through.\"\"\"\n",
        "  logits = pred_waypoint_logits.vehicles\n",
        "  pred_waypoints = occupancy_flow_grids.WaypointGrids()\n",
        "  # Sigmoid maps per-cell logits into occupancy probabilities.\n",
        "  pred_waypoints.vehicles.observed_occupancy = list(\n",
        "      map(tf.sigmoid, logits.observed_occupancy))\n",
        "  pred_waypoints.vehicles.occluded_occupancy = list(\n",
        "      map(tf.sigmoid, logits.occluded_occupancy))\n",
        "  # Flow vectors are regressed directly, so no activation is applied.\n",
        "  pred_waypoints.vehicles.flow = logits.flow\n",
        "  return pred_waypoints"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "XgtqxpFCi-Cc"
      },
      "outputs": [],
      "source": [
        "# [batch_size, grid_height_cells, grid_width_cells, 23]\n",
        "model_inputs = _make_model_inputs(timestep_grids, vis_grids)\n",
        "# [batch_size, grid_height_cells, grid_width_cells, 32]\n",
        "# No training flag is passed, so Keras runs the model in inference mode.\n",
        "model_outputs = model(model_inputs)\n",
        "pred_waypoint_logits = _get_pred_waypoint_logits(model_outputs)\n",
        "# Convert occupancy logits to probabilities for visualization/metrics.\n",
        "pred_waypoints = _apply_sigmoid_to_occupancy_logits(pred_waypoint_logits)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "IPbRJtAmxaUN"
      },
      "source": [
        "## Visualize"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "kJvYvlx4yiY9"
      },
      "source": [
        "### Observed occupancy"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "lQo3ht4hjjvq"
      },
      "outputs": [],
      "source": [
        "images = []\n",
        "# Render one RGB frame per predicted waypoint, then animate the sequence.\n",
        "for k in range(config.num_waypoints):\n",
        "  observed_occupancy_grids = pred_waypoints.get_observed_occupancy_at_waypoint(\n",
        "      k)\n",
        "  observed_occupancy_rgb = occupancy_flow_vis.occupancy_rgb_image(\n",
        "      agent_grids=observed_occupancy_grids,\n",
        "      roadgraph_image=vis_grids.roadgraph,\n",
        "      gamma=1.6,\n",
        "  )\n",
        "  # Visualize the first example in the batch.\n",
        "  images.append(observed_occupancy_rgb[0])\n",
        "\n",
        "anim = create_animation(images, interval=200)\n",
        "HTML(anim.to_html5_video())"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_ozXv_m3yHP1"
      },
      "source": [
        "### Occluded occupancy"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "MSh4bHiUyJnY"
      },
      "outputs": [],
      "source": [
        "images = []\n",
        "for k in range(config.num_waypoints):\n",
        "  occluded_occupancy_grids = pred_waypoints.get_occluded_occupancy_at_waypoint(\n",
        "      k)\n",
        "  occluded_occupancy_rgb = occupancy_flow_vis.occupancy_rgb_image(\n",
        "      agent_grids=occluded_occupancy_grids,\n",
        "      roadgraph_image=vis_grids.roadgraph,\n",
        "      gamma=1.6,\n",
        "  )\n",
        "  # Append the occluded-occupancy frame (first example in the batch).\n",
        "  # Fixed copy-paste bug: the original appended `observed_occupancy_rgb`,\n",
        "  # a leftover variable from the previous visualization cell.\n",
        "  images.append(occluded_occupancy_rgb[0])\n",
        "\n",
        "anim = create_animation(images, interval=200)\n",
        "HTML(anim.to_html5_video())"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "N_NaulWWxu3q"
      },
      "source": [
        "### Flow"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Ue2HMuoDnzGQ"
      },
      "outputs": [],
      "source": [
        "images = []\n",
        "for k in range(config.num_waypoints):\n",
        "  # Render the predicted flow field at waypoint k as an RGB image.\n",
        "  flow_rgb = occupancy_flow_vis.flow_rgb_image(\n",
        "      flow=pred_waypoints.vehicles.flow[k],\n",
        "      roadgraph_image=vis_grids.roadgraph,\n",
        "      agent_trails=vis_grids.agent_trails,\n",
        "  )\n",
        "  # Visualize the first example in the batch.\n",
        "  images.append(flow_rgb[0])\n",
        "\n",
        "anim = create_animation(images, interval=200)\n",
        "HTML(anim.to_html5_video())"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "t0ncPUnkyVWw"
      },
      "source": [
        "### Joint occupancy-flow"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "t4ZdO_cgVoOl"
      },
      "outputs": [],
      "source": [
        "images = []\n",
        "for k in range(config.num_waypoints):\n",
        "  observed_occupancy_grids = pred_waypoints.get_observed_occupancy_at_waypoint(\n",
        "        k)\n",
        "  occupancy = observed_occupancy_grids.vehicles\n",
        "  flow = pred_waypoints.vehicles.flow[k]\n",
        "  # Weight the flow field by predicted occupancy so flow only shows where\n",
        "  # the model predicts vehicles (elementwise product; assumes the occupancy\n",
        "  # grid broadcasts against the 2-channel flow -- TODO confirm shapes).\n",
        "  occupancy_flow = occupancy * flow\n",
        "  flow_rgb = occupancy_flow_vis.flow_rgb_image(\n",
        "      flow=occupancy_flow,\n",
        "      roadgraph_image=vis_grids.roadgraph,\n",
        "      agent_trails=vis_grids.agent_trails,\n",
        "  )\n",
        "  images.append(flow_rgb[0])\n",
        "\n",
        "anim = create_animation(images, interval=200)\n",
        "HTML(anim.to_html5_video())"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "9nHS7EJTAarV"
      },
      "source": [
        "# Metrics"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "gcC5qJJRL2i3"
      },
      "outputs": [],
      "source": [
        "# Compare predicted waypoints against the ground-truth waypoints to\n",
        "# compute the official challenge metrics.\n",
        "metrics = occupancy_flow_metrics.compute_occupancy_flow_metrics(\n",
        "    config=config,\n",
        "    true_waypoints=true_waypoints,\n",
        "    pred_waypoints=pred_waypoints,\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "jgyRjFGySGb6"
      },
      "outputs": [],
      "source": [
        "# The metrics proto prints as human-readable text.\n",
        "print('Metrics:')\n",
        "print(metrics)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "KmTcDNoIypAB"
      },
      "source": [
        "# Generate submission"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "b5yqPRRwzskg"
      },
      "source": [
        "## Submission proto\n",
        "\n",
        "\u003cfont color='red'\u003eNOTE: Please replace the values with your information.\u003c/font\u003e"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "qmdffYJJzxpZ"
      },
      "outputs": [],
      "source": [
        "def _make_submission_proto(\n",
        ") -\u003e occupancy_flow_submission_pb2.ChallengeSubmission:\n",
        "  \"\"\"Makes a submission proto to store predictions for one shard.\n",
        "\n",
        "  Returns:\n",
        "    A ChallengeSubmission proto with submitter metadata filled in.\n",
        "  \"\"\"\n",
        "  submission = occupancy_flow_submission_pb2.ChallengeSubmission()\n",
        "  # TODO: Replace these placeholder values with your own information\n",
        "  # (see the note in the markdown cell above).\n",
        "  submission.account_name = 'me@gmail.com'\n",
        "  submission.unique_method_name = 'My method'\n",
        "  submission.authors.extend(['Author 1', 'Author 2', 'Author 3'])\n",
        "  submission.description = 'Description of my method'\n",
        "  submission.method_link = 'http://example.com/'\n",
        "  return submission"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "IBEW4nvhKG-5"
      },
      "source": [
        "## Test set shards"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Mpt-Et3yKM4E"
      },
      "outputs": [],
      "source": [
        "# Discover every test shard matching the TEST_FILES glob pattern.\n",
        "test_shard_paths = tf.io.gfile.glob(TEST_FILES)\n",
        "print('All test shards:')\n",
        "print('\\n'.join(test_shard_paths))"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "A8q2vi1yMIn9"
      },
      "source": [
        "## Test scenario IDs"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "__k10fPFMKZf"
      },
      "outputs": [],
      "source": [
        "with tf.io.gfile.GFile(TEST_SCENARIO_IDS_FILE) as f:\n",
        "  # One scenario ID per line; strip the trailing newline from each. Using\n",
        "  # `line` as the loop variable also avoids shadowing the builtin `id`.\n",
        "  test_scenario_ids = [line.rstrip() for line in f.readlines()]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "RuDoFGjSMXlO"
      },
      "outputs": [],
      "source": [
        "# Sanity-check how many scenario IDs were read from the file.\n",
        "print('Got', len(test_scenario_ids), 'test scenario ids.')"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "nTXBTisvy6F0"
      },
      "source": [
        "## Test dataset"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "rDDfamZhy7ZG"
      },
      "outputs": [],
      "source": [
        "def _make_test_dataset(test_shard_path: str) -\u003e tf.data.Dataset:\n",
        "  \"\"\"Builds a batched dataset for a single test-set shard.\"\"\"\n",
        "  # Parse each serialized example, then batch one example at a time.\n",
        "  return (\n",
        "      tf.data.TFRecordDataset(test_shard_path)\n",
        "      .map(occupancy_flow_data.parse_tf_example)\n",
        "      .batch(1))"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "bO7C-q0azugG"
      },
      "source": [
        "## Inference for one test set shard"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "NInsu1eK17wJ"
      },
      "outputs": [],
      "source": [
        "def _quantize_occupancy(occupancy: np.ndarray) -\u003e np.ndarray:\n",
        "  \"\"\"Maps occupancy probabilities in [0, 1] to uint8 values in [0, 255].\"\"\"\n",
        "  return np.round(occupancy * 255).astype(np.uint8)\n",
        "\n",
        "\n",
        "def _compress_bytes(quantized: np.ndarray) -\u003e bytes:\n",
        "  \"\"\"Serializes a quantized array as zlib-compressed raw bytes.\"\"\"\n",
        "  return zlib.compress(quantized.tobytes())\n",
        "\n",
        "\n",
        "def _add_waypoints_to_scenario_prediction(\n",
        "    pred_waypoints: occupancy_flow_grids.WaypointGrids,\n",
        "    scenario_prediction: occupancy_flow_submission_pb2.ScenarioPrediction,\n",
        "    config: occupancy_flow_metrics_pb2.OccupancyFlowTaskConfig,\n",
        ") -\u003e None:\n",
        "  \"\"\"Add predictions for all waypoints to scenario_prediction message.\n",
        "\n",
        "  Args:\n",
        "    pred_waypoints: Predicted occupancy probabilities and flow fields.\n",
        "    scenario_prediction: Output proto message receiving the waypoints.\n",
        "    config: OccupancyFlowTaskConfig proto message.\n",
        "  \"\"\"\n",
        "  for k in range(config.num_waypoints):\n",
        "    waypoint_message = scenario_prediction.waypoints.add()\n",
        "    # Observed occupancy: quantize to uint8, then compress.\n",
        "    waypoint_message.observed_vehicles_occupancy = _compress_bytes(\n",
        "        _quantize_occupancy(\n",
        "            pred_waypoints.vehicles.observed_occupancy[k].numpy()))\n",
        "    # Occluded occupancy: same quantization scheme.\n",
        "    waypoint_message.occluded_vehicles_occupancy = _compress_bytes(\n",
        "        _quantize_occupancy(\n",
        "            pred_waypoints.vehicles.occluded_occupancy[k].numpy()))\n",
        "    # Flow: round to integers and clip into the int8 range before packing.\n",
        "    flow = pred_waypoints.vehicles.flow[k].numpy()\n",
        "    flow_quantized = np.clip(np.round(flow), -128, 127).astype(np.int8)\n",
        "    waypoint_message.all_vehicles_flow = _compress_bytes(flow_quantized)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Gm_QIRxOiAdl"
      },
      "outputs": [],
      "source": [
        "def _generate_predictions_for_one_test_shard(\n",
        "    submission: occupancy_flow_submission_pb2.ChallengeSubmission,\n",
        "    test_dataset: tf.data.Dataset,\n",
        "    test_scenario_ids: Sequence[str],\n",
        "    shard_message: str,\n",
        ") -\u003e None:\n",
        "  \"\"\"Iterate over all test examples in one shard and generate predictions.\n",
        "\n",
        "  Args:\n",
        "    submission: Submission proto that receives one ScenarioPrediction per\n",
        "      matching example.\n",
        "    test_dataset: Batched dataset for one test shard.\n",
        "    test_scenario_ids: Scenario IDs that require predictions.\n",
        "    shard_message: Progress string (e.g. '2 of 150') used in log output.\n",
        "  \"\"\"\n",
        "  for i, inputs in enumerate(test_dataset):\n",
        "    # NOTE(review): this membership test compares a string tensor against\n",
        "    # Python strings via elementwise comparison -- presumably relies on the\n",
        "    # dataset's batch size of 1; confirm if the batching ever changes.\n",
        "    if inputs['scenario/id'] in test_scenario_ids:\n",
        "      print(f'Processing test shard {shard_message}, example {i}...')\n",
        "      # Run inference.\n",
        "      pred_waypoint_logits = _run_model_on_inputs(inputs=inputs, training=False)\n",
        "      pred_waypoints = _apply_sigmoid_to_occupancy_logits(pred_waypoint_logits)\n",
        "\n",
        "      # Make new scenario prediction message.\n",
        "      scenario_prediction = submission.scenario_predictions.add()\n",
        "      # .numpy()[0] extracts the ID of the first (only) example as bytes.\n",
        "      scenario_prediction.scenario_id = inputs['scenario/id'].numpy()[0]\n",
        "\n",
        "      # Add all waypoints.\n",
        "      _add_waypoints_to_scenario_prediction(\n",
        "          pred_waypoints=pred_waypoints,\n",
        "          scenario_prediction=scenario_prediction,\n",
        "          config=config)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "5JaUADaE7Mpi"
      },
      "source": [
        "## Save to file"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "MT8mmMMcoaXG"
      },
      "outputs": [],
      "source": [
        "def _save_submission_to_file(\n",
        "    submission: occupancy_flow_submission_pb2.ChallengeSubmission,\n",
        "    test_shard_path: str,\n",
        ") -\u003e None:\n",
        "  \"\"\"Save predictions for one test shard as a binary protobuf.\n",
        "\n",
        "  Args:\n",
        "    submission: Filled-in submission proto for one shard.\n",
        "    test_shard_path: Path of the source test shard; used to derive the\n",
        "      output file name.\n",
        "\n",
        "  Raises:\n",
        "    ValueError: If the shard file name does not match the expected pattern.\n",
        "  \"\"\"\n",
        "  save_folder = os.path.join(pathlib.Path.home(),\n",
        "                             'occupancy_flow_challenge/testing')\n",
        "  os.makedirs(save_folder, exist_ok=True)\n",
        "  basename = os.path.basename(test_shard_path)\n",
        "  if 'testing_tfexample.tfrecord' not in basename:\n",
        "    raise ValueError('Cannot determine file path for saving submission.')\n",
        "  submission_basename = basename.replace('testing_tfexample.tfrecord',\n",
        "                                         'occupancy_flow_submission.binproto')\n",
        "  submission_shard_file_path = os.path.join(save_folder, submission_basename)\n",
        "  num_scenario_predictions = len(submission.scenario_predictions)\n",
        "  print(f'Saving {num_scenario_predictions} scenario predictions to '\n",
        "        f'{submission_shard_file_path}...\\n')\n",
        "  # Use a context manager so the file is closed even if serialization or\n",
        "  # the write raises.\n",
        "  with open(submission_shard_file_path, 'wb') as f:\n",
        "    f.write(submission.SerializeToString())"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "9tdSkj-gNheL"
      },
      "source": [
        "## Run (slow)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "1BU34i9VNn7C"
      },
      "outputs": [],
      "source": [
        "for i, test_shard_path in enumerate(test_shard_paths):\n",
        "  print(f'Creating submission for test shard {test_shard_path}...')\n",
        "  test_dataset = _make_test_dataset(test_shard_path=test_shard_path)\n",
        "  submission = _make_submission_proto()\n",
        "  _generate_predictions_for_one_test_shard(\n",
        "      submission=submission,\n",
        "      test_dataset=test_dataset,\n",
        "      test_scenario_ids=test_scenario_ids,\n",
        "      shard_message=f'{i + 1} of {len(test_shard_paths)}')\n",
        "  _save_submission_to_file(\n",
        "      submission=submission, test_shard_path=test_shard_path)\n",
        "\n",
        "  # Guard against shards that contributed no predictions; indexing [-1]\n",
        "  # on an empty repeated field would raise an IndexError.\n",
        "  if i == 0 and submission.scenario_predictions:\n",
        "    print('Sample scenario prediction:\\n')\n",
        "    print(submission.scenario_predictions[-1])"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "IKYxqOQZQRxH"
      },
      "source": [
        "## Compress"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "heLC7eMqQUBc"
      },
      "outputs": [],
      "source": [
        "# Package all per-shard submission files into one archive for upload.\n",
        "!tar czvf ~/occupancy_flow_challenge/submit_testing.tar.gz -C ~/occupancy_flow_challenge/testing ."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "J151Uv0UFTsM"
      },
      "source": [
        "## Submit\n",
        "\n",
        "Please see the [challenge website](https://waymo.com/open/challenges/2022/occupancy-flow-prediction-challenge/) for instructions on how to submit."
      ]
    }
  ],
  "metadata": {
    "colab": {
      "collapsed_sections": [],
      "last_runtime": {
        "build_target": "",
        "kind": "local"
      },
      "name": "Waymo Open Dataset Occupancy and Flow Tutorial",
      "private_outputs": true,
      "provenance": [
        {
          "file_id": "10669HUDVmyEtq7ac1-OAAwrBb5q9XZQ9",
          "timestamp": 1645066131576
        },
        {
          "file_id": "1jyOWS18MD9MxD4TTw_ZtegVxoB9BxD--",
          "timestamp": 1644697808186
        },
        {
          "file_id": "tutorial_occupancy_flow.ipynb",
          "timestamp": 1644187058298
        },
        {
          "file_id": "1VrSkEvjqNaShhQS1i3GlNqegLwqmqlcM",
          "timestamp": 1615354811513
        },
        {
          "file_id": "redacted",
          "timestamp": 1615333360862
        },
        {
          "file_id": "1FS9qXkF5DBPVobGCMwk_7ZgPUuf3YyWp",
          "timestamp": 1613686002912
        }
      ],
      "toc_visible": true
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
