{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "C24AfPCtj4jY"
      },
      "outputs": [],
      "source": [
        "# @title Install dependencies {form-width: \"25%\"}\n",
        "\n",
        "!pip install mediapy"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "IIdiWPpgj-7F"
      },
      "outputs": [],
      "source": [
        "# @title Imports {form-width: \"25%\"}\n",
        "\n",
        "import cv2\n",
        "import einops\n",
        "import matplotlib\n",
        "from matplotlib import cm\n",
        "import matplotlib.pyplot as plt\n",
        "from matplotlib.figure import Figure\n",
        "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
        "from matplotlib.collections import LineCollection\n",
        "from mpl_toolkits.mplot3d.art3d import Line3DCollection\n",
        "import mediapy as media\n",
        "import numpy as np\n",
        "import plotly.graph_objects as go\n",
        "import seaborn as sns\n",
        "from sklearn.decomposition import PCA\n",
        "import tensorflow_datasets as tfds"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Ob_5_ouKkETK"
      },
      "outputs": [],
      "source": [
        "# @title Load dataset {form-width: \"25%\"}\n",
        "\n",
        "ds = tfds.load('movi_f/512x512', data_dir='gs://kubric-public/tfds', shuffle_files=False)\n",
        "ds = ds['train']\n",
        "dataset = tfds.as_numpy(ds)\n",
        "\n",
        "ds_iter = iter(dataset)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Fi4PnKJOVLUW"
      },
      "outputs": [],
      "source": [
        "# @title Load next batch {form-width: \"25%\"}\n",
        "\n",
        "sample = next(ds_iter)\n",
        "media.show_video(sample['video'], fps=10)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "e4XfQThYkXkt"
      },
      "outputs": [],
      "source": [
        "# @title Geometry functions {form-width: \"25%\"}\n",
        "\n",
        "def get_intrinsic(focal_length, sensor_width, width, height):\n",
        "  f_x = focal_length / sensor_width\n",
        "  sensor_height = sensor_width * width / height\n",
        "  f_y = focal_length / sensor_height\n",
        "  p_x = 1 / 2.\n",
        "  p_y = 1 / 2.\n",
        "  return np.array([\n",
        "      [f_x, 0, p_x],\n",
        "      [0, f_y, p_y],\n",
        "      [0, 0, 1],\n",
        "  ])\n",
        "\n",
        "def batch_quaternion_to_rotation_matrix(quaternions):\n",
        "  \"\"\"Convert a batch of quaternions to rotation matrices.\"\"\"\n",
        "  # Normalize the quaternions\n",
        "  quaternions = quaternions / np.linalg.norm(quaternions, axis=-1, keepdims=True)\n",
        "  q0, q1, q2, q3 = quaternions[..., 0], quaternions[..., 1], quaternions[..., 2], quaternions[..., 3]\n",
        "  # Compute rotation matrices using broadcasting\n",
        "  rot = np.zeros(quaternions.shape[:-1] + (3, 3))\n",
        "  rot[..., 0, 0] = 1 - 2 * (q2**2 + q3**2)\n",
        "  rot[..., 0, 1] = 2 * (q1*q2 - q0*q3)\n",
        "  rot[..., 0, 2] = 2 * (q0*q2 + q1*q3)\n",
        "  rot[..., 1, 0] = 2 * (q1*q2 + q0*q3)\n",
        "  rot[..., 1, 1] = 1 - 2 * (q1**2 + q3**2)\n",
        "  rot[..., 1, 2] = 2 * (q2*q3 - q0*q1)\n",
        "  rot[..., 2, 0] = 2 * (q1*q3 - q0*q2)\n",
        "  rot[..., 2, 1] = 2 * (q0*q1 + q2*q3)\n",
        "  rot[..., 2, 2] = 1 - 2 * (q1**2 + q2**2)\n",
        "  return rot\n",
        "\n",
        "def get_matrix_world(rotation, translation):\n",
        "  \"\"\"Single transformation matrix.\"\"\"\n",
        "  transform = np.eye(4)\n",
        "  transform[:3, :3] = rotation\n",
        "  transform[:3, 3] = translation\n",
        "  return transform\n",
        "\n",
        "def batch_get_matrix_world(rotations, translations):\n",
        "  \"\"\"Batch version of get_matrix_world.\"\"\"\n",
        "  transforms = np.zeros(rotations.shape[:-2] + (4, 4), dtype=np.float32)\n",
        "  transforms[..., :3, :3] = rotations\n",
        "  transforms[..., :3, 3] = translations\n",
        "  transforms[..., 3, 3] = 1\n",
        "  return transforms\n",
        "\n",
        "def image2camera(image_coords, depth, intrinsic, width, height):\n",
        "  \"\"\"Lift 2D image coordinate from [0, height/width] to camera coordinate.\"\"\"\n",
        "  normed = image_coords / np.array((width, height))\n",
        "  hom = np.concatenate([normed, np.ones_like(normed[..., :1])], axis=-1)\n",
        "  camera_plane = hom @ np.linalg.inv(intrinsic).T\n",
        "  return camera_plane * depth[..., None]\n",
        "\n",
        "def camera2world(rotation, translation, points3d):\n",
        "  \"\"\"Transform 3D points from camera coordinate to world coordinate.\"\"\"\n",
        "  matrix = get_matrix_world(rotation, translation)\n",
        "  points4d = np.concatenate([points3d, np.ones_like(points3d[..., :1])], axis=-1)\n",
        "  return (points4d @ np.linalg.inv(matrix).T)[..., :3]\n",
        "\n",
        "def world2camera(rotation, translation, points3d):\n",
        "  \"\"\"Transform 3D points from world coordinate to camera coordinate.\"\"\"\n",
        "  matrix = get_matrix_world(rotation, translation)\n",
        "  points4d = np.concatenate([points3d, np.ones_like(points3d[..., :1])], axis=-1)\n",
        "  return (points4d @ matrix.T)[..., :3]\n",
        "\n",
        "def camera2image(point3d, intrinsic):\n",
        "  \"\"\"Project 3D point in camera coordinate to [0, 1] image plane.\"\"\"\n",
        "  proj = point3d @ intrinsic.T\n",
        "  image_coords = proj[..., :2] / proj[..., 2:3]\n",
        "  z = proj[..., 2]\n",
        "  return image_coords, z\n",
        "\n",
        "def bilinear_interpolate(im, x, y):\n",
        "  \"\"\"Bilinear interpolation for 2D coordinates.\"\"\"\n",
        "  x0 = np.floor(x).astype(int)\n",
        "  x1 = x0 + 1\n",
        "  y0 = np.floor(y).astype(int)\n",
        "  y1 = y0 + 1\n",
        "\n",
        "  x0 = np.clip(x0, 0, im.shape[1] - 1)\n",
        "  x1 = np.clip(x1, 0, im.shape[1] - 1)\n",
        "  y0 = np.clip(y0, 0, im.shape[0] - 1)\n",
        "  y1 = np.clip(y1, 0, im.shape[0] - 1)\n",
        "\n",
        "  im_a = im[y0, x0]\n",
        "  im_b = im[y1, x0]\n",
        "  im_c = im[y0, x1]\n",
        "  im_d = im[y1, x1]\n",
        "\n",
        "  wa = (x1 - x) * (y1 - y)\n",
        "  wb = (x1 - x) * (y - y0)\n",
        "  wc = (x - x0) * (y1 - y)\n",
        "  wd = (x - x0) * (y - y0)\n",
        "\n",
        "  return wa * im_a + wb * im_b + wc * im_c + wd * im_d\n",
        "\n",
        "def batch_bilinear_interpolate(im, x, y):\n",
        "  \"\"\"Bilinear interpolation for batch of images.\"\"\"\n",
        "  x0 = np.floor(x).astype(int)\n",
        "  x1 = x0 + 1\n",
        "  y0 = np.floor(y).astype(int)\n",
        "  y1 = y0 + 1\n",
        "\n",
        "  x0 = np.clip(x0, 0, im.shape[-1] - 1)\n",
        "  x1 = np.clip(x1, 0, im.shape[-1] - 1)\n",
        "  y0 = np.clip(y0, 0, im.shape[-2] - 1)\n",
        "  y1 = np.clip(y1, 0, im.shape[-2] - 1)\n",
        "\n",
        "  b = np.arange(im.shape[0])[:, None, None]\n",
        "  im_a = im[b, y0, x0]\n",
        "  im_b = im[b, y1, x0]\n",
        "  im_c = im[b, y0, x1]\n",
        "  im_d = im[b, y1, x1]\n",
        "\n",
        "  wa = (x1 - x) * (y1 - y)\n",
        "  wb = (x1 - x) * (y - y0)\n",
        "  wc = (x - x0) * (y1 - y)\n",
        "  wd = (x - x0) * (y - y0)\n",
        "\n",
        "  return wa * im_a + wb * im_b + wc * im_c + wd * im_d\n",
        "\n",
        "def sample_grid_points(height, width, stride=1):\n",
        "  \"\"\"Return [H/stride, W/stride, 2] grid points with x,y order.\"\"\"\n",
        "  grid = np.mgrid[stride//2:height:stride, stride//2:width:stride].transpose(1, 2, 0)\n",
        "  return grid[..., ::-1]  # swap to (x, y)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "wXEPyhY3VnLb"
      },
      "outputs": [],
      "source": [
        "# @title Prepare groundtruth data {form-width: \"25%\"}\n",
        "\n",
        "frames = sample['video']\n",
        "num_frames, height, width = frames.shape[:3]\n",
        "depth_range = sample['metadata']['depth_range']\n",
        "depths = sample['depth'][..., 0] / 65535 * (depth_range[1] - depth_range[0]) + depth_range[0]\n",
        "intrinsic = get_intrinsic(sample['camera']['focal_length'], sample['camera']['sensor_width'], width, height)\n",
        "masks = sample['segmentations'][..., 0]\n",
        "bboxes_3d = einops.rearrange(sample['instances']['bboxes_3d'], 'n t ... -\u003e t n ...')\n",
        "\n",
        "camera_rotations = batch_quaternion_to_rotation_matrix(sample['camera']['quaternions'])\n",
        "camera_rotations = camera_rotations @ np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])  # flip y and z axis\n",
        "camera_positions = sample['camera']['positions']\n",
        "camera_poses = batch_get_matrix_world(camera_rotations, camera_positions)\n",
        "camera_poses = np.linalg.inv(camera_poses)  # world to camera extinsics\n",
        "object_quaternions = einops.rearrange(sample['instances']['quaternions'], 'n t ... -\u003e t n ...')\n",
        "object_rotations = batch_quaternion_to_rotation_matrix(object_quaternions)\n",
        "object_positions = einops.rearrange(sample['instances']['positions'], 'n t ... -\u003e t n ...')\n",
        "object_poses = batch_get_matrix_world(object_rotations, object_positions)\n",
        "identity = np.tile(np.eye(4)[None, None], (num_frames, 1, 1, 1))\n",
        "object_poses = np.concatenate((identity, object_poses), axis=1)  # add background to object poses\n",
        "\n",
        "minv, maxv = sample[\"metadata\"][\"forward_flow_range\"]\n",
        "forward_flows = sample['forward_flow'] / 65535 * (maxv - minv) + minv\n",
        "forward_flows = forward_flows[..., ::-1]  # switch to [x, y]\n",
        "minv, maxv = sample[\"metadata\"][\"backward_flow_range\"]\n",
        "backward_flows = sample['backward_flow'] / 65535 * (maxv - minv) + minv\n",
        "backward_flows = -backward_flows[..., ::-1]  # switch to [x, y]\n",
        "surface_normals = sample['normal'] / 65535 * 2.0 - 1"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "WCcn8GofV-PT"
      },
      "outputs": [],
      "source": [
        "# @title Visualize groundtruth z_buffer {form-width: \"25%\"}\n",
        "\n",
        "num_frames, height, width = frames.shape[0:3]\n",
        "x, y = np.meshgrid(np.arange(width), np.arange(height))\n",
        "projected_pt = np.stack([x, y, np.ones_like(x)], axis=-1) / np.array([width, height, 1])  # Shape: (height, width, 3)\n",
        "camera_plane = projected_pt @ np.linalg.inv(intrinsic).T # Shape: (height, width, 3)\n",
        "camera_ball = camera_plane / np.sqrt(np.sum(np.square(camera_plane), axis=-1, keepdims=True)) # Shape: (height, width, 3)\n",
        "camera_coords = camera_ball[None] * depths[..., None]  # Shape: (num_frames, height, width, 3)\n",
        "z_buffers = camera_coords[..., 2]  # Shape: (num_frames, height, width)\n",
        "\n",
        "\n",
        "fig = go.Figure()\n",
        "\n",
        "# Add the grayscale image as a heatmap\n",
        "fig.add_trace(go.Heatmap(\n",
        "    z=1/z_buffers[0, ::-1],\n",
        "    colorscale='magma',\n",
        "    colorbar=dict(title='Value'),\n",
        "    hovertemplate='X: %{x}\u003cbr\u003eY: %{y}\u003cbr\u003ePixel Value: %{z:.2f}\u003cextra\u003e\u003c/extra\u003e'\n",
        "))\n",
        "\n",
        "# Define layout with correct aspect ratio\n",
        "fig.update_layout(title='1/z_buffer', width=width, height=height)\n",
        "\n",
        "fig.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "B3Hiw4X2lRVD"
      },
      "outputs": [],
      "source": [
        "# @title Visualize different groundtruth data {form-width: \"25%\"}\n",
        "\n",
        "def segmentations_to_video(masks):\n",
        "  \"\"\"Converts a sequence of segmentation masks to color code video.\n",
        "\n",
        "  Args:\n",
        "    masks: [num_frames, height, width], np.uint8, [0, num_objects]\n",
        "\n",
        "  Returns:\n",
        "    video: [num_frames, height, width, 3], np.uint8, [0, 255]\n",
        "  \"\"\"\n",
        "  num_objects = np.max(masks)  # assume consecutive numbering\n",
        "  palette = [(0, 0, 0)] + sns.color_palette(n_colors=num_objects)\n",
        "  palette = [(int(c[0] * 255), int(c[1] * 255), int(c[2] * 255)) for c in palette]\n",
        "  video = np.zeros((masks.shape[0], masks.shape[1], masks.shape[2], 3), dtype=np.uint8)\n",
        "  for i in range(num_objects + 1):\n",
        "    video[masks == i] = palette[i]\n",
        "  return video\n",
        "\n",
        "def depths_to_video(depths):\n",
        "  \"\"\"Converts a sequence of depths to color code video.\n",
        "\n",
        "  Args:\n",
        "    depths: [num_frames, height, width], np.float32, [0, inf]\n",
        "\n",
        "  Returns:\n",
        "    video: [num_frames, height, width, 3], np.uint8, [0, 255]\n",
        "  \"\"\"\n",
        "  vmax = np.percentile(depths, 95)\n",
        "  normalizer = matplotlib.colors.Normalize(vmin=depths.min(), vmax=vmax)\n",
        "  mapper = cm.ScalarMappable(norm=normalizer, cmap='magma')\n",
        "  video = np.zeros((depths.shape[0], depths.shape[1], depths.shape[2], 3),\n",
        "                   dtype=np.uint8)\n",
        "  for i in range(depths.shape[0]):\n",
        "    video[i] = (mapper.to_rgba(depths[i])[:, :, :3] * 255).astype(np.uint8)\n",
        "  return video\n",
        "\n",
        "def colored_depthmap(depth, d_min=None, d_max=None, invalid_value=None, colormap='Spectral'):\n",
        "  \"\"\"Converts a depth map to a colored image using a plasma colormap.\n",
        "\n",
        "  Args:\n",
        "    depth: The depth map (numpy array).\n",
        "    d_min: Minimum depth value (float, optional). Defaults to None (minimum in\n",
        "      depth).\n",
        "    d_max: Maximum depth value (float, optional). Defaults to None (maximum in\n",
        "      depth).\n",
        "\n",
        "  Returns:\n",
        "    The colored depth map as a numpy array of uint8 representing RGB channels.\n",
        "  \"\"\"\n",
        "  if d_min is None:\n",
        "    d_min = np.min(depth)\n",
        "  if d_max is None:\n",
        "    d_max = np.max(depth)\n",
        "  depth_relative = (depth - d_min) / (d_max - d_min)\n",
        "  cmap = plt.get_cmap(colormap)\n",
        "  depth_colored = 255 * cmap(depth_relative)[:, :, :3]  # H, W, C\n",
        "  depth_colored = depth_colored.astype(np.uint8)\n",
        "  depth_colored[depth == invalid_value] = 0\n",
        "  return depth_colored\n",
        "\n",
        "def depths_to_video(depths):\n",
        "  \"\"\"Converts a sequence of depths to color code video.\n",
        "\n",
        "  Args:\n",
        "    depths: [num_frames, height, width], np.float32, [0, inf]\n",
        "\n",
        "  Returns:\n",
        "    video: [num_frames, height, width, 3], np.uint8, [0, 255]\n",
        "  \"\"\"\n",
        "  vmin, vmax = np.percentile(depths, 5), np.percentile(depths, 95)\n",
        "  normalizer = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)\n",
        "  mapper = cm.ScalarMappable(norm=normalizer, cmap='magma')\n",
        "  video = np.zeros((depths.shape[0], depths.shape[1], depths.shape[2], 3), dtype=np.uint8)\n",
        "  for i in range(depths.shape[0]):\n",
        "    video[i] = (mapper.to_rgba(depths[i])[:, :, :3] * 255).astype(np.uint8)\n",
        "  return video\n",
        "\n",
        "def plot_depth_prism(depthmaps):\n",
        "  depthmaps[depthmaps == 0] = np.nan\n",
        "  vmin, vmax = np.nanpercentile(depthmaps, [5, 95])\n",
        "  colored_d = np.stack([colored_depthmap(1/d, 1/vmax, 1/vmin, np.nan, 'turbo') for d in depthmaps], axis=0)\n",
        "  return colored_d\n",
        "\n",
        "def flow_to_rgb(vec, flow_mag_range=None, white_bg=False):\n",
        "  height, width = vec.shape[:2]\n",
        "  scaling = 50. / (height**2 + width**2)**0.5\n",
        "  direction = (np.arctan2(vec[..., 0], vec[..., 1]) + np.pi) / (2 * np.pi)\n",
        "  norm = np.linalg.norm(vec, axis=-1)\n",
        "  if flow_mag_range is None:\n",
        "    flow_mag_range = norm.min(), norm.max()\n",
        "  magnitude = np.clip((norm - flow_mag_range[0]) * scaling, 0., 1.)\n",
        "  if white_bg == True:\n",
        "    saturation = np.ones_like(direction)\n",
        "    hsv = np.stack([direction, magnitude, saturation], axis=-1)\n",
        "  else:\n",
        "    saturation = np.ones_like(direction)\n",
        "    hsv = np.stack([direction, saturation, magnitude], axis=-1)\n",
        "  rgb = matplotlib.colors.hsv_to_rgb(hsv)\n",
        "  return rgb\n",
        "\n",
        "def get_colormap(height, width):\n",
        "  \"\"\"Generates rainbow colormap for visualizing points.\"\"\"\n",
        "  color_map = matplotlib.colormaps.get_cmap('hsv')\n",
        "  cmap_norm = matplotlib.colors.Normalize(vmin=0, vmax=height - 1)\n",
        "  # Same as the for loop below\n",
        "  # colormap = np.zeros((height, width, 3))\n",
        "  # for i in range(height):\n",
        "  #   for j in range(width):\n",
        "  #     colormap[i, j] = np.array(color_map(cmap_norm(i)))[:3] * 255\n",
        "  indices = np.arange(height)\n",
        "  colors = color_map(cmap_norm(indices))[:, :3] * 255\n",
        "  colormap = np.tile(colors[:, None, :], (1, width, 1))\n",
        "  return colormap\n",
        "\n",
        "def render_pose(image,\n",
        "                object_pose,\n",
        "                camera_matrix,  # (3, 4)\n",
        "                arrow_length=0.05,\n",
        "                arrow_thickness=4,\n",
        "                arrow_tip_length=0.1):\n",
        "  \"\"\"Renders 3D poses onto the image plane, overriding the given image.\"\"\"\n",
        "  rotation, position = object_pose[:3, :3], object_pose[:3, 3]\n",
        "  height, width = image.shape[:2]\n",
        "  image_point = camera_matrix @ np.r_[position, 1.0]\n",
        "  image_point /= image_point[-1]\n",
        "  image_point *= np.array([width, height, 1])\n",
        "\n",
        "  colors = {'r': (255, 0, 0, 255), 'g': (0, 255, 0, 255), 'b': (0, 0, 255, 255)}\n",
        "  for i, c in enumerate('rgb'):\n",
        "    image_arrow_point = camera_matrix @ np.r_[position + rotation[:, i] * arrow_length, 1.0]\n",
        "    image_arrow_point /= image_arrow_point[-1]\n",
        "    image_arrow_point *= np.array([width, height, 1])\n",
        "    pt1 = (round(image_point[0]), round(image_point[1]))\n",
        "    pt2 = (round(image_arrow_point[0]), round(image_arrow_point[1]))\n",
        "    # black arrow background (to add a black border to the colored arrows)\n",
        "    cv2.arrowedLine(image, pt1, pt2, color=(0, 0, 0, 255), thickness=arrow_thickness + 1, line_type=cv2.LINE_AA)\n",
        "    # red/greeb/blue arrow\n",
        "    cv2.arrowedLine(image, pt1, pt2, color=colors[c], thickness=arrow_thickness, line_type=cv2.LINE_AA, tipLength=arrow_tip_length)\n",
        "  return image\n",
        "\n",
        "def poses_to_video(frames, object_poses, camera_poses, intrinsic):\n",
        "  intrinsic = np.concatenate([intrinsic, np.zeros((3, 1))], axis=1)\n",
        "\n",
        "  video = []\n",
        "  for t in range(object_poses.shape[0]):\n",
        "    camera_matrix = intrinsic @ camera_poses[t]\n",
        "\n",
        "    image_with_pose = frames[t].copy()\n",
        "    for i in range(object_poses.shape[1]):\n",
        "      image_with_pose = render_pose(image_with_pose, object_poses[t, i], camera_matrix, arrow_length=1)\n",
        "    video.append(image_with_pose)\n",
        "  video = np.stack(video)\n",
        "  return video\n",
        "\n",
        "def draw_projected_3d_bbox(image, proj_corners, proj_centers=None):\n",
        "  \"\"\"Draw a projected 3d bbox on a 2d image.\"\"\"\n",
        "  height, width = image.shape[0:2]\n",
        "  proj_corners = proj_corners[:, :, :2]  # we only need (x, y)\n",
        "  if proj_centers is not None:\n",
        "    proj_centers = proj_centers[:, :2]\n",
        "  corner_pairs = (\n",
        "      (0, 1),\n",
        "      (0, 2),\n",
        "      (2, 3),\n",
        "      (1, 3),\n",
        "      (4, 5),\n",
        "      (4, 6),\n",
        "      (6, 7),\n",
        "      (5, 7),\n",
        "      (0, 4),\n",
        "      (1, 5),\n",
        "      (2, 6),\n",
        "      (3, 7),\n",
        "  )\n",
        "  for i, proj_pt in enumerate(proj_corners):  # [8, 2]\n",
        "    for corner_pair in corner_pairs:\n",
        "      pt1, pt2 = proj_pt[corner_pair[0]], proj_pt[corner_pair[1]]\n",
        "      pt1 = (round(pt1[0] * width), round(pt1[1] * height))\n",
        "      pt2 = (round(pt2[0] * width), round(pt2[1] * height))\n",
        "      cv2.line(image, pt1, pt2, color=(0, 255, 0), thickness=1)\n",
        "    if proj_centers is not None:\n",
        "      pt = proj_centers[i]\n",
        "      pt = (round(pt[0] * width), round(pt[1] * height))\n",
        "      cv2.circle(image, pt, height // 100, color=(255, 0, 0), thickness=-1)\n",
        "  return image\n",
        "\n",
        "def bboxes_3d_to_video(frames, bboxes_3d, camera_poses, intrinsic):\n",
        "  \"\"\"Project 3D bboxes to 2D, then show them on an image.\"\"\"\n",
        "  # frames: [T, H, W, C]\n",
        "  # bboxes_3d: [T, N, 8, 3]\n",
        "  # cameras: a dict with camera metadata\n",
        "  bboxes_center_3d = bboxes_3d.mean(-2)\n",
        "  camera_rotations, camera_positions = camera_poses[..., :3, :3], camera_poses[..., :3, 3]\n",
        "\n",
        "  images = frames.copy()\n",
        "  for t in range(frames.shape[0]):\n",
        "    proj_camera_coords = world2camera(camera_rotations[t], camera_positions[t], bboxes_3d[t])\n",
        "    proj_corners, _ = camera2image(proj_camera_coords, intrinsic)\n",
        "    proj_camera_coords = world2camera(camera_rotations[t], camera_positions[t], bboxes_center_3d[t])\n",
        "    proj_centers, _ = camera2image(proj_camera_coords, intrinsic)\n",
        "    images[t] = draw_projected_3d_bbox(image=images[t], proj_corners=proj_corners, proj_centers=proj_centers)\n",
        "  return images\n",
        "\n",
        "media.show_videos({\"rgb\": frames,\n",
        "                   \"segmentation\": segmentations_to_video(masks),\n",
        "                   \"bboxes_3d\": bboxes_3d_to_video(frames, bboxes_3d, camera_poses, intrinsic),\n",
        "                   \"poses\": poses_to_video(frames, object_poses, camera_poses, intrinsic),\n",
        "                   \"object_coordinates\": sample[\"object_coordinates\"],\n",
        "                   \"depth\": depths_to_video(depths),\n",
        "                   \"z_buffer\": plot_depth_prism(z_buffers),\n",
        "                   \"forward_flow\": flow_to_rgb(forward_flows, white_bg=False),\n",
        "                   \"backward_flow\": flow_to_rgb(backward_flows, white_bg=False),\n",
        "                   \"surface_normal\": sample[\"normal\"],\n",
        "                   },\n",
        "                fps=10,\n",
        "                columns=5,\n",
        "                codec='gif',\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "N5jbkpnOS5QY"
      },
      "outputs": [],
      "source": [
        "# @title Get forward point tracks in 2D and 3D {form-width: \"25%\"}\n",
        "\n",
        "def forward_point_tracks_to_video(frames, point_tracks, visibles, show_occ=False):\n",
        "  \"\"\"Converts a sequence of points to color code video.\n",
        "\n",
        "  Args:\n",
        "    frames: [num_frames, height, width, 3], np.uint8, [0, 255]\n",
        "    point_tracks: [num_frames, height, width, 2], np.float32, [0, width / height]\n",
        "    visibles: [num_frames, height, width], bool\n",
        "\n",
        "  Returns:\n",
        "    video: [num_frames, height, width, 3], np.uint8, [0, 255]\n",
        "  \"\"\"\n",
        "  num_frames, height, width = frames.shape[0:3]\n",
        "  colormap = get_colormap(height, width)\n",
        "\n",
        "  video = frames.copy()\n",
        "  for t in range(num_frames):\n",
        "    for i in range(height):\n",
        "      for j in range(width):\n",
        "        x, y = np.round(point_tracks[t, i, j, :2]).astype(np.int32)\n",
        "        if visibles[t, i, j]:\n",
        "          cv2.circle(video[t], (x, y), radius=1, color=colormap[i, j], thickness=-1)\n",
        "        elif show_occ:\n",
        "          cv2.circle(video[t], (x, y), radius=1, color=colormap[i, j], thickness=1)\n",
        "  return video\n",
        "\n",
        "def visualize_pca(feature_maps):\n",
        "  _, height, width, channels = feature_maps.shape\n",
        "\n",
        "  pca = PCA(n_components=3)\n",
        "  pca = pca.fit(feature_maps.reshape(-1, channels))  # PCA on first frame\n",
        "\n",
        "  pca_video = []\n",
        "  for i in range(feature_maps.shape[0]):\n",
        "    feature_map_pca = pca.transform(feature_maps[i].reshape(-1, channels))\n",
        "    feature_map_pca = feature_map_pca.reshape(height, width, 3)\n",
        "    min_value = feature_map_pca.min(axis=(0, 1))\n",
        "    max_value = feature_map_pca.max(axis=(0, 1))\n",
        "    feature_map_pca = (feature_map_pca - min_value) / (max_value - min_value)\n",
        "    pca_video.append(feature_map_pca)\n",
        "  pca_video = np.stack(pca_video, axis=0)\n",
        "  return pca_video\n",
        "\n",
        "\n",
        "num_frames, height, width = frames.shape[0:3]\n",
        "query_points = sample_grid_points(height, width)  # Shape: (height, width, 2)\n",
        "\n",
        "poses = camera_poses[:, None] @ object_poses  # Shape: (num_frames, num_objects, 4, 4)\n",
        "relative_poses = np.einsum('tkcd, kde -\u003e tkce', poses, np.linalg.inv(poses[0]))\n",
        "num_objects = relative_poses.shape[1]\n",
        "one_hot_masks = (masks[0][None, None, ...] == np.arange(num_objects)[None, :, None, None])\n",
        "dense_poses = np.einsum('tkhw, tkce -\u003e thwce', one_hot_masks, relative_poses)  # Shape: (num_frames, height, width, 4, 4)\n",
        "\n",
        "camera_coords = image2camera(query_points, z_buffers[0], intrinsic, width, height)  # Shape: (height, width, 3)\n",
        "points4d = np.concatenate([camera_coords, np.ones_like(camera_coords[..., :1])], axis=-1)  # Homogeneous\n",
        "proj_camera_coords = np.einsum('thwcd, hwd -\u003e thwc', dense_poses, points4d)  # Shape: (num_frames, height, width, 4)\n",
        "\n",
        "image_coords_xy, image_coords_z = camera2image(proj_camera_coords[..., :3], intrinsic)  # Shape: (num_frames, height, width, 2)\n",
        "image_coords_xy *= np.array([width, height])  # Scale to pixel dimensions\n",
        "\n",
        "# Visibility check\n",
        "interpolate_z_buffers = batch_bilinear_interpolate(z_buffers, image_coords_xy[..., 0], image_coords_xy[..., 1])\n",
        "visible = (image_coords_z \u003c= interpolate_z_buffers * 1.01 ) \u0026 \\\n",
        "    (image_coords_xy[..., 0] \u003e= 0) \u0026 (image_coords_xy[..., 0] \u003c width) \u0026 \\\n",
        "    (image_coords_xy[..., 1] \u003e= 0) \u0026 (image_coords_xy[..., 1] \u003c height)\n",
        "\n",
        "gt_tracks_forward = image_coords_xy\n",
        "gt_visibles_forward = visible\n",
        "gt_tracks_xyz_forward = proj_camera_coords[..., :3]\n",
        "\n",
        "media.show_videos({\"forward_point_tracks\": forward_point_tracks_to_video(frames, gt_tracks_forward, gt_visibles_forward),\n",
        "                   \"dense_relative_poses\": visualize_pca(dense_poses.reshape(num_frames, height, width, -1)),\n",
        "                   },\n",
        "                  fps=10,\n",
        "                  codec='gif',\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "PeMZLN5Kqvlk"
      },
      "outputs": [],
      "source": [
        "# @title Visualize groundtruth 2D tracks {form-width: \"25%\"}\n",
        "\n",
        "def plot_2d_tracks(video, points, visibles, infront_cameras=None, tracks_leave_trace=16, show_occ=False):\n",
        "  \"\"\"Visualize 2D point trajectories.\"\"\"\n",
        "  num_frames, num_points = points.shape[:2]\n",
        "\n",
        "  # Precompute colormap for points\n",
        "  color_map = matplotlib.colormaps.get_cmap('hsv')\n",
        "  cmap_norm = matplotlib.colors.Normalize(vmin=0, vmax=num_points - 1)\n",
        "  point_colors = np.zeros((num_points, 3))\n",
        "  for i in range(num_points):\n",
        "    point_colors[i] = np.array(color_map(cmap_norm(i)))[:3] * 255\n",
        "\n",
        "  if infront_cameras is None:\n",
        "    infront_cameras = np.ones_like(visibles).astype(bool)\n",
        "\n",
        "  frames = []\n",
        "  for t in range(num_frames):\n",
        "    frame = video[t].copy()\n",
        "\n",
        "    # Draw tracks on the frame\n",
        "    line_tracks = points[max(0, t - tracks_leave_trace) : t + 1]\n",
        "    line_visibles = visibles[max(0, t - tracks_leave_trace) : t + 1]\n",
        "    line_infront_cameras = infront_cameras[max(0, t - tracks_leave_trace) : t + 1]\n",
        "    for s in range(line_tracks.shape[0] - 1):\n",
        "      img = frame.copy()\n",
        "\n",
        "      for i in range(num_points):\n",
        "        if line_visibles[s, i] and line_visibles[s + 1, i]:  # visible\n",
        "          x1, y1 = int(round(line_tracks[s, i, 0])), int(round(line_tracks[s, i, 1]))\n",
        "          x2, y2 = int(round(line_tracks[s + 1, i, 0])), int(round(line_tracks[s + 1, i, 1]))\n",
        "          cv2.line(frame, (x1, y1), (x2, y2), point_colors[i], 1, cv2.LINE_AA)\n",
        "        elif show_occ and line_infront_cameras[s, i] and line_infront_cameras[s + 1, i]:  # occluded\n",
        "          x1, y1 = int(round(line_tracks[s, i, 0])), int(round(line_tracks[s, i, 1]))\n",
        "          x2, y2 = int(round(line_tracks[s + 1, i, 0])), int(round(line_tracks[s + 1, i, 1]))\n",
        "          cv2.line(frame, (x1, y1), (x2, y2), point_colors[i], 1, cv2.LINE_AA)\n",
        "\n",
        "      alpha = (s + 1) / (line_tracks.shape[0] - 1)\n",
        "      frame = cv2.addWeighted(frame, alpha, img, 1 - alpha, 0)\n",
        "\n",
        "    # Draw end points on the frame\n",
        "    for i in range(num_points):\n",
        "      if visibles[t, i]:  # visible\n",
        "        x, y = int(round(points[t, i, 0])), int(round(points[t, i, 1]))\n",
        "        cv2.circle(frame, (x, y), 2, point_colors[i], -1, cv2.LINE_AA)\n",
        "      elif show_occ and infront_cameras[t, i]:  # occluded\n",
        "        x, y = int(round(points[t, i, 0])), int(round(points[t, i, 1]))\n",
        "        cv2.circle(frame, (x, y), 2, point_colors[i], 1, cv2.LINE_AA)\n",
        "\n",
        "    frames.append(frame)\n",
        "  frames = np.stack(frames)\n",
        "  return frames\n",
        "\n",
        "num_frames, height, width = frames.shape[:3]\n",
        "\n",
        "# Sample query points on a regular 8 px grid and flatten to (N, 2) as [x, y].\n",
        "grid = sample_grid_points(height, width, 8)\n",
        "grid = grid.reshape(-1, 2)\n",
        "\n",
        "# Gather per-point tracks and visibility from the dense forward-track maps;\n",
        "# grid[:, 1] indexes rows (y) and grid[:, 0] indexes columns (x).\n",
        "tracks = gt_tracks_forward[:, grid[:, 1], grid[:, 0]]\n",
        "visibles = gt_visibles_forward[:, grid[:, 1], grid[:, 0]]\n",
        "tracks = tracks.reshape(num_frames, -1, 2)\n",
        "visibles = visibles.reshape(num_frames, -1)\n",
        "\n",
        "video2d_viz = plot_2d_tracks(frames, tracks, visibles)\n",
        "media.show_video(video2d_viz, fps=10)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "iXlsbeQ5q_Xe"
      },
      "outputs": [],
      "source": [
        "# @title Visualize camera coordinate 3D tracks {form-width: \"25%\"}\n",
        "\n",
        "def plot_3d_tracks(points, visibles, infront_cameras=None, tracks_leave_trace=16, show_occ=False):\n",
        "  \"\"\"Visualize 3D point trajectories as a rendered video.\n",
        "\n",
        "  Args:\n",
        "    points: (num_frames, num_points, 3) per-frame 3D positions.\n",
        "    visibles: (num_frames, num_points) boolean visibility flags.\n",
        "    infront_cameras: optional (num_frames, num_points) booleans marking points\n",
        "      in front of the camera; defaults to all True.\n",
        "    tracks_leave_trace: number of trailing frames of each track to draw.\n",
        "    show_occ: if True, also draw occluded points that are in front of the camera.\n",
        "\n",
        "  Returns:\n",
        "    (num_frames, H, W, 3) uint8 RGB video of the rendered plots.\n",
        "  \"\"\"\n",
        "  num_frames, num_points = points.shape[0:2]\n",
        "\n",
        "  # One fixed hue per point index so each track keeps its color across frames.\n",
        "  color_map = matplotlib.colormaps.get_cmap('hsv')\n",
        "  cmap_norm = matplotlib.colors.Normalize(vmin=0, vmax=num_points - 1)\n",
        "\n",
        "  if infront_cameras is None:\n",
        "    infront_cameras = np.ones_like(visibles).astype(bool)\n",
        "\n",
        "  # Axis limits from the drawable points only. Note the axis remap used for\n",
        "  # plotting throughout: plot x = data x, plot y = data z, plot z = data y.\n",
        "  if show_occ:\n",
        "    x_min, x_max = np.min(points[infront_cameras, 0]), np.max(points[infront_cameras, 0])\n",
        "    y_min, y_max = np.min(points[infront_cameras, 2]), np.max(points[infront_cameras, 2])\n",
        "    z_min, z_max = np.min(points[infront_cameras, 1]), np.max(points[infront_cameras, 1])\n",
        "  else:\n",
        "    x_min, x_max = np.min(points[visibles, 0]), np.max(points[visibles, 0])\n",
        "    y_min, y_max = np.min(points[visibles, 2]), np.max(points[visibles, 2])\n",
        "    z_min, z_max = np.min(points[visibles, 1]), np.max(points[visibles, 1])\n",
        "\n",
        "  # Expand every axis to the same span so the plot box is a cube (equal aspect).\n",
        "  interval = np.max([x_max - x_min, y_max - y_min, z_max - z_min])\n",
        "  x_min = (x_min + x_max) / 2 - interval / 2\n",
        "  x_max = x_min + interval\n",
        "  y_min = (y_min + y_max) / 2 - interval / 2\n",
        "  y_max = y_min + interval\n",
        "  z_min = (z_min + z_max) / 2 - interval / 2\n",
        "  z_max = z_min + interval\n",
        "\n",
        "  frames = []\n",
        "  for t in range(num_frames):\n",
        "    # Off-screen Agg figure; 5.12 in at the default 100 dpi yields 512x512 px.\n",
        "    fig = Figure(figsize=(5.12, 5.12))\n",
        "    canvas = FigureCanvasAgg(fig)\n",
        "    ax = fig.add_subplot(111, projection='3d', computed_zorder=False)\n",
        "\n",
        "    ax.set_xlim([x_min, x_max])\n",
        "    ax.set_ylim([y_min, y_max])\n",
        "    ax.set_zlim([z_min, z_max])\n",
        "\n",
        "    ax.set_xticklabels([])\n",
        "    ax.set_yticklabels([])\n",
        "    ax.set_zticklabels([])\n",
        "\n",
        "    # Invert the vertical plot axis so image-style y-down data reads naturally.\n",
        "    ax.invert_zaxis()\n",
        "    ax.view_init()\n",
        "\n",
        "    for i in range(num_points):\n",
        "      if visibles[t, i] or (show_occ and infront_cameras[t, i]):\n",
        "        color = color_map(cmap_norm(i))\n",
        "        # Trailing segment of the track plus a dot at the current position.\n",
        "        line = points[max(0, t - tracks_leave_trace) : t + 1, i]\n",
        "        ax.plot(xs=line[:, 0], ys=line[:, 2], zs=line[:, 1], color=color, linewidth=1)\n",
        "        end_point = points[t, i]\n",
        "        ax.scatter(xs=end_point[0], ys=end_point[2], zs=end_point[1], color=color, s=3)\n",
        "\n",
        "    # Negative margins crop the figure padding so the axes fill the image.\n",
        "    fig.subplots_adjust(left=-0.05, right=1.05, top=1.05, bottom=-0.05)\n",
        "    fig.canvas.draw()\n",
        "    # buffer_rgba() is a live view of the canvas; np.array(frames) below copies it.\n",
        "    frames.append(canvas.buffer_rgba())\n",
        "    plt.close(fig)\n",
        "  return np.array(frames)[..., :3]\n",
        "\n",
        "num_frames, height, width = frames.shape[:3]\n",
        "\n",
        "# Sample query points on a regular 8 px grid and flatten to (N, 2) as [x, y].\n",
        "grid = sample_grid_points(height, width, 8)\n",
        "grid = grid.reshape(-1, 2)\n",
        "\n",
        "tracks_xyz = gt_tracks_xyz_forward[:, grid[:, 1], grid[:, 0]]\n",
        "tracks_xyz = tracks_xyz.reshape(num_frames, -1, 3)\n",
        "# No per-frame visibility signal for camera-coordinate tracks here, so mark\n",
        "# every point drawable and rely on show_occ=True below.\n",
        "visibles =  np.ones(tracks_xyz.shape[0:2]).astype(bool)\n",
        "\n",
        "video3d_viz = plot_3d_tracks(tracks_xyz, visibles, show_occ=True)\n",
        "media.show_video(video3d_viz, fps=10)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "6meH0uKgqRfq"
      },
      "outputs": [],
      "source": [
        "# @title Get forward point tracks in world coordinates with first frame as world frame {form-width: \"25%\"}\n",
        "\n",
        "# Re-express all camera poses relative to frame 0, making\n",
        "# camera_poses_adjusted[0] the identity.\n",
        "camera_poses_adjusted = np.einsum('tcd, de -\u003e tce', camera_poses, np.linalg.inv(camera_poses[0]))\n",
        "\n",
        "num_frames, height, width = frames.shape[0:3]\n",
        "query_points = sample_grid_points(height, width)  # Shape: (height, width, 2)\n",
        "\n",
        "# Per-object rigid motion from frame 0 to frame t, conjugated by camera_poses[0]\n",
        "# (C0 @ X @ inv(C0)) so it operates in the same frame-0-relative coordinates as\n",
        "# camera_poses_adjusted.\n",
        "relative_object_poses = np.einsum('tkcd, kde -\u003e tkce', object_poses, np.linalg.inv(object_poses[0]))\n",
        "relative_object_poses = np.einsum('tkcd, de -\u003e tkce', relative_object_poses, np.linalg.inv(camera_poses[0]))\n",
        "relative_object_poses = np.einsum('cd, tkde -\u003e tkce', camera_poses[0], relative_object_poses)\n",
        "num_objects = relative_object_poses.shape[1]\n",
        "# One-hot segmentation of frame 0: which object each pixel belongs to.\n",
        "one_hot_masks = (masks[0][None, None, ...] == np.arange(num_objects)[None, :, None, None])\n",
        "# Scatter each object's per-frame motion to the pixels it covers in frame 0.\n",
        "dense_object_poses = np.einsum('tkhw, tkce -\u003e thwce', one_hot_masks, relative_object_poses)  # Shape: (num_frames, height, width, 4, 4)\n",
        "\n",
        "# Back-project every frame-0 pixel to 3D using its depth, move it with the rigid\n",
        "# motion of the object it sits on, then view it from each frame's camera.\n",
        "camera_coords = image2camera(query_points, z_buffers[0], intrinsic, width, height)  # Shape: (height, width, 3)\n",
        "points4d = np.concatenate([camera_coords, np.ones_like(camera_coords[..., :1])], axis=-1)  # Homogeneous\n",
        "points4d = np.einsum('cd, hwd -\u003e hwc', np.linalg.inv(camera_poses_adjusted[0]), points4d)  # World coordinates\n",
        "proj_world_coords = np.einsum('thwcd, hwd -\u003e thwc', dense_object_poses, points4d)  # Shape: (num_frames, height, width, 4)\n",
        "proj_camera_coords = np.einsum('tcd, thwd -\u003e thwc', camera_poses_adjusted, proj_world_coords)  # World coordinates\n",
        "\n",
        "image_coords_xy, image_coords_z = camera2image(proj_camera_coords[..., :3], intrinsic)  # Shape: (num_frames, height, width, 2)\n",
        "image_coords_xy *= np.array([width, height])  # Scale to pixel dimensions\n",
        "\n",
        "# Visibility check: a tracked point is visible when it lands inside the image\n",
        "# and its depth is not behind the rendered depth buffer (1% depth tolerance).\n",
        "interpolate_z_buffers = batch_bilinear_interpolate(z_buffers, image_coords_xy[..., 0], image_coords_xy[..., 1])\n",
        "visible = (image_coords_z \u003c= interpolate_z_buffers * 1.01 ) \u0026 \\\n",
        "    (image_coords_xy[..., 0] \u003e= 0) \u0026 (image_coords_xy[..., 0] \u003c width) \u0026 \\\n",
        "    (image_coords_xy[..., 1] \u003e= 0) \u0026 (image_coords_xy[..., 1] \u003c height)\n",
        "\n",
        "gt_tracks_forward = image_coords_xy\n",
        "gt_visibles_forward = visible\n",
        "gt_tracks_xyz_forward = proj_camera_coords[..., :3]  # per-frame camera coordinates\n",
        "gt_tracks_xyz_world_forward = proj_world_coords[..., :3]  # frame-0 world coordinates\n",
        "\n",
        "media.show_videos({\"forward_point_tracks\": forward_point_tracks_to_video(frames, gt_tracks_forward, gt_visibles_forward),\n",
        "                   \"dense_relative_poses\": visualize_pca(dense_object_poses.reshape(num_frames, height, width, -1)),\n",
        "                   },\n",
        "                  fps=10,\n",
        "                  codec='gif',\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "KJ3vm4BbwAu3"
      },
      "outputs": [],
      "source": [
        "# @title Visualize world coordinate 3D tracks with first frame as world frame {form-width: \"25%\"}\n",
        "\n",
        "def draw_camera_pose(ax, M, fov=60, aspect=1.0, far=0.1):\n",
        "    \"\"\"Draws a wireframe camera frustum for camera-to-world pose M on ax.\n",
        "\n",
        "    Args:\n",
        "      ax: 3D matplotlib axes (plotted with axes reordered as x, z, y).\n",
        "      M: (4, 4) camera-to-world matrix; position M[:3, 3], rotation M[:3, :3].\n",
        "      fov: full field of view in degrees.\n",
        "      aspect: far-plane width over height.\n",
        "      far: distance from the camera center to the far plane.\n",
        "    \"\"\"\n",
        "    camera_pos, rotation = M[:3, 3], M[:3, :3]\n",
        "    fov_rad = np.deg2rad(fov / 2)\n",
        "    far_height, far_width = 2 * np.tan(fov_rad) * far, 2 * np.tan(fov_rad) * far * aspect\n",
        "    # Define far corners in camera coordinates\n",
        "    far_corners = np.array([[far_width/2, far_height/2, far], [-far_width/2, far_height/2, far],\n",
        "                            [-far_width/2, -far_height/2, far], [far_width/2, -far_height/2, far]])\n",
        "    # Transform far corners into world coordinates\n",
        "    frustum_corners_world = (rotation @ far_corners.T).T + camera_pos\n",
        "    # Rays from the camera center to each corner, plus the far-plane edges\n",
        "    far_lines = [[camera_pos, frustum_corners_world[i]] for i in range(4)]\n",
        "    far_edges = [[frustum_corners_world[i], frustum_corners_world[(i + 1) % 4]] for i in range(4)]\n",
        "    # Create the frustum lines\n",
        "    frustum_lines = np.array(far_lines + far_edges)[:, :, [0, 2, 1]]  # Swap y and z axes\n",
        "    ax.add_collection3d(Line3DCollection(frustum_lines, colors=[0.0, 0.0, 0.7], linewidths=1))\n",
        "\n",
        "def plot_3d_world_tracks(points, visibles, cameras, tracks_leave_trace=16):\n",
        "  \"\"\"Visualize 3D point trajectories with camera trajectory.\n",
        "\n",
        "  Args:\n",
        "    points: (num_frames, num_points, 3) world-coordinate positions.\n",
        "    visibles: (num_frames, num_points) boolean visibility flags.\n",
        "    cameras: (num_frames, 4, 4) world-to-camera extrinsics.\n",
        "    tracks_leave_trace: number of trailing frames of each track to draw.\n",
        "\n",
        "  Returns:\n",
        "    (num_frames, H, W, 3) float RGB video with values in [0, 1].\n",
        "  \"\"\"\n",
        "  num_frames, num_points = points.shape[:2]\n",
        "  # Reorder axes once up front: plot x = data x, plot y = data z, plot z = data y.\n",
        "  points = points[..., [0,2,1]]\n",
        "  point_color_map = matplotlib.colormaps.get_cmap('hsv')\n",
        "  # Robust axis limits: 5th-95th percentile of visible points, padded by 11%,\n",
        "  # then equalized below so the plot box is a cube.\n",
        "  x_min, x_max = np.nanpercentile(points[visibles, 0], 5), np.nanpercentile(points[visibles, 0], 95)\n",
        "  y_min, y_max = np.nanpercentile(points[visibles, 1], 5), np.nanpercentile(points[visibles, 1], 95)\n",
        "  z_min, z_max = np.nanpercentile(points[visibles, 2], 5), np.nanpercentile(points[visibles, 2], 95)\n",
        "  interval = max(x_max - x_min, y_max - y_min, z_max - z_min) * 1.11\n",
        "  x_mid, y_mid, z_mid = (x_min + x_max) / 2, (y_min + y_max) / 2, (z_min + z_max) / 2\n",
        "  x_min, x_max, y_min, y_max, z_min, z_max = [x_mid - interval / 2, x_mid + interval / 2, y_mid - interval / 2, y_mid + interval / 2, z_mid - interval / 2, z_mid + interval / 2]\n",
        "\n",
        "  M_list = np.linalg.inv(cameras)  # camera to world extrinsics\n",
        "  camera_positions = M_list[:, :3, 3][:, [0, 2, 1]]  # same x, z, y reorder as points\n",
        "\n",
        "  frames = []\n",
        "  for t in range(num_frames):\n",
        "    # Off-screen Agg figure rendering a 512x512 image (5.12 in at 100 dpi).\n",
        "    fig = Figure(figsize=(5.12, 5.12), dpi=100)\n",
        "    canvas = FigureCanvasAgg(fig)\n",
        "    ax = fig.add_subplot(111, projection='3d', computed_zorder=False)\n",
        "    ax.set_xlim([x_min, x_max]), ax.set_ylim([y_min, y_max]), ax.set_zlim([z_min, z_max])\n",
        "    ax.set_xticklabels([]), ax.set_yticklabels([]), ax.set_zticklabels([])\n",
        "    ax.invert_zaxis(), ax.view_init()\n",
        "\n",
        "    # Dashed camera path up to 20 frames ahead, plus the current camera frustum.\n",
        "    path_pos = camera_positions[0:np.min((t+20, num_frames))]\n",
        "    ax.plot(path_pos[..., 0], path_pos[..., 1], path_pos[..., 2], color=[0.0, 0.0, 0.7], linestyle='dashed')\n",
        "    draw_camera_pose(ax, M_list[t], far=interval/10)\n",
        "\n",
        "    # Draw all currently-visible tracks in a single collection for speed.\n",
        "    indices = np.where(visibles[t, :])[0]\n",
        "    if indices.size \u003e 0:\n",
        "      lines = np.transpose(points[max(0, t - tracks_leave_trace):t+1, indices], (1, 0, 2))\n",
        "      line_collection = Line3DCollection(lines, colors=point_color_map(matplotlib.colors.Normalize(vmin=0, vmax=num_points - 1)(indices)), linewidths=1)\n",
        "      ax.add_collection3d(line_collection)\n",
        "      ax.scatter(points[t, indices, 0], points[t, indices, 1], points[t, indices, 2], c=point_color_map(matplotlib.colors.Normalize(vmin=0, vmax=num_points - 1)(indices)), s=3)\n",
        "    # Negative margins crop the figure padding so the axes fill the image.\n",
        "    fig.subplots_adjust(left=-0.05, right=1.05, top=1.05, bottom=-0.05)\n",
        "    fig.canvas.draw()\n",
        "    frames.append(np.array(canvas.buffer_rgba(), dtype=np.float32) / 255.)\n",
        "    plt.close(fig)\n",
        "  return np.array(frames)[..., :3]\n",
        "\n",
        "num_frames, height, width = frames.shape[:3]\n",
        "\n",
        "# Sample query points on a regular 8 px grid and flatten to (N, 2) as [x, y].\n",
        "grid = sample_grid_points(height, width, 8)\n",
        "grid = grid.reshape(-1, 2)\n",
        "\n",
        "tracks_xyz = gt_tracks_xyz_world_forward[:, grid[:, 1], grid[:, 0]]\n",
        "tracks_xyz = tracks_xyz.reshape(num_frames, -1, 3)\n",
        "# No per-frame visibility signal for world-coordinate tracks; draw all points.\n",
        "visibles =  np.ones(tracks_xyz.shape[0:2]).astype(bool)\n",
        "\n",
        "video3d_viz = plot_3d_world_tracks(tracks_xyz, visibles, camera_poses_adjusted)\n",
        "media.show_video(video3d_viz, fps=10)"
      ]
    }
  ],
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "gpuType": "A100",
      "provenance": [
        {
          "file_id": "1KVXKeqrGX9CykD1JgA_1CbkkJeW4_37w",
          "timestamp": 1748869366472
        },
        {
          "file_id": "1jhZzc5aCUKonwE4UgiDsQ2kMulSOpfXg",
          "timestamp": 1748802891216
        },
        {
          "file_id": "1rTvubo766ZbobtuxMUr71Tv3GRHIGU7L",
          "timestamp": 1738632318372
        },
        {
          "file_id": "1_b3vfGKpDragXWG5Mg0CQeOHMiAqe8ku",
          "timestamp": 1737843391439
        }
      ],
      "toc_visible": true
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
