{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Nov4wrdiKHTh"
      },
      "source": [
        ""
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "mCmDvfFvxnGB"
      },
      "outputs": [],
      "source": [
        "# @title Install Code and Dependencies {form-width: \"25%\"}\n",
        "# Use %pip (not !pip) so the install targets the running kernel's\n",
        "# environment; the previously saved output showed `pip: command not found`.\n",
        "%pip install git+https://github.com/google-deepmind/tapnet.git"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "VADLwbmxaG29"
      },
      "outputs": [],
      "source": [
        "# @title Download TAPVid-DAVIS Dataset {form-width: \"25%\"}\n",
        "# Fetch and unpack the TAP-Vid DAVIS benchmark; a later cell reads\n",
        "# tapvid_davis/tapvid_davis.pkl from the extracted archive.\n",
        "!wget --no-check-certificate https://storage.googleapis.com/dm-tapnet/tapvid_davis.zip\n",
        "!unzip tapvid_davis.zip"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "HaswJZMq9B3c"
      },
      "outputs": [],
      "source": [
        "# @title Download Model {form-width: \"25%\"}\n",
        "# Download the pretrained TAPNext checkpoint into tapnet/checkpoints/\n",
        "# (loaded later via utils.npload) and list the directory as a sanity check.\n",
        "\n",
        "%mkdir -p tapnet/checkpoints\n",
        "!wget -P tapnet/checkpoints --no-check-certificate https://storage.googleapis.com/dm-tapnet/tapnext/tapnext_ckpt.npz\n",
        "%ls tapnet/checkpoints"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "hICS3HPqcxU_"
      },
      "outputs": [],
      "source": [
        "# @title Load TAP-Vid-DAVIS dataset on 256x256 {form-width: \"25%\"}\n",
        "# NOTE(review): `evaluation_datasets` and `jax` are used here but no import\n",
        "# cell is visible in this notebook -- confirm they are imported (e.g.\n",
        "# `from tapnet import evaluation_datasets; import jax`) before a fresh run.\n",
        "davis_dataset = evaluation_datasets.create_davis_dataset(\n",
        "    davis_points_path='tapvid_davis/tapvid_davis.pkl', query_mode='first', full_resolution=False, resolution=(256, 256))\n",
        "\n",
        "# Materialize the dataset once so later cells can iterate repeatedly without\n",
        "# re-decoding videos; print each batch's array shapes as a sanity check.\n",
        "cached_dataset = []\n",
        "for j, batch in enumerate(davis_dataset):\n",
        "  cached_dataset.append(batch)\n",
        "  print('video id', j, jax.tree_util.tree_map(lambda x: x.shape, batch))"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "BTUv4km7J514"
      },
      "outputs": [],
      "source": [
        "# @title Build TAPNext model {form-width: \"25%\"}\n",
        "# NOTE(review): `ssm_vit` and `video_ssm_tracker` are used without a visible\n",
        "# import cell -- confirm they are imported from the tapnet package.\n",
        "compute_dtype = \"float32\"\n",
        "\n",
        "# Backbone config; temporal patch size 1 allows frame-by-frame inference.\n",
        "ssm_vit_backbone = ssm_vit.Model(\n",
        "    # try S/16, B/8, L/16\n",
        "    variant=\"B/8\",\n",
        "    patch_size=(1, 8, 8),\n",
        "    pool_type=\"queries\",\n",
        "    posemb=\"learn\",\n",
        "    posemb_full=\"sincos2d\",\n",
        "    rep_size=True,\n",
        "    dropout=0.0,\n",
        "    lru_width=768,\n",
        "    remat=False,\n",
        "    dtype_mm=compute_dtype,\n",
        "    dtype_ssm=compute_dtype,\n",
        "    query_scale=1,\n",
        "    spatiotemporal_attn=False,\n",
        ")\n",
        "\n",
        "# Wrap the backbone in the TAPNext tracker interface.\n",
        "model = video_ssm_tracker.TAPNextTracker(\n",
        "    backbone=ssm_vit_backbone,\n",
        ")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "YAvcpMST4g95"
      },
      "outputs": [],
      "source": [
        "def tracker_forward(params, frame, queries=None, state=None):\n",
        "  \"\"\"Runs one tracker step over a frame chunk.\n",
        "\n",
        "  Assuming all queries are in frame 0: the first forward step passes\n",
        "  `queries` and no `state`; subsequent calls pass the recurrent `state`\n",
        "  and no queries.\n",
        "\n",
        "  Args:\n",
        "    params: Model parameters, as loaded from the checkpoint.\n",
        "    frame: Batched video frames for this step.\n",
        "    queries: Query points for the first step, or None afterwards.\n",
        "    state: Recurrent tracking state from the previous step, or None first.\n",
        "\n",
        "  Returns:\n",
        "    Tuple (tracks, visible, state, track_logits, visible_logits).\n",
        "  \"\"\"\n",
        "  if state is None:\n",
        "    # First step: assume all query points are valid.\n",
        "    assert queries is not None\n",
        "    query_padding = jnp.ones_like(queries)[..., 0]\n",
        "    result, _ = model.apply(\n",
        "        variables={'params': params},\n",
        "        frames=frame,\n",
        "        query_points=queries,\n",
        "        query_padding=query_padding,\n",
        "        method=model.forward_step,\n",
        "        mutable='intermediates')\n",
        "  else:\n",
        "    result, _ = model.apply(\n",
        "        variables={'params': params},\n",
        "        frames=frame,\n",
        "        state=state,\n",
        "        method=model.forward_step,\n",
        "        mutable='intermediates')\n",
        "\n",
        "  return (\n",
        "      result.tracks,\n",
        "      result.visible,\n",
        "      result.state,\n",
        "      result.track_logits,\n",
        "      result.visible_logits,\n",
        "  )\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "gyBJt05JWlRz"
      },
      "outputs": [],
      "source": [
        "# @title Function for per frame evaluation {form-width: \"25%\"}\n",
        "\n",
        "import tqdm\n",
        "\n",
        "def get_window(\n",
        "    coord, softmax, radius=8\n",
        "):\n",
        "  \"\"\"Slices a (2*radius+1)-wide window of `softmax` centered near `coord`.\n",
        "\n",
        "  Note: coord is assumed to be a raster coordinate.\n",
        "  \"\"\"\n",
        "  start = jnp.maximum(jnp.array(jnp.floor(coord - radius - 0.5), jnp.int32), 0)\n",
        "  softmax = jax.lax.dynamic_slice(softmax, [start], [radius * 2 + 1])\n",
        "  coord = start + 0.5 + jnp.arange(radius * 2 + 1)\n",
        "  return softmax, coord\n",
        "\n",
        "\n",
        "def get_certainty(\n",
        "    coord_yx, track_logits, radius: int = 8\n",
        "):\n",
        "  \"\"\"Gets certainty from coordinate logits for a single point/frame.\n",
        "\n",
        "  Sums the softmax probability mass falling within `radius` pixels of the\n",
        "  predicted coordinate.\n",
        "  \"\"\"\n",
        "  logits_y, logits_x = jnp.split(track_logits, 2, axis=-1)\n",
        "  track_softmax_y = jax.nn.softmax(logits_y)\n",
        "  track_softmax_x = jax.nn.softmax(logits_x)\n",
        "  # Bug fix: forward `radius` so the slice window matches the circle radius\n",
        "  # (previously the window was always 8 regardless of `radius`).\n",
        "  sm_y, coord_y = get_window(coord_yx[0], track_softmax_y, radius=radius)\n",
        "  sm_x, coord_x = get_window(coord_yx[1], track_softmax_x, radius=radius)\n",
        "  sm = sm_y[:, jnp.newaxis] * sm_x[jnp.newaxis, :]\n",
        "  grid_x, grid_y = jnp.meshgrid(coord_x, coord_y)\n",
        "  grid = jnp.stack([grid_y, grid_x], axis=-1)\n",
        "  in_radius = (\n",
        "      jnp.sum(jnp.square(grid - coord_yx), axis=-1) <= jnp.square(radius) + 1e-8\n",
        "  )\n",
        "  return jnp.sum(sm * in_radius)[jnp.newaxis]\n",
        "\n",
        "\n",
        "def tracker_certainty(\n",
        "    tracks, track_logits, radius: int = 8\n",
        "):\n",
        "  \"\"\"Gets certainty from coordinate logits for all points/frames.\n",
        "\n",
        "  Args:\n",
        "    tracks: Tracks in [y, x], raster coordinates.\n",
        "    track_logits: Logits for each track, with y logits first following x logits,\n",
        "      same number of logits as pixels in image.\n",
        "    radius: Radius of the circle in which probability mass is summed.\n",
        "\n",
        "  Returns:\n",
        "    Certainty probability between 0 and 1 (mass within `radius` of the track).\n",
        "  \"\"\"\n",
        "  vmapped_certain_fn = functools.partial(get_certainty, radius=radius)\n",
        "  # vmap over every leading axis, leaving only the coordinate axis.\n",
        "  for _ in range(len(tracks.shape) - 1):\n",
        "    vmapped_certain_fn = jax.vmap(vmapped_certain_fn)\n",
        "  certainty = vmapped_certain_fn(tracks, track_logits)\n",
        "  return certainty\n",
        "\n",
        "\n",
        "def run_eval_per_frame(\n",
        "    modelf,\n",
        "    params,\n",
        "    batch,\n",
        "    get_trackwise_metrics=True,\n",
        "    radius=8,\n",
        "    threshold=0.5,\n",
        "    use_certainty=False,\n",
        "):\n",
        "  \"\"\"Runs the tracker one frame at a time and computes TAP-Vid metrics.\n",
        "\n",
        "  The first `modelf` call consumes frame 0 plus the query points; each later\n",
        "  call consumes one frame plus the carried recurrent state.\n",
        "\n",
        "  Returns:\n",
        "    (tracks in [x, y] order, occluded mask, per-video summed metric scalars).\n",
        "  \"\"\"\n",
        "  pred_tracks, pred_visible, tracking_state, track_logits, visible_logits = (\n",
        "      modelf(\n",
        "          params,\n",
        "          frame=batch['video'][:, :1],\n",
        "          queries=batch['query_points'],\n",
        "      )\n",
        "  )\n",
        "  pred_tracks, pred_visible = [pred_tracks], [pred_visible]\n",
        "  pred_track_logits, pred_visible_logits = [track_logits], [visible_logits]\n",
        "  for frame in range(1, batch['video'].shape[1]):\n",
        "    (\n",
        "        curr_tracks,\n",
        "        curr_visible,\n",
        "        tracking_state,\n",
        "        curr_track_logits,\n",
        "        curr_visible_logits,\n",
        "    ) = modelf(\n",
        "        params,\n",
        "        frame=batch['video'][:, frame : frame + 1],\n",
        "        state=tracking_state,\n",
        "    )\n",
        "    # (Removed leftover `import pdb; pdb.set_trace()` debugging breakpoint.)\n",
        "    # Pull results to host as numpy so device memory does not accumulate.\n",
        "    pred_tracks.append(np.array(jax.device_get(curr_tracks)))\n",
        "    pred_visible.append(np.array(jax.device_get(curr_visible)))\n",
        "    pred_track_logits.append(np.array(jax.device_get(curr_track_logits)))\n",
        "    pred_visible_logits.append(np.array(jax.device_get(curr_visible_logits)))\n",
        "  tracks = np.concatenate(pred_tracks, axis=2)\n",
        "  pred_visible = np.concatenate(pred_visible, axis=2)\n",
        "  track_logits = np.concatenate(pred_track_logits, axis=2)\n",
        "  visible_logits = np.concatenate(pred_visible_logits, axis=2)\n",
        "\n",
        "  pred_certainty = tracker_certainty(tracks, track_logits, radius)\n",
        "\n",
        "  # A point counts as visible only if the model both predicts visibility and\n",
        "  # is spatially certain about the location.\n",
        "  pred_visible_and_certain = (\n",
        "      jax.nn.sigmoid(visible_logits) * pred_certainty\n",
        "  ) > threshold\n",
        "\n",
        "  if use_certainty:\n",
        "    occluded = np.logical_not(pred_visible_and_certain.squeeze(-1))\n",
        "  else:\n",
        "    occluded = np.logical_not(pred_visible.squeeze(-1))\n",
        "\n",
        "  scalars = evaluation_datasets.compute_tapvid_metrics(\n",
        "      batch['query_points'],\n",
        "      batch['occluded'],\n",
        "      batch['target_points'],\n",
        "      occluded + 0.0,\n",
        "      tracks[..., ::-1],\n",
        "      query_mode='first',\n",
        "      get_trackwise_metrics=get_trackwise_metrics,\n",
        "  )\n",
        "  return (\n",
        "      tracks[..., ::-1],\n",
        "      occluded,\n",
        "      jax.tree.map(lambda x: np.array(np.sum(x, axis=0)), scalars),\n",
        "  )\n",
        "\n",
        "# @title Function for raw data to the input format {form-width: \"25%\"}\n",
        "def deterministic_eval(cached_dataset, strided=False):\n",
        "  \"\"\"Yields eval-ready batches; with strided=True also a time-reversed copy.\"\"\"\n",
        "  if not strided:\n",
        "    for sample in tqdm.tqdm(cached_dataset):\n",
        "      batch = sample['davis'].copy()\n",
        "      # batch['video'] = (batch['video'] + 1) / 2\n",
        "      batch['visible'] = np.logical_not(batch['occluded'])[..., None]\n",
        "      batch['padding'] = np.ones(\n",
        "          batch['query_points'].shape[:2], dtype=np.bool_\n",
        "      )\n",
        "      batch['loss_mask'] = np.ones(\n",
        "          batch['target_points'].shape[:3] + (1,), dtype=np.float32\n",
        "      )\n",
        "      batch['appearance'] = np.ones(\n",
        "          batch['target_points'].shape[:3] + (1,), dtype=np.float32\n",
        "      )\n",
        "\n",
        "      yield batch\n",
        "  else:\n",
        "    for sample in tqdm.tqdm(cached_dataset):\n",
        "      batch = sample['davis'].copy()\n",
        "      # batch['video'] = (batch['video'] + 1) / 2\n",
        "      batch['visible'] = np.logical_not(batch['occluded'])[..., None]\n",
        "      batch['padding'] = np.ones(\n",
        "          batch['query_points'].shape[:2], dtype=np.bool_\n",
        "      )\n",
        "      batch['loss_mask'] = np.ones(\n",
        "          batch['target_points'].shape[:3] + (1,), dtype=np.float32\n",
        "      )\n",
        "      batch['appearance'] = np.ones(\n",
        "          batch['target_points'].shape[:3] + (1,), dtype=np.float32\n",
        "      )\n",
        "      # Build the time-reversed twin: flip per-frame annotations along the\n",
        "      # time axis and remap query frame indices accordingly.\n",
        "      backward_batch = {k: v.copy() for k, v in batch.items()}\n",
        "      for key in ['visible', 'appearance', 'loss_mask', 'target_points']:\n",
        "        backward_batch[key] = np.flip(backward_batch[key], axis=2)\n",
        "      backward_batch['video'] = np.flip(backward_batch['video'], axis=1)\n",
        "      backward_queries = (\n",
        "          backward_batch['video'].shape[1]\n",
        "          - backward_batch['query_points'][..., 0]\n",
        "          - 1\n",
        "      )\n",
        "      backward_batch['query_points'][..., 0] = backward_queries\n",
        "      yield batch, backward_batch"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "TeLmcaPPZFG0"
      },
      "outputs": [],
      "source": [
        "from big_vision import utils\n",
        "import collections\n",
        "\n",
        "# Forked from the main code for simplicity, but once submitted it will be better\n",
        "# just to adhoc-import this function.\n",
        "def recover_tree(keys, values):\n",
        "  \"\"\"Recovers a tree as a nested dict from flat names and values.\n",
        "\n",
        "  This function is useful to analyze checkpoints that are saved by our programs\n",
        "  without need to access the exact source code of the experiment. In particular,\n",
        "  it can be used to extract and reuse various subtrees of the checkpoint, e.g.\n",
        "  subtree of parameters.\n",
        "\n",
        "  Args:\n",
        "    keys: a list of keys, where '/' is used as separator between nodes.\n",
        "    values: a list of leaf values.\n",
        "\n",
        "  Returns:\n",
        "    A nested tree-like dict.\n",
        "  \"\"\"\n",
        "  tree = {}\n",
        "  sub_trees = collections.defaultdict(list)\n",
        "  for k, v in zip(keys, values):\n",
        "    if \"/\" not in k:\n",
        "      # Leaf at this level: store the value directly.\n",
        "      tree[k] = v\n",
        "    else:\n",
        "      # Internal node: strip the first path segment and recurse later.\n",
        "      k_left, k_right = k.split(\"/\", 1)\n",
        "      sub_trees[k_left].append((k_right, v))\n",
        "  for k, kv_pairs in sub_trees.items():\n",
        "    k_subtree, v_subtree = zip(*kv_pairs)\n",
        "    tree[k] = recover_tree(k_subtree, v_subtree)\n",
        "  return tree\n",
        "\n",
        "\n",
        "# Load the flat npz checkpoint and rebuild the nested parameter tree.\n",
        "ckpt_path = 'tapnet/checkpoints/tapnext_ckpt.npz'\n",
        "loaded_params = utils.npload(ckpt_path)\n",
        "k, v = zip(*list(loaded_params.items()))\n",
        "loaded_params = recover_tree(k, v)\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "YNf98GICtJMa"
      },
      "outputs": [],
      "source": [
        "# @title Per-frame inference\n",
        "\n",
        "# Run the tracker frame-by-frame over every cached DAVIS video and collect\n",
        "# per-video summed TAP-Vid metrics plus raw predictions.\n",
        "standard_eval_scalars_list = []\n",
        "preds = []\n",
        "for batch in deterministic_eval(cached_dataset):\n",
        "  tracks, occluded, scores = run_eval_per_frame(\n",
        "      tracker_forward, loaded_params, batch, get_trackwise_metrics=False, use_certainty=False\n",
        "  )\n",
        "  standard_eval_scalars_list.append(scores)\n",
        "  preds.append((tracks, occluded))\n",
        "\n",
        "\n",
        "print('')\n",
        "# Report dataset-level means of the standard TAP-Vid metrics (one per line,\n",
        "# same order/output as before: AJ, OA, <delta^x_avg).\n",
        "for metric in ('average_jaccard', 'occlusion_accuracy', 'average_pts_within_thresh'):\n",
        "  print(np.mean([s[metric] for s in standard_eval_scalars_list]))"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "last_runtime": {
        "build_target": "//learning/grp/tools/ml_python/gpu:ml_notebook",
        "kind": "private"
      },
      "provenance": [
        {
          "file_id": "11QLNbzLDrzDRqO5YQNtzIyy8BA2OkjsF",
          "timestamp": 1737063264847
        }
      ]
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
