{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "Nerfies Training v2.ipynb",
      "private_outputs": true,
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "TPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "EZ_wkNVdTz-C"
      },
      "source": [
        "# Let's train a Nerfie!\n",
        "\n",
        "**Author**: [Keunhong Park](https://keunhong.com)\n",
        "\n",
        "[[Project Page](https://nerfies.github.io)]\n",
        "[[Paper](https://storage.googleapis.com/nerfies-public/videos/nerfies_paper.pdf)]\n",
        "[[Video](https://www.youtube.com/watch?v=MrKrnHhk8IA)]\n",
        "[[GitHub](https://github.com/google/nerfies)]\n",
        "\n",
        "This notebook provides a demo for training Nerfies.\n",
        "\n",
        "### Instructions\n",
        "\n",
        "1. Convert a video into our dataset format using the [dataset processing notebook](https://colab.sandbox.google.com/github/google/nerfies/blob/main/notebooks/Nerfies_Capture_Processing.ipynb).\n",
        "2. Set the `data_dir` below to where you saved the dataset.\n",
        "3. Come back to this notebook to train a nerfie.\n",
        "\n",
        "\n",
        "### Notes\n",
        " * To accommodate the limited compute power of Colab runtimes, this notebook defaults to a \"toy\" version of our method. The number of samples has been reduced and the elastic regularization turned off.\n",
        "\n",
        " * To train a high-quality model, please look at the CLI options we provide in the [Github repository](https://github.com/google/nerfies).\n",
        "\n",
        "\n",
        "\n",
        " * Please report issues on the [GitHub issue tracker](https://github.com/google/nerfies/issues).\n",
        "\n",
        "\n",
        "If you find this work useful, please consider citing:\n",
        "```bibtex\n",
        "@article{park2021nerfies,\n",
        "  author    = {Park, Keunhong \n",
        "               and Sinha, Utkarsh \n",
        "               and Barron, Jonathan T. \n",
        "               and Bouaziz, Sofien \n",
        "               and Goldman, Dan B \n",
        "               and Seitz, Steven M. \n",
        "               and Martin-Brualla, Ricardo},\n",
        "  title     = {Nerfies: Deformable Neural Radiance Fields},\n",
        "  journal   = {ICCV},\n",
        "  year      = {2021},\n",
        "}\n",
        "```\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "OlW1gF_djH6H"
      },
      "source": [
        "## Environment Setup"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "I6Jbspl7TnIX"
      },
      "source": [
        "# Use the %pip magic (rather than !pip) so the packages are installed\n",
        "# into the environment of the running kernel, not a possibly different\n",
        "# shell-level Python.\n",
        "%pip install flax immutabledict mediapy\n",
        "%pip install --upgrade git+https://github.com/google/nerfies@v2"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "zGJux-m5Xp3Z",
        "cellView": "form"
      },
      "source": [
        "# @title Configure notebook runtime\n",
        "# @markdown If you would like to use a GPU runtime instead, change the runtime type by going to `Runtime > Change runtime type`. \n",
        "# @markdown You will have to use a smaller batch size on GPU.\n",
        "\n",
        "# Import jax unconditionally: the final jax.devices() call below needs it\n",
        "# on the GPU path too, where the TPU-only branch (and its import) is skipped.\n",
        "import jax\n",
        "\n",
        "runtime_type = 'tpu'  # @param ['gpu', 'tpu']\n",
        "if runtime_type == 'tpu':\n",
        "  # Colab TPU runtimes require an explicit JAX backend setup before use.\n",
        "  import jax.tools.colab_tpu\n",
        "  jax.tools.colab_tpu.setup_tpu()\n",
        "\n",
        "print('Detected Devices:', jax.devices())"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "afUtLfRWULEi",
        "cellView": "form"
      },
      "source": [
        "# @title Mount Google Drive\n",
        "# @markdown Mount Google Drive onto `/content/gdrive`. You can skip this if running locally.\n",
        "\n",
        "from google.colab import drive\n",
        "drive.mount('/content/gdrive')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ENOfbG3AkcVN",
        "cellView": "form"
      },
      "source": [
        "# @title Define imports and utility functions.\n",
        "\n",
        "import jax\n",
        "from jax.config import config as jax_config\n",
        "import jax.numpy as jnp\n",
        "from jax import grad, jit, vmap\n",
        "from jax import random\n",
        "\n",
        "import flax\n",
        "import flax.linen as nn\n",
        "from flax import jax_utils\n",
        "from flax import optim\n",
        "from flax.metrics import tensorboard\n",
        "from flax.training import checkpoints\n",
        "jax_config.enable_omnistaging() # Linen requires enabling omnistaging\n",
        "\n",
        "from absl import logging\n",
        "from io import BytesIO\n",
        "import random as pyrandom\n",
        "import numpy as np\n",
        "import PIL\n",
        "import IPython\n",
        "\n",
        "\n",
        "# Monkey patch logging.\n",
        "def myprint(msg, *args, **kwargs):\n",
        " print(msg % args)\n",
        "\n",
        "logging.info = myprint \n",
        "logging.warn = myprint\n",
        "logging.error = myprint\n",
        "\n",
        "\n",
        "def show_image(image, fmt='png'):\n",
        "    image = image_utils.image_to_uint8(image)\n",
        "    f = BytesIO()\n",
        "    PIL.Image.fromarray(image).save(f, fmt)\n",
        "    IPython.display.display(IPython.display.Image(data=f.getvalue()))\n",
        "\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "wW7FsSB-jORB"
      },
      "source": [
        "## Configuration"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "rz7wRm7YT9Ka"
      },
      "source": [
        "# @title Model and dataset configuration\n",
        "\n",
        "from pathlib import Path\n",
        "from pprint import pprint\n",
        "import gin\n",
        "from IPython.display import display, Markdown\n",
        "\n",
        "from nerfies import configs\n",
        "\n",
        "\n",
        "# @markdown The working directory.\n",
        "train_dir = '/content/gdrive/My Drive/nerfies/experiments/capture1/exp1'  # @param {type: \"string\"}\n",
        "# @markdown The directory to the dataset capture.\n",
        "data_dir = '/content/gdrive/My Drive/nerfies/captures/capture1'  # @param {type: \"string\"}\n",
        "\n",
        "# @markdown Training configuration.\n",
        "max_steps = 100000  # @param {type: 'number'}\n",
        "batch_size = 4096  # @param {type: 'number'}\n",
        "image_scale = 8  # @param {type: 'number'}\n",
        "\n",
        "# @markdown Model configuration.\n",
        "use_viewdirs = True  #@param {type: 'boolean'}\n",
        "use_appearance_metadata = True  #@param {type: 'boolean'}\n",
        "warp_field_type = 'se3'  #@param['se3', 'translation']\n",
        "num_warp_freqs = 8  #@param{type:'number'}\n",
        "num_coarse_samples = 64  # @param {type: 'number'}\n",
        "num_fine_samples = 64  # @param {type: 'number'}\n",
        "\n",
        "checkpoint_dir = Path(train_dir, 'checkpoints')\n",
        "checkpoint_dir.mkdir(exist_ok=True, parents=True)\n",
        "\n",
        "config_str = f\"\"\"\n",
        "ExperimentConfig.image_scale = {image_scale}\n",
        "ExperimentConfig.datasource_spec = {{\n",
        "    'type': 'nerfies',\n",
        "    'data_dir': '{data_dir}',\n",
        "    'camera_type': 'json',\n",
        "}}\n",
        "\n",
        "ModelConfig.use_warp = True\n",
        "ModelConfig.use_viewdirs = {int(use_viewdirs)}\n",
        "ModelConfig.use_appearance_metadata = {int(use_appearance_metadata)}\n",
        "ModelConfig.warp_field_type = '{warp_field_type}'\n",
        "ModelConfig.num_warp_freqs = {num_warp_freqs}\n",
        "ModelConfig.num_coarse_samples = {num_coarse_samples}\n",
        "ModelConfig.num_fine_samples = {num_fine_samples}\n",
        "\n",
        "TrainConfig.max_steps = {max_steps}\n",
        "TrainConfig.batch_size = {batch_size}\n",
        "TrainConfig.print_every = 100\n",
        "TrainConfig.use_elastic_loss = False\n",
        "TrainConfig.use_background_loss = False\n",
        "TrainConfig.warp_alpha_schedule = {{\n",
        "    'type': 'linear',\n",
        "    'initial_value': 0.0,\n",
        "    'final_value': {num_warp_freqs},\n",
        "    'num_steps': {int(max_steps*0.8)},\n",
        "}}\n",
        "\"\"\"\n",
        "\n",
        "gin.parse_config(config_str)\n",
        "\n",
        "config_path = Path(train_dir, 'config.gin')\n",
        "with open(config_path, 'w') as f:\n",
        "  logging.info('Saving config to %s', config_path)\n",
        "  f.write(config_str)\n",
        "\n",
        "exp_config = configs.ExperimentConfig()\n",
        "model_config = configs.ModelConfig()\n",
        "train_config = configs.TrainConfig()\n",
        "eval_config = configs.EvalConfig()\n",
        "\n",
        "display(Markdown(\n",
        "    gin.config.markdown(gin.operative_config_str())))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "r872r6hiVUVS",
        "cellView": "form"
      },
      "source": [
        "# @title Create datasource and show an example.\n",
        "\n",
        "from nerfies import datasets\n",
        "from nerfies import image_utils\n",
        "\n",
        "datasource = datasets.from_config(\n",
        "  exp_config.datasource_spec,\n",
        "  image_scale=exp_config.image_scale,\n",
        "  use_appearance_id=model_config.use_appearance_metadata,\n",
        "  use_camera_id=model_config.use_camera_metadata,\n",
        "  use_warp_id=model_config.use_warp,\n",
        "  random_seed=exp_config.random_seed)\n",
        "\n",
        "show_image(datasource.load_rgb(datasource.train_ids[0]))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "XC3PIY74XB05",
        "cellView": "form"
      },
      "source": [
        "# @title Create training iterators\n",
        "\n",
        "devices = jax.local_devices()\n",
        "\n",
        "train_iter = datasource.create_iterator(\n",
        "    datasource.train_ids,\n",
        "    batch_size=train_config.batch_size,\n",
        "    flatten=True,\n",
        "    shuffle=True,\n",
        "    prefetch_size=3,\n",
        "    devices=devices)\n",
        "\n",
        "def shuffled(l):\n",
        "  import random as r\n",
        "  import copy\n",
        "  l = copy.copy(l)\n",
        "  r.shuffle(l)\n",
        "  return l\n",
        "\n",
        "train_eval_iter = datasource.create_iterator(\n",
        "    shuffled(datasource.train_ids), batch_size=0, devices=devices)\n",
        "val_eval_iter = datasource.create_iterator(\n",
        "    shuffled(datasource.val_ids), batch_size=0, devices=devices)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "erY9l66KjYYW"
      },
      "source": [
        "## Training"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "nZnS8BhcXe5E",
        "cellView": "form"
      },
      "source": [
        "# @title Initialize model\n",
        "# @markdown Defines the model and initializes its parameters.\n",
        "\n",
        "from flax.training import checkpoints\n",
        "from nerfies import models\n",
        "from nerfies import model_utils\n",
        "from nerfies import schedules\n",
        "from nerfies import training\n",
        "\n",
        "# @markdown Restore a checkpoint if one exists.\n",
        "restore_checkpoint = False  # @param{type:'boolean'}\n",
        "\n",
        "\n",
        "rng = random.PRNGKey(exp_config.random_seed)\n",
        "np.random.seed(exp_config.random_seed + jax.process_index())\n",
        "devices_to_use = jax.devices()\n",
        "\n",
        "learning_rate_sched = schedules.from_config(train_config.lr_schedule)\n",
        "warp_alpha_sched = schedules.from_config(train_config.warp_alpha_schedule)\n",
        "elastic_loss_weight_sched = schedules.from_config(\n",
        "    train_config.elastic_loss_weight_schedule)\n",
        "\n",
        "rng, key = random.split(rng)\n",
        "params = {}\n",
        "model, params['model'] = models.construct_nerf(\n",
        "    key,\n",
        "    model_config,\n",
        "    batch_size=train_config.batch_size,\n",
        "    appearance_ids=datasource.appearance_ids,\n",
        "    camera_ids=datasource.camera_ids,\n",
        "    warp_ids=datasource.warp_ids,\n",
        "    near=datasource.near,\n",
        "    far=datasource.far,\n",
        "    use_warp_jacobian=train_config.use_elastic_loss,\n",
        "    use_weights=train_config.use_elastic_loss)\n",
        "\n",
        "optimizer_def = optim.Adam(learning_rate_sched(0))\n",
        "optimizer = optimizer_def.create(params)\n",
        "state = model_utils.TrainState(\n",
        "    optimizer=optimizer,\n",
        "    warp_alpha=warp_alpha_sched(0))\n",
        "scalar_params = training.ScalarParams(\n",
        "    learning_rate=learning_rate_sched(0),\n",
        "    elastic_loss_weight=elastic_loss_weight_sched(0),\n",
        "    warp_reg_loss_weight=train_config.warp_reg_loss_weight,\n",
        "    warp_reg_loss_alpha=train_config.warp_reg_loss_alpha,\n",
        "    warp_reg_loss_scale=train_config.warp_reg_loss_scale,\n",
        "    background_loss_weight=train_config.background_loss_weight)\n",
        "if restore_checkpoint:\n",
        "  logging.info('Restoring checkpoint from %s', checkpoint_dir)\n",
        "  state = checkpoints.restore_checkpoint(checkpoint_dir, state)\n",
        "step = state.optimizer.state.step + 1\n",
        "state = jax_utils.replicate(state, devices=devices)\n",
        "del params"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "at2CL5DRZ7By",
        "cellView": "form"
      },
      "source": [
        "# @title Define pmapped functions\n",
        "# @markdown This parallelizes the training and evaluation step functions using `jax.pmap`.\n",
        "\n",
        "import functools\n",
        "from nerfies import evaluation\n",
        "\n",
        "\n",
        "def _model_fn(key_0, key_1, params, rays_dict, warp_extra):\n",
        "  out = model.apply({'params': params},\n",
        "                    rays_dict,\n",
        "                    warp_extra=warp_extra,\n",
        "                    rngs={\n",
        "                        'coarse': key_0,\n",
        "                        'fine': key_1\n",
        "                    },\n",
        "                    mutable=False)\n",
        "  return jax.lax.all_gather(out, axis_name='batch')\n",
        "\n",
        "pmodel_fn = jax.pmap(\n",
        "    # Note rng_keys are useless in eval mode since there's no randomness.\n",
        "    _model_fn,\n",
        "    in_axes=(0, 0, 0, 0, 0),  # Only distribute the data input.\n",
        "    devices=devices_to_use,\n",
        "    donate_argnums=(3,),  # Donate the 'rays' argument.\n",
        "    axis_name='batch',\n",
        ")\n",
        "\n",
        "render_fn = functools.partial(evaluation.render_image,\n",
        "                              model_fn=pmodel_fn,\n",
        "                              device_count=len(devices),\n",
        "                              chunk=eval_config.chunk)\n",
        "train_step = functools.partial(\n",
        "    training.train_step,\n",
        "    model,\n",
        "    elastic_reduce_method=train_config.elastic_reduce_method,\n",
        "    elastic_loss_type=train_config.elastic_loss_type,\n",
        "    use_elastic_loss=train_config.use_elastic_loss,\n",
        "    use_background_loss=train_config.use_background_loss,\n",
        "    use_warp_reg_loss=train_config.use_warp_reg_loss,\n",
        ")\n",
        "ptrain_step = jax.pmap(\n",
        "    train_step,\n",
        "    axis_name='batch',\n",
        "    devices=devices,\n",
        "    # rng_key, state, batch, scalar_params.\n",
        "    in_axes=(0, 0, 0, None),\n",
        "    # Treat use_elastic_loss as compile-time static.\n",
        "    donate_argnums=(2,),  # Donate the 'batch' argument.\n",
        ")"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "vbc7cMr5aR_1",
        "cellView": "form"
      },
      "source": [
        "# @title Train a Nerfie!\n",
        "# @markdown This runs the training loop!\n",
        "\n",
        "import mediapy\n",
        "from nerfies import utils\n",
        "from nerfies import visualization as viz\n",
        "\n",
        "\n",
        "print_every_n_iterations = 100  # @param{type:'number'}\n",
        "visualize_results_every_n_iterations = 500  # @param{type:'number'}\n",
        "save_checkpoint_every_n_iterations = 1000  # @param{type:'number'}\n",
        "\n",
        "\n",
        "logging.info('Starting training')\n",
        "rng = rng + jax.process_index()  # Make random seed separate across hosts.\n",
        "keys = random.split(rng, len(devices))\n",
        "time_tracker = utils.TimeTracker()\n",
        "time_tracker.tic('data', 'total')\n",
        "\n",
        "for step, batch in zip(range(step, train_config.max_steps + 1), train_iter):\n",
        "  time_tracker.toc('data')\n",
        "  scalar_params = scalar_params.replace(\n",
        "      learning_rate=learning_rate_sched(step),\n",
        "      elastic_loss_weight=elastic_loss_weight_sched(step))\n",
        "  warp_alpha = jax_utils.replicate(warp_alpha_sched(step), devices)\n",
        "  state = state.replace(warp_alpha=warp_alpha)\n",
        "\n",
        "  with time_tracker.record_time('train_step'):\n",
        "    state, stats, keys = ptrain_step(keys, state, batch, scalar_params)\n",
        "    time_tracker.toc('total')\n",
        "\n",
        "  if step % print_every_n_iterations == 0:\n",
        "    logging.info(\n",
        "        'step=%d, warp_alpha=%.04f, %s',\n",
        "        step, warp_alpha_sched(step), time_tracker.summary_str('last'))\n",
        "    coarse_metrics_str = ', '.join(\n",
        "        [f'{k}={v.mean():.04f}' for k, v in stats['coarse'].items()])\n",
        "    logging.info('\\tcoarse metrics: %s', coarse_metrics_str)\n",
        "    # Only build the fine-level metrics string inside the guard: indexing\n",
        "    # stats['fine'] unconditionally raises a KeyError when the model ran\n",
        "    # without a fine sampling level.\n",
        "    if 'fine' in stats:\n",
        "      fine_metrics_str = ', '.join(\n",
        "          [f'{k}={v.mean():.04f}' for k, v in stats['fine'].items()])\n",
        "      logging.info('\\tfine metrics: %s', fine_metrics_str)\n",
        "  \n",
        "  if step % visualize_results_every_n_iterations == 0:\n",
        "    print(f'[step={step}] Training set visualization')\n",
        "    eval_batch = next(train_eval_iter)\n",
        "    render = render_fn(state, eval_batch, rng=rng)\n",
        "    rgb = render['rgb']\n",
        "    acc = render['acc']\n",
        "    depth_exp = render['depth']\n",
        "    depth_med = render['med_depth']\n",
        "    rgb_target = eval_batch['rgb']\n",
        "    depth_med_viz = viz.colorize(depth_med, cmin=datasource.near, cmax=datasource.far)\n",
        "    mediapy.show_images([rgb_target, rgb, depth_med_viz],\n",
        "                        titles=['GT RGB', 'Pred RGB', 'Pred Depth'])\n",
        "\n",
        "    print(f'[step={step}] Validation set visualization')\n",
        "    eval_batch = next(val_eval_iter)\n",
        "    render = render_fn(state, eval_batch, rng=rng)\n",
        "    rgb = render['rgb']\n",
        "    acc = render['acc']\n",
        "    depth_exp = render['depth']\n",
        "    depth_med = render['med_depth']\n",
        "    rgb_target = eval_batch['rgb']\n",
        "    depth_med_viz = viz.colorize(depth_med, cmin=datasource.near, cmax=datasource.far)\n",
        "    mediapy.show_images([rgb_target, rgb, depth_med_viz],\n",
        "                       titles=['GT RGB', 'Pred RGB', 'Pred Depth'])\n",
        "\n",
        "  if step % save_checkpoint_every_n_iterations == 0:\n",
        "    training.save_checkpoint(checkpoint_dir, state)\n",
        "\n",
        "  time_tracker.tic('data', 'total')\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "o69auGWvdyyd"
      },
      "source": [
        ""
      ],
      "execution_count": null,
      "outputs": []
    }
  ]
}