{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "infinite_nature_demo.ipynb",
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Jgl7w6jrMLYI"
      },
      "source": [
        "Copyright 2021 Google LLC.\n",
        "\n",
        "Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "you may not use this file except in compliance with the License.\n",
        "You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0\n",
        "\n",
        "Unless required by applicable law or agreed to in writing, software\n",
        "distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "See the License for the specific language governing permissions and\n",
        "limitations under the License.\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "pmFK0wEyMQ-X"
      },
      "source": [
        "# Infinite Nature example Colab\n",
        "\n",
        "The Colab is part of the code for the paper ___Infinite Nature: Perpetual View Generation of Natural Scenes from a Single Image___, and may be found at <br>https://github.com/google-research/google-research/tree/master/infinite_nature.\n",
        "\n",
        "The project page is at https://infinite-nature.github.io/.\n",
        "\n",
        "Choose __Run all__ from the Runtime menu to:\n",
        "* download our code and the tf_mesh_renderer library which we use,\n",
        "* set up the network and load our trained model,\n",
        "* run a demo generating frames indefinitely with autopilot.\n",
        "\n",
        "You can also interact with our demo by clicking on the image to steer the camera.\n",
        "\n",
        "If an error is encountered while running the demo, please send an email to the authors.\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "EvBQf1UZNu0j"
      },
      "source": [
        "## Download code, model weights, and example data and install dependencies."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "hypYi7EJNxJ6"
      },
      "source": [
        "%%shell\n",
        "echo Fetching code from github...\n",
        "\n",
        "# Export just the infinite_nature subtree of the google-research repo.\n",
        "apt install subversion\n",
        "svn export --force https://github.com/google-research/google-research/trunk/infinite_nature\n",
        "\n",
        "echo\n",
        "echo Fetching trained model weights...\n",
        "# Remove stale copies from any previous run before downloading afresh.\n",
        "rm -f autocruise_input*.pkl\n",
        "rm -f ckpt.tar.gz\n",
        "rm -rf ckpt\n",
        "# Three example starting images plus the trained checkpoint.\n",
        "for i in 1 2 3; do\n",
        "  wget https://storage.googleapis.com/gresearch/infinite_nature_public/autocruise_input${i}.pkl\n",
        "done\n",
        "wget https://storage.googleapis.com/gresearch/infinite_nature_public/ckpt.tar.gz\n",
        "tar -xf ckpt.tar.gz\n",
        "\n",
        "echo\n",
        "echo Installing required dependency...\n",
        "pip install tensorflow-addons\n",
        "\n",
        "echo\n",
        "echo Fetching tf_mesh_renderer and compiling kernels...\n",
        "cd infinite_nature\n",
        "rm -rf tf_mesh_renderer\n",
        "source download_tf_mesh_renderer.sh\n",
        "\n",
        "echo Done.\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "08MXs7cBPDwO"
      },
      "source": [
        "import os\n",
        "import sys\n",
        "\n",
        "import tensorflow as tf\n",
        "\n",
        "# Make sure the dynamic linker can find TensorFlow's shared libraries.\n",
        "os.system('ldconfig ' + tf.sysconfig.get_lib())\n",
        "\n",
        "# Put the downloaded code and the bundled mesh renderer on the module path.\n",
        "for library_dir in ('infinite_nature',\n",
        "                    'infinite_nature/tf_mesh_renderer/mesh_renderer'):\n",
        "  sys.path.append(library_dir)\n",
        "\n",
        "# The mesh renderer library loads its .so file relative to this directory.\n",
        "os.environ['TEST_SRCDIR'] = 'infinite_nature'"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "nvJVkxMbGy6D"
      },
      "source": [
        "import imageio\n",
        "import IPython\n",
        "import numpy as np\n",
        "import pickle\n",
        "import tensorflow as tf\n",
        "import tensorflow_hub as hub\n",
        "\n",
        "import config\n",
        "import fly_camera\n",
        "import infinite_nature_lib\n",
        "\n",
        "# Build model and restore checkpoint.\n",
        "config.set_training(False)\n",
        "model_path = \"ckpt/model.ckpt-6935893\"\n",
        "render_refine, style_encoding = infinite_nature_lib.load_model(model_path)\n",
        "\n",
        "def _load_rgbd(path):\n",
        "  \"\"\"Returns the 'input_rgbd' entry of a pickled autocruise example.\"\"\"\n",
        "  # A context manager closes the file handle after loading (the previous\n",
        "  # bare open() calls leaked their handles). NOTE: pickle.load executes\n",
        "  # arbitrary code; these files come from the project's own storage bucket.\n",
        "  with open(path, \"rb\") as f:\n",
        "    return pickle.load(f)['input_rgbd']\n",
        "\n",
        "initial_rgbds = [\n",
        "    _load_rgbd(\"autocruise_input1.pkl\"),\n",
        "    _load_rgbd(\"autocruise_input2.pkl\"),\n",
        "    _load_rgbd(\"autocruise_input3.pkl\")]\n",
        "\n",
        "# Code for an autopilot demo. We expose two functions that will be invoked\n",
        "# from an HTML/JS frontend: reset and step.\n",
        "\n",
        "# The state that we need to remember while flying:\n",
        "state = {\n",
        "  'intrinsics': None,\n",
        "  'pose': None,\n",
        "  'rgbd': None,\n",
        "  'start_rgbd': None,\n",
        "  'style_noise': None,\n",
        "  'next_pose_function': None,\n",
        "  'direction_offset': None,  # Direction controlled by user's mouse clicks.\n",
        "}\n",
        "\n",
        "def current_image_as_png():\n",
        "  \"\"\"Encodes the RGB channels of the current frame as a displayable PNG.\"\"\"\n",
        "  imgdata = tf.image.encode_png(\n",
        "      tf.image.convert_image_dtype(state['rgbd'][..., :3], dtype=tf.uint8))\n",
        "  return IPython.display.Image(data=imgdata.numpy())\n",
        "\n",
        "def reset(rgbd=None):\n",
        "  \"\"\"Restarts the flight from a new starting frame.\n",
        "\n",
        "  Args:\n",
        "    rgbd: [H, W, 4] RGB + disparity image to start from, or None to\n",
        "      restart from the previously stored starting frame.\n",
        "  Returns:\n",
        "    The starting frame as an IPython PNG image.\n",
        "  \"\"\"\n",
        "  if rgbd is None:\n",
        "    rgbd = state['start_rgbd']\n",
        "\n",
        "  height, width, _ = rgbd.shape\n",
        "  aspect_ratio = width / float(height)\n",
        "\n",
        "  rgbd = tf.image.resize(rgbd, [160, 256])\n",
        "  state['rgbd'] = rgbd\n",
        "  state['start_rgbd'] = rgbd\n",
        "  # Start from the identity pose (camera at the origin).\n",
        "  state['pose'] = np.array(\n",
        "      [[1.0, 0.0, 0.0, 0.0],\n",
        "       [0.0, 1.0, 0.0, 0.0],\n",
        "       [0.0, 0.0, 1.0, 0.0]],\n",
        "      dtype=np.float32)\n",
        "  # 0.8 focal_x corresponds to a FOV of ~64 degrees.\n",
        "  state['intrinsics'] = np.array(\n",
        "      [0.8, 0.8 * aspect_ratio, .5, .5],\n",
        "      dtype=np.float32)\n",
        "  state['direction_offset'] = (0.0, 0.0)\n",
        "  state['style_noise'] = style_encoding(rgbd)\n",
        "  state['next_pose_function'] = fly_camera.fly_dynamic(\n",
        "    state['intrinsics'],\n",
        "    state['pose'],\n",
        "    turn_function=(lambda _: state['direction_offset']))\n",
        "  return current_image_as_png()\n",
        "\n",
        "\n",
        "def step(offsetx, offsety):\n",
        "  \"\"\"Renders the next frame, steering by the frontend's (offsetx, offsety).\"\"\"\n",
        "  state['direction_offset'] = (offsetx, offsety)\n",
        "  next_pose = state['next_pose_function'](state['rgbd'])\n",
        "  next_rgbd = render_refine(\n",
        "       state['rgbd'], state['style_noise'],\n",
        "       state['pose'], state['intrinsics'],\n",
        "       next_pose, state['intrinsics'])\n",
        "  state['pose'] = next_pose\n",
        "  state['rgbd'] = next_rgbd\n",
        "  return current_image_as_png()\n",
        "\n",
        "\n",
        "# To run on user-supplied images, we use MiDaS V2 to obtain initial disparity.\n",
        "midas_model = hub.load('https://tfhub.dev/intel/midas/v2/2', tags=['serve'])\n",
        "\n",
        "def midas_disparity(rgb):\n",
        "  \"\"\"Computes MiDaS v2 disparity on an RGB input image.\n",
        "\n",
        "  Args:\n",
        "    rgb: [H, W, 3] Range [0.0, 1.0].\n",
        "  Returns:\n",
        "    [H, W, 1] MiDaS disparity resized to the input size and in the range\n",
        "    [0.0, 1.0]\n",
        "  \"\"\"\n",
        "  size = rgb.shape[:2]\n",
        "  resized = tf.image.resize(rgb, [384, 384], tf.image.ResizeMethod.BICUBIC)\n",
        "  # The MiDaS network wants input shaped [1, C, H, W].\n",
        "  midas_input = tf.transpose(resized, [2, 0, 1])[tf.newaxis]\n",
        "  prediction = midas_model.signatures['serving_default'](midas_input)['default'][0]\n",
        "  # Normalize the raw prediction to the range [0.0, 1.0].\n",
        "  disp_min = tf.reduce_min(prediction)\n",
        "  disp_max = tf.reduce_max(prediction)\n",
        "  prediction = (prediction - disp_min) / (disp_max - disp_min)\n",
        "  return tf.image.resize(\n",
        "      prediction[..., tf.newaxis], size, method=tf.image.ResizeMethod.AREA)\n",
        "\n",
        "\n",
        "def load_initial(i):\n",
        "  \"\"\"Restarts the demo from bundled example image i (0, 1 or 2).\"\"\"\n",
        "  return reset(rgbd=initial_rgbds[i])\n",
        "\n",
        "\n",
        "def load_image(data):\n",
        "  \"\"\"Restarts the demo from an uploaded PNG or JPEG image.\"\"\"\n",
        "  # Data converted from JS ends up as a string, needs to be converted to\n",
        "  # bytes using Latin-1 encoding (which just maps 0-255 to 0-255).\n",
        "  data = data.encode('Latin-1')\n",
        "  rgb = tf.image.decode_image(data, channels=3, dtype=tf.float32)\n",
        "  resized = tf.image.resize(rgb, [160, 256], tf.image.ResizeMethod.AREA)\n",
        "  rgbd = tf.concat([resized, midas_disparity(resized)], axis=-1)\n",
        "  return reset(rgbd=rgbd)\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "sCuRX1liUEVM"
      },
      "source": [
        "import IPython\n",
        "from google.colab import output\n",
        "\n",
        "# The front-end for our interactive demo: an image view with a steering\n",
        "# cursor, plus buttons to play/pause/step and to choose a starting image.\n",
        "\n",
        "html='''\n",
        "<style>\n",
        "#view {\n",
        "  width: 512px;\n",
        "  height: 320px;\n",
        "  background-color: #aaa;\n",
        "  background-size: 100% 100%;\n",
        "  border: 1px solid #000;\n",
        "  margin: 20px;\n",
        "  position: relative;\n",
        "}\n",
        "#rgb {\n",
        "  height: 100%;\n",
        "}\n",
        "#cursor {\n",
        "  position: absolute;\n",
        "  height: 0; width: 0;\n",
        "  left: 50%; top: 50%;\n",
        "  opacity: .5;\n",
        "}\n",
        "#cursor::before, #cursor::after {\n",
        "  content: '';\n",
        "  position: absolute;\n",
        "  background: #f04;\n",
        "  pointer-events: none;\n",
        "}\n",
        "#cursor::before {\n",
        "  left: -10px; top: -1px; width: 20px; height: 2px;\n",
        "}\n",
        "#cursor::after {\n",
        "  left: -1px; top: -10px; width: 2px; height: 20px;\n",
        "}\n",
        ".buttons {\n",
        "  margin: 20px;\n",
        "}\n",
        ".buttons div {\n",
        "  display: inline-block;\n",
        "  cursor: pointer;\n",
        "  padding: 20px;\n",
        "  background: #eee;\n",
        "  border: 2px solid #aaa;\n",
        "  border-radius: 3px;\n",
        "  margin-right: 10px;\n",
        "  font-weight: bold;\n",
        "  text-transform: uppercase;\n",
        "  letter-spacing: 1px;\n",
        "  color: #444;\n",
        "}\n",
        ".buttons div:active {\n",
        "  background: #444;\n",
        "  color: #fff;\n",
        "}\n",
        "h3 {\n",
        "  margin-left: 20px;\n",
        "}\n",
        "</style>\n",
        "<h3>Infinite Nature interactive demo</h3>\n",
        "<div id=view><img id=rgb><div id=cursor></div></div>\n",
        "<div class=buttons>\n",
        "Click <b>Play</b> to run or <b>Step</b> to advance frame by frame.\n",
        "Click mouse over image to steer.<br><br>\n",
        "<div id=restart>Restart</div><div id=play>Play</div><div id=pause>Pause</div><div id=step>Step</div>\n",
        "<br><br>\n",
        "Select starting image (be patient…):<br><br>\n",
        "<div id=image1>Image 1</div><div id=image2>Image 2</div><div id=image3>Image 3</div><div id=upload>Upload…</div><br>\n",
        "<input style=\"display:none\" type=file id=chooser accept=\".png,.jpg\">\n",
        "</div>\n",
        "<script>\n",
        "let playing = true;\n",
        "let pending = false;\n",
        "let x = 0.5;\n",
        "let y = 0.5;\n",
        "let cursor_count = 0;\n",
        "\n",
        "// Invokes a Python callback and displays the PNG frame it returns.\n",
        "async function call(name, ...parms) {\n",
        "  pending = true;\n",
        "  const result = await google.colab.kernel.invokeFunction(name, parms, {});\n",
        "  pending = false;\n",
        "  const url = `data:image/png;base64,${result.data['image/png']}`;\n",
        "  document.querySelector('#rgb').src = url;\n",
        "  if (!playing) { return; }\n",
        "  step();\n",
        "}\n",
        "\n",
        "async function reset() {\n",
        "  playing = false;\n",
        "  await call('reset');\n",
        "}\n",
        "\n",
        "async function selectImage(i) {\n",
        "  playing = false;\n",
        "  await call('load_initial', i);\n",
        "}\n",
        "\n",
        "function upload() {\n",
        "  playing = false;\n",
        "  document.querySelector('#chooser').click();\n",
        "}\n",
        "\n",
        "function uploadFile(file) {\n",
        "  if (file.type != 'image/png' && file.type != 'image/jpeg') {\n",
        "    alert('Only PNG or JPEG files accepted.');\n",
        "    return;\n",
        "  }\n",
        "  const reader = new FileReader();\n",
        "  reader.onload = (e) => {\n",
        "    const imagebytes = e.target.result;\n",
        "    call('load_image', imagebytes);\n",
        "  }\n",
        "  document.querySelector('#rgb').src = '';\n",
        "  reader.readAsBinaryString(file);\n",
        "}\n",
        "\n",
        "async function step() {\n",
        "  if (pending) { return; }\n",
        "  await call('step', 2*x - 1, 2*y - 1);\n",
        "  // Cursor moves back towards center.\n",
        "  if (cursor_count) {\n",
        "    cursor_count--;\n",
        "  } else {\n",
        "    x = 0.5 + (x - 0.5) * .9;\n",
        "    y = 0.5 + (y - 0.5) * .9;\n",
        "    update_cursor();\n",
        "  }\n",
        "}\n",
        "\n",
        "async function play() {\n",
        "  playing = true;\n",
        "  await step();\n",
        "}\n",
        "\n",
        "async function pause() {\n",
        "  playing = false;\n",
        "}\n",
        "\n",
        "function update_cursor() {\n",
        "  let cursor = document.querySelector('#cursor');\n",
        "  cursor.style.left = `${(100 * x).toFixed(2)}%`;\n",
        "  cursor.style.top = `${(100 * y).toFixed(2)}%`;\n",
        "}\n",
        "\n",
        "function cursor(e) {\n",
        "  x = e.offsetX / e.target.clientWidth;\n",
        "  y = e.offsetY / e.target.clientHeight;\n",
        "  cursor_count = 1;\n",
        "  update_cursor();\n",
        "}\n",
        "\n",
        "document.querySelector('#restart').addEventListener('click', reset);\n",
        "document.querySelector('#image1').addEventListener('click', () => selectImage(0));\n",
        "document.querySelector('#image2').addEventListener('click', () => selectImage(1));\n",
        "document.querySelector('#image3').addEventListener('click', () => selectImage(2));\n",
        "document.querySelector('#upload').addEventListener('click', upload);\n",
        "document.querySelector('#play').addEventListener('click', play);\n",
        "document.querySelector('#pause').addEventListener('click', pause);\n",
        "document.querySelector('#step').addEventListener('click', () => { playing = false; step(); });\n",
        "document.querySelector('#view').addEventListener('click', cursor);\n",
        "document.querySelector('#chooser').addEventListener('change', (e) => {\n",
        "  if (e.target.files.length > 0) {\n",
        "    uploadFile(e.target.files[0]);\n",
        "  }\n",
        "});\n",
        "selectImage(0);\n",
        "</script>\n",
        "'''\n",
        "\n",
        "display(IPython.display.HTML(html))\n",
        "\n",
        "# Expose the Python backend functions to the JS frontend above.\n",
        "output.register_callback('load_initial', load_initial)\n",
        "output.register_callback('load_image', load_image)\n",
        "output.register_callback('reset', reset)\n",
        "output.register_callback('step', step)\n"
      ],
      "execution_count": null,
      "outputs": []
    }
  ]
}