{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<a href=\"https://colab.research.google.com/github/facebookresearch/habitat-sim/blob/main/examples/tutorials/colabs/ECCV_2020_Interactivity.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# Habitat-sim Interactivity\n",
    "\n",
    "This use-case driven tutorial covers Habitat-sim interactivity, including:\n",
    "- Adding new objects to a scene\n",
    "- Kinematic object manipulation\n",
    "- Physics simulation API\n",
    "- Sampling valid object locations\n",
    "- Generating a NavMesh including STATIC objects\n",
    "- Agent embodiment and continuous control"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Installation { display-mode: \"form\" }\n",
    "# @markdown (double click to show code).\n",
    "\n",
    "!curl -L https://raw.githubusercontent.com/facebookresearch/habitat-sim/main/examples/colab_utils/colab_install.sh | NIGHTLY=true bash -s"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "# @title Path Setup and Imports { display-mode: \"form\" }\n",
    "# @markdown (double click to show code).\n",
    "\n",
    "%cd /content/habitat-sim\n",
    "## [setup]\n",
    "import math\n",
    "import os\n",
    "import random\n",
    "import sys\n",
    "\n",
    "import git\n",
    "import magnum as mn\n",
    "import numpy as np\n",
    "\n",
    "%matplotlib inline\n",
    "from matplotlib import pyplot as plt\n",
    "from PIL import Image\n",
    "\n",
    "import habitat_sim\n",
    "from habitat_sim.utils import common as ut\n",
    "from habitat_sim.utils import viz_utils as vut\n",
    "\n",
    "try:\n",
    "    import ipywidgets as widgets\n",
    "    from IPython.display import display as ipydisplay\n",
    "\n",
    "    # For using jupyter/ipywidget IO components\n",
    "\n",
    "    HAS_WIDGETS = True\n",
    "except ImportError:\n",
    "    HAS_WIDGETS = False\n",
    "\n",
    "\n",
    "if \"google.colab\" in sys.modules:\n",
    "    os.environ[\"IMAGEIO_FFMPEG_EXE\"] = \"/usr/bin/ffmpeg\"\n",
    "\n",
    "repo = git.Repo(\".\", search_parent_directories=True)\n",
    "dir_path = repo.working_tree_dir\n",
    "%cd $dir_path\n",
    "data_path = os.path.join(dir_path, \"data\")\n",
    "output_directory = \"examples/tutorials/interactivity_output/\"  # @param {type:\"string\"}\n",
    "output_path = os.path.join(dir_path, output_directory)\n",
    "if not os.path.exists(output_path):\n",
    "    os.mkdir(output_path)\n",
    "\n",
    "# define some globals the first time we run.\n",
    "if \"sim\" not in globals():\n",
    "    global sim\n",
    "    sim = None\n",
    "    global obj_attr_mgr\n",
    "    obj_attr_mgr = None\n",
    "    global prim_attr_mgr\n",
    "    obj_attr_mgr = None\n",
    "    global stage_attr_mgr\n",
    "    stage_attr_mgr = None\n",
    "    global rigid_obj_mgr\n",
    "    rigid_obj_mgr = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "# @title Define Configuration Utility Functions { display-mode: \"form\" }\n",
    "# @markdown (double click to show code)\n",
    "\n",
    "# @markdown This cell defines a number of utility functions used throughout the tutorial to make simulator reconstruction easy:\n",
    "# @markdown - make_cfg\n",
    "# @markdown - make_default_settings\n",
    "# @markdown - make_simulator_from_settings\n",
    "\n",
    "\n",
    "def make_cfg(settings):\n",
    "    sim_cfg = habitat_sim.SimulatorConfiguration()\n",
    "    sim_cfg.gpu_device_id = 0\n",
    "    sim_cfg.scene_id = settings[\"scene\"]\n",
    "    sim_cfg.enable_physics = settings[\"enable_physics\"]\n",
    "\n",
    "    # Note: all sensors must have the same resolution\n",
    "    sensor_specs = []\n",
    "    if settings[\"color_sensor_1st_person\"]:\n",
    "        color_sensor_1st_person_spec = habitat_sim.CameraSensorSpec()\n",
    "        color_sensor_1st_person_spec.uuid = \"color_sensor_1st_person\"\n",
    "        color_sensor_1st_person_spec.sensor_type = habitat_sim.SensorType.COLOR\n",
    "        color_sensor_1st_person_spec.resolution = [\n",
    "            settings[\"height\"],\n",
    "            settings[\"width\"],\n",
    "        ]\n",
    "        color_sensor_1st_person_spec.position = [0.0, settings[\"sensor_height\"], 0.0]\n",
    "        color_sensor_1st_person_spec.orientation = [\n",
    "            settings[\"sensor_pitch\"],\n",
    "            0.0,\n",
    "            0.0,\n",
    "        ]\n",
    "        color_sensor_1st_person_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n",
    "        sensor_specs.append(color_sensor_1st_person_spec)\n",
    "    if settings[\"depth_sensor_1st_person\"]:\n",
    "        depth_sensor_1st_person_spec = habitat_sim.CameraSensorSpec()\n",
    "        depth_sensor_1st_person_spec.uuid = \"depth_sensor_1st_person\"\n",
    "        depth_sensor_1st_person_spec.sensor_type = habitat_sim.SensorType.DEPTH\n",
    "        depth_sensor_1st_person_spec.resolution = [\n",
    "            settings[\"height\"],\n",
    "            settings[\"width\"],\n",
    "        ]\n",
    "        depth_sensor_1st_person_spec.position = [0.0, settings[\"sensor_height\"], 0.0]\n",
    "        depth_sensor_1st_person_spec.orientation = [\n",
    "            settings[\"sensor_pitch\"],\n",
    "            0.0,\n",
    "            0.0,\n",
    "        ]\n",
    "        depth_sensor_1st_person_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n",
    "        sensor_specs.append(depth_sensor_1st_person_spec)\n",
    "    if settings[\"semantic_sensor_1st_person\"]:\n",
    "        semantic_sensor_1st_person_spec = habitat_sim.CameraSensorSpec()\n",
    "        semantic_sensor_1st_person_spec.uuid = \"semantic_sensor_1st_person\"\n",
    "        semantic_sensor_1st_person_spec.sensor_type = habitat_sim.SensorType.SEMANTIC\n",
    "        semantic_sensor_1st_person_spec.resolution = [\n",
    "            settings[\"height\"],\n",
    "            settings[\"width\"],\n",
    "        ]\n",
    "        semantic_sensor_1st_person_spec.position = [\n",
    "            0.0,\n",
    "            settings[\"sensor_height\"],\n",
    "            0.0,\n",
    "        ]\n",
    "        semantic_sensor_1st_person_spec.orientation = [\n",
    "            settings[\"sensor_pitch\"],\n",
    "            0.0,\n",
    "            0.0,\n",
    "        ]\n",
    "        semantic_sensor_1st_person_spec.sensor_subtype = (\n",
    "            habitat_sim.SensorSubType.PINHOLE\n",
    "        )\n",
    "        sensor_specs.append(semantic_sensor_1st_person_spec)\n",
    "    if settings[\"color_sensor_3rd_person\"]:\n",
    "        color_sensor_3rd_person_spec = habitat_sim.CameraSensorSpec()\n",
    "        color_sensor_3rd_person_spec.uuid = \"color_sensor_3rd_person\"\n",
    "        color_sensor_3rd_person_spec.sensor_type = habitat_sim.SensorType.COLOR\n",
    "        color_sensor_3rd_person_spec.resolution = [\n",
    "            settings[\"height\"],\n",
    "            settings[\"width\"],\n",
    "        ]\n",
    "        color_sensor_3rd_person_spec.position = [\n",
    "            0.0,\n",
    "            settings[\"sensor_height\"] + 0.2,\n",
    "            0.2,\n",
    "        ]\n",
    "        color_sensor_3rd_person_spec.orientation = [-math.pi / 4, 0, 0]\n",
    "        color_sensor_3rd_person_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n",
    "        sensor_specs.append(color_sensor_3rd_person_spec)\n",
    "\n",
    "    # Here you can specify the amount of displacement in a forward action and the turn angle\n",
    "    agent_cfg = habitat_sim.agent.AgentConfiguration()\n",
    "    agent_cfg.sensor_specifications = sensor_specs\n",
    "    return habitat_sim.Configuration(sim_cfg, [agent_cfg])\n",
    "\n",
    "\n",
    "def make_default_settings():\n",
    "    settings = {\n",
    "        \"width\": 720,  # Spatial resolution of the observations\n",
    "        \"height\": 544,\n",
    "        \"scene\": \"./data/scene_datasets/mp3d_example/17DRP5sb8fy/17DRP5sb8fy.glb\",  # Scene path\n",
    "        \"default_agent\": 0,\n",
    "        \"sensor_height\": 1.5,  # Height of sensors in meters\n",
    "        \"sensor_pitch\": -math.pi / 8.0,  # sensor pitch (x rotation in rads)\n",
    "        \"color_sensor_1st_person\": True,  # RGB sensor\n",
    "        \"color_sensor_3rd_person\": False,  # RGB sensor 3rd person\n",
    "        \"depth_sensor_1st_person\": False,  # Depth sensor\n",
    "        \"semantic_sensor_1st_person\": False,  # Semantic sensor\n",
    "        \"seed\": 1,\n",
    "        \"enable_physics\": True,  # enable dynamics simulation\n",
    "    }\n",
    "    return settings\n",
    "\n",
    "\n",
    "def make_simulator_from_settings(sim_settings):\n",
    "    cfg = make_cfg(sim_settings)\n",
    "    # clean-up the current simulator instance if it exists\n",
    "    global sim\n",
    "    global obj_attr_mgr\n",
    "    global prim_attr_mgr\n",
    "    global stage_attr_mgr\n",
    "    global rigid_obj_mgr\n",
     "    if sim is not None:\n",
    "        sim.close()\n",
    "    # initialize the simulator\n",
    "    sim = habitat_sim.Simulator(cfg)\n",
    "    # Managers of various Attributes templates\n",
    "    obj_attr_mgr = sim.get_object_template_manager()\n",
    "    obj_attr_mgr.load_configs(str(os.path.join(data_path, \"objects/example_objects\")))\n",
    "    obj_attr_mgr.load_configs(str(os.path.join(data_path, \"objects/locobot_merged\")))\n",
    "    prim_attr_mgr = sim.get_asset_template_manager()\n",
    "    stage_attr_mgr = sim.get_stage_template_manager()\n",
    "    # Manager providing access to rigid objects\n",
    "    rigid_obj_mgr = sim.get_rigid_object_manager()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Define Simulation Utility Functions { display-mode: \"form\" }\n",
    "# @markdown (double click to show code)\n",
    "\n",
    "# @markdown - remove_all_objects\n",
    "# @markdown - simulate\n",
    "# @markdown - sample_object_state\n",
    "\n",
    "\n",
    "def simulate(sim, dt=1.0, get_frames=True):\n",
    "    # simulate dt seconds at 60Hz to the nearest fixed timestep\n",
    "    print(\"Simulating \" + str(dt) + \" world seconds.\")\n",
    "    observations = []\n",
    "    start_time = sim.get_world_time()\n",
    "    while sim.get_world_time() < start_time + dt:\n",
    "        sim.step_physics(1.0 / 60.0)\n",
    "        if get_frames:\n",
    "            observations.append(sim.get_sensor_observations())\n",
    "    return observations\n",
    "\n",
    "\n",
    "# Set an object transform relative to the agent state\n",
    "def set_object_state_from_agent(\n",
    "    sim,\n",
    "    obj,\n",
    "    offset=np.array([0, 2.0, -1.5]),\n",
    "    orientation=mn.Quaternion(((0, 0, 0), 1)),\n",
    "):\n",
    "    agent_transform = sim.agents[0].scene_node.transformation_matrix()\n",
    "    ob_translation = agent_transform.transform_point(offset)\n",
    "    obj.translation = ob_translation\n",
    "    obj.rotation = orientation\n",
    "\n",
    "\n",
    "# sample a random valid state for the object from the scene bounding box or navmesh\n",
    "def sample_object_state(\n",
    "    sim, obj, from_navmesh=True, maintain_object_up=True, max_tries=100, bb=None\n",
    "):\n",
    "    # check that the object is not STATIC\n",
     "    if obj.motion_type is habitat_sim.physics.MotionType.STATIC:\n",
     "        print(\"sample_object_state : Object is STATIC, aborting.\")\n",
     "        return False\n",
    "    if from_navmesh:\n",
    "        if not sim.pathfinder.is_loaded:\n",
    "            print(\"sample_object_state : No pathfinder, aborting.\")\n",
    "            return False\n",
    "    elif not bb:\n",
    "        print(\n",
    "            \"sample_object_state : from_navmesh not specified and no bounding box provided, aborting.\"\n",
    "        )\n",
    "        return False\n",
    "    tries = 0\n",
    "    valid_placement = False\n",
    "    # Note: following assumes sim was not reconfigured without close\n",
    "    scene_collision_margin = stage_attr_mgr.get_template_by_id(0).margin\n",
    "    while not valid_placement and tries < max_tries:\n",
    "        tries += 1\n",
    "        # initialize sample location to random point in scene bounding box\n",
    "        sample_location = np.array([0, 0, 0])\n",
    "        if from_navmesh:\n",
    "            # query random navigable point\n",
    "            sample_location = sim.pathfinder.get_random_navigable_point()\n",
    "        else:\n",
    "            sample_location = np.random.uniform(bb.min, bb.max)\n",
    "        # set the test state\n",
    "        obj.translation = sample_location\n",
    "        if maintain_object_up:\n",
    "            # random rotation only on the Y axis\n",
    "            y_rotation = mn.Quaternion.rotation(\n",
    "                mn.Rad(random.random() * 2 * math.pi), mn.Vector3(0, 1.0, 0)\n",
    "            )\n",
    "            obj.rotation = y_rotation * obj.rotation\n",
    "        else:\n",
    "            # unconstrained random rotation\n",
    "            obj.rotation = ut.random_quaternion()\n",
    "\n",
    "        # raise object such that lowest bounding box corner is above the navmesh sample point.\n",
    "        if from_navmesh:\n",
    "            obj_node = obj.root_scene_node\n",
    "            xform_bb = habitat_sim.geo.get_transformed_bb(\n",
    "                obj_node.cumulative_bb, obj_node.transformation\n",
    "            )\n",
    "            # also account for collision margin of the scene\n",
    "            obj.translation += mn.Vector3(\n",
    "                0, xform_bb.size_y() / 2.0 + scene_collision_margin, 0\n",
    "            )\n",
    "\n",
    "        # test for penetration with the environment\n",
    "        if not sim.contact_test(obj.object_id):\n",
    "            valid_placement = True\n",
    "\n",
    "    if not valid_placement:\n",
    "        return False\n",
    "    return True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Define Visualization Utility Function { display-mode: \"form\" }\n",
    "# @markdown (double click to show code)\n",
    "# @markdown - display_sample\n",
    "\n",
    "# Change to do something like this maybe: https://stackoverflow.com/a/41432704\n",
    "def display_sample(\n",
    "    rgb_obs, semantic_obs=np.array([]), depth_obs=np.array([]), key_points=None\n",
    "):\n",
    "    from habitat_sim.utils.common import d3_40_colors_rgb\n",
    "\n",
    "    rgb_img = Image.fromarray(rgb_obs, mode=\"RGBA\")\n",
    "\n",
    "    arr = [rgb_img]\n",
    "    titles = [\"rgb\"]\n",
    "    if semantic_obs.size != 0:\n",
    "        semantic_img = Image.new(\"P\", (semantic_obs.shape[1], semantic_obs.shape[0]))\n",
    "        semantic_img.putpalette(d3_40_colors_rgb.flatten())\n",
    "        semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))\n",
    "        semantic_img = semantic_img.convert(\"RGBA\")\n",
    "        arr.append(semantic_img)\n",
    "        titles.append(\"semantic\")\n",
    "\n",
    "    if depth_obs.size != 0:\n",
    "        depth_img = Image.fromarray((depth_obs / 10 * 255).astype(np.uint8), mode=\"L\")\n",
    "        arr.append(depth_img)\n",
    "        titles.append(\"depth\")\n",
    "\n",
    "    plt.figure(figsize=(12, 8))\n",
    "    for i, data in enumerate(arr):\n",
    "        ax = plt.subplot(1, 3, i + 1)\n",
    "        ax.axis(\"off\")\n",
    "        ax.set_title(titles[i])\n",
    "        # plot points on images\n",
    "        if key_points is not None:\n",
    "            for point in key_points:\n",
    "                plt.plot(point[0], point[1], marker=\"o\", markersize=10, alpha=0.8)\n",
    "        plt.imshow(data)\n",
    "\n",
    "    plt.show(block=False)\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    import argparse\n",
    "\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument(\"--no-display\", dest=\"display\", action=\"store_false\")\n",
    "    parser.add_argument(\"--no-make-video\", dest=\"make_video\", action=\"store_false\")\n",
     "    parser.set_defaults(display=True, make_video=True)\n",
    "    args, _ = parser.parse_known_args()\n",
    "    show_video = args.display\n",
    "    display = args.display\n",
    "    make_video = args.make_video\n",
    "else:\n",
    "    show_video = False\n",
    "    make_video = False\n",
    "    display = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Define Colab GUI Utility Functions { display-mode: \"form\" }\n",
    "# @markdown (double click to show code)\n",
    "\n",
    "# Event handler for dropdowns displaying file-based object handles\n",
    "def on_file_obj_ddl_change(ddl_values):\n",
    "    global sel_file_obj_handle\n",
    "    sel_file_obj_handle = ddl_values[\"new\"]\n",
    "    return sel_file_obj_handle\n",
    "\n",
    "\n",
    "# Event handler for dropdowns displaying prim-based object handles\n",
    "def on_prim_obj_ddl_change(ddl_values):\n",
    "    global sel_prim_obj_handle\n",
    "    sel_prim_obj_handle = ddl_values[\"new\"]\n",
    "    return sel_prim_obj_handle\n",
    "\n",
    "\n",
    "# Event handler for dropdowns displaying asset handles\n",
    "def on_prim_ddl_change(ddl_values):\n",
    "    global sel_asset_handle\n",
    "    sel_asset_handle = ddl_values[\"new\"]\n",
    "    return sel_asset_handle\n",
    "\n",
    "\n",
    "# Build a dropdown list holding obj_handles and set its event handler\n",
    "def set_handle_ddl_widget(obj_handles, handle_types, sel_handle, on_change):\n",
    "    sel_handle = obj_handles[0]\n",
    "    descStr = handle_types + \" Template Handles:\"\n",
    "    style = {\"description_width\": \"300px\"}\n",
    "    obj_ddl = widgets.Dropdown(\n",
    "        options=obj_handles,\n",
    "        value=sel_handle,\n",
    "        description=descStr,\n",
    "        style=style,\n",
    "        disabled=False,\n",
    "        layout={\"width\": \"max-content\"},\n",
    "    )\n",
    "\n",
    "    obj_ddl.observe(on_change, names=\"value\")\n",
    "    return obj_ddl, sel_handle\n",
    "\n",
    "\n",
    "def set_button_launcher(desc):\n",
    "    button = widgets.Button(\n",
    "        description=desc,\n",
    "        layout={\"width\": \"max-content\"},\n",
    "    )\n",
    "    return button\n",
    "\n",
    "\n",
    "def make_sim_and_vid_button(prefix, dt=1.0):\n",
    "    if not HAS_WIDGETS:\n",
    "        return\n",
    "\n",
    "    def on_sim_click(b):\n",
    "        observations = simulate(sim, dt=dt)\n",
    "        vut.make_video(\n",
    "            observations, \"color_sensor_1st_person\", \"color\", output_path + prefix\n",
    "        )\n",
    "\n",
    "    sim_and_vid_btn = set_button_launcher(\"Simulate and Make Video\")\n",
    "    sim_and_vid_btn.on_click(on_sim_click)\n",
    "    ipydisplay(sim_and_vid_btn)\n",
    "\n",
    "\n",
    "def make_clear_all_objects_button():\n",
    "    if not HAS_WIDGETS:\n",
    "        return\n",
    "\n",
    "    def on_clear_click(b):\n",
    "        rigid_obj_mgr.remove_all_objects()\n",
    "\n",
    "    clear_objs_button = set_button_launcher(\"Clear all objects\")\n",
    "    clear_objs_button.on_click(on_clear_click)\n",
    "    ipydisplay(clear_objs_button)\n",
    "\n",
    "\n",
    "# Builds widget-based UI components\n",
    "def build_widget_ui(obj_attr_mgr, prim_attr_mgr):\n",
    "    # Holds the user's desired file-based object template handle\n",
    "    global sel_file_obj_handle\n",
    "    sel_file_obj_handle = \"\"\n",
    "\n",
    "    # Holds the user's desired primitive-based object template handle\n",
    "    global sel_prim_obj_handle\n",
    "    sel_prim_obj_handle = \"\"\n",
    "\n",
    "    # Holds the user's desired primitive asset template handle\n",
    "    global sel_asset_handle\n",
    "    sel_asset_handle = \"\"\n",
    "\n",
    "    # Construct DDLs and assign event handlers\n",
    "    # All file-based object template handles\n",
    "    file_obj_handles = obj_attr_mgr.get_file_template_handles()\n",
    "    prim_obj_handles = obj_attr_mgr.get_synth_template_handles()\n",
    "    prim_asset_handles = prim_attr_mgr.get_template_handles()\n",
    "    if not HAS_WIDGETS:\n",
    "        sel_file_obj_handle = file_obj_handles[0]\n",
    "        sel_prim_obj_handle = prim_obj_handles[0]\n",
    "        sel_asset_handle = prim_asset_handles[0]\n",
    "        return\n",
    "    file_obj_ddl, sel_file_obj_handle = set_handle_ddl_widget(\n",
    "        file_obj_handles,\n",
    "        \"File-based Object\",\n",
    "        sel_file_obj_handle,\n",
    "        on_file_obj_ddl_change,\n",
    "    )\n",
    "    # All primitive asset-based object template handles\n",
    "    prim_obj_ddl, sel_prim_obj_handle = set_handle_ddl_widget(\n",
    "        prim_obj_handles,\n",
    "        \"Primitive-based Object\",\n",
    "        sel_prim_obj_handle,\n",
    "        on_prim_obj_ddl_change,\n",
    "    )\n",
    "    # All primitive asset handles template handles\n",
    "    prim_asset_ddl, sel_asset_handle = set_handle_ddl_widget(\n",
    "        prim_asset_handles, \"Primitive Asset\", sel_asset_handle, on_prim_ddl_change\n",
    "    )\n",
    "    # Display DDLs\n",
    "    ipydisplay(file_obj_ddl)\n",
    "    ipydisplay(prim_obj_ddl)\n",
    "    ipydisplay(prim_asset_ddl)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Initialize Simulator and Load Scene { display-mode: \"form\" }\n",
    "\n",
     "# convenience functions defined in Utility cell manage global variables\n",
    "sim_settings = make_default_settings()\n",
    "# set globals: sim,\n",
    "make_simulator_from_settings(sim_settings)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# Interactivity in Habitat-sim\n",
    "\n",
    "This tutorial covers how to configure and use the Habitat-sim object manipulation API to setup and run physical interaction simulations.\n",
    "\n",
    "## Outline:\n",
    "This section is divided into four use-case driven sub-sections:\n",
    "1.   Introduction to Interactivity\n",
    "2.   Physical Reasoning\n",
    "3.   Generating Scene Clutter on the NavMesh\n",
    "4.   Continuous Embodied Navigation\n",
    "\n",
    "For more tutorial examples and details see the [Interactive Rigid Objects tutorial](https://aihabitat.org/docs/habitat-sim/rigid-object-tutorial.html) also available for Colab [here](https://github.com/facebookresearch/habitat-sim/blob/main/examples/tutorials/colabs/rigid_object_tutorial.ipynb).\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Introduction to Interactivity\n",
    "\n",
     "#### Easily add an object and simulate!\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Select a Simulation Object Template: { display-mode: \"form\" }\n",
    "# @markdown Use the dropdown menu below to select an object template for use in the following examples.\n",
    "\n",
    "# @markdown File-based object templates are loaded from and named after an asset file (e.g. banana.glb), while Primitive-based object templates are generated programmatically (e.g. uv_sphere) with handles (name/key for reference) uniquely generated from a specific parameterization.\n",
    "\n",
    "# @markdown See the Advanced Features tutorial for more details about asset configuration.\n",
    "\n",
    "build_widget_ui(obj_attr_mgr, prim_attr_mgr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "# @title Add either a File-based or Primitive Asset-based object to the scene at a user-specified location.{ display-mode: \"form\" }\n",
    "# @markdown Running this will add a physically-modelled object of the selected type to the scene at the location specified by user, simulate forward for a few seconds and save a movie of the results.\n",
    "\n",
    "# @markdown Choose either the primitive or file-based template recently selected in the dropdown:\n",
    "obj_template_handle = sel_file_obj_handle\n",
     "asset_template_handle = sel_asset_handle\n",
    "object_type = \"File-based\"  # @param [\"File-based\",\"Primitive-based\"]\n",
    "if \"File\" in object_type:\n",
    "    # Handle File-based object handle\n",
    "    obj_template_handle = sel_file_obj_handle\n",
    "elif \"Primitive\" in object_type:\n",
    "    # Handle Primitive-based object handle\n",
    "    obj_template_handle = sel_prim_obj_handle\n",
    "else:\n",
    "    # Unknown - defaults to file-based\n",
    "    pass\n",
    "\n",
    "# @markdown Configure the initial object location (local offset from the agent body node):\n",
     "# default : offset=np.array([0, 2.0, -1.5]), orientation=mn.Quaternion(((0, 0, 0), 1))\n",
    "offset_x = 0.5  # @param {type:\"slider\", min:-2, max:2, step:0.1}\n",
    "offset_y = 1.4  # @param {type:\"slider\", min:0, max:3.0, step:0.1}\n",
    "offset_z = -1.5  # @param {type:\"slider\", min:-3, max:0, step:0.1}\n",
    "offset = np.array([offset_x, offset_y, offset_z])\n",
    "\n",
    "# @markdown Configure the initial object orientation via local Euler angle (degrees):\n",
    "orientation_x = 0  # @param {type:\"slider\", min:-180, max:180, step:1}\n",
    "orientation_y = 0  # @param {type:\"slider\", min:-180, max:180, step:1}\n",
    "orientation_z = 0  # @param {type:\"slider\", min:-180, max:180, step:1}\n",
    "\n",
    "# compose the rotations\n",
    "rotation_x = mn.Quaternion.rotation(mn.Deg(orientation_x), mn.Vector3(1.0, 0, 0))\n",
     "rotation_y = mn.Quaternion.rotation(mn.Deg(orientation_y), mn.Vector3(0, 1.0, 0))\n",
     "rotation_z = mn.Quaternion.rotation(mn.Deg(orientation_z), mn.Vector3(0, 0, 1.0))\n",
    "orientation = rotation_z * rotation_y * rotation_x\n",
    "\n",
    "# Add object instantiated by desired template using template handle\n",
    "obj_1 = rigid_obj_mgr.add_object_by_template_handle(obj_template_handle)\n",
    "\n",
    "# @markdown Note: agent local coordinate system is Y up and -Z forward.\n",
    "# Move object to be in front of the agent\n",
    "set_object_state_from_agent(sim, obj_1, offset=offset, orientation=orientation)\n",
    "\n",
    "# display a still frame of the scene after the object is added if RGB sensor is enabled\n",
    "observations = sim.get_sensor_observations()\n",
    "if display and sim_settings[\"color_sensor_1st_person\"]:\n",
    "    display_sample(observations[\"color_sensor_1st_person\"])\n",
    "\n",
    "example_type = \"adding objects test\"\n",
    "make_sim_and_vid_button(example_type)\n",
    "make_clear_all_objects_button()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "\n",
    "\n",
    "\n",
    "## Physical Reasoning"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This section demonstrates simple setups for physical reasoning tasks in Habitat-sim with a fixed camera position collecting data:\n",
    "- Scripted vs. Dynamic Motion\n",
    "- Object Permanence\n",
    "- Physical plausibility classification\n",
    "- Trajectory Prediction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Select object templates from the GUI: { display-mode: \"form\" }\n",
    "\n",
    "build_widget_ui(obj_attr_mgr, prim_attr_mgr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "# @title Scripted vs. Dynamic Motion { display-mode: \"form\" }\n",
    "# @markdown A quick script to generate video data for AI classification of dynamically dropping vs. kinematically moving objects.\n",
    "rigid_obj_mgr.remove_all_objects()\n",
    "# @markdown Set the scene as dynamic or kinematic:\n",
    "scenario_is_kinematic = True  # @param {type:\"boolean\"}\n",
    "\n",
    "# add the selected object\n",
    "obj_1 = rigid_obj_mgr.add_object_by_template_handle(sel_file_obj_handle)\n",
    "\n",
    "# place the object\n",
    "set_object_state_from_agent(\n",
    "    sim, obj_1, offset=np.array([0, 2.0, -1.0]), orientation=ut.random_quaternion()\n",
    ")\n",
    "\n",
    "if scenario_is_kinematic:\n",
    "    # use the velocity control struct to setup a constant rate kinematic motion\n",
    "    obj_1.motion_type = habitat_sim.physics.MotionType.KINEMATIC\n",
    "    vel_control = obj_1.velocity_control\n",
    "    vel_control.controlling_lin_vel = True\n",
    "    vel_control.linear_velocity = np.array([0, -1.0, 0])\n",
    "\n",
    "# simulate and collect observations\n",
    "example_type = \"kinematic vs dynamic\"\n",
    "observations = simulate(sim, dt=2.0)\n",
    "if make_video:\n",
    "    vut.make_video(\n",
    "        observations,\n",
    "        \"color_sensor_1st_person\",\n",
    "        \"color\",\n",
    "        output_path + example_type,\n",
    "        open_vid=show_video,\n",
    "    )\n",
    "\n",
    "rigid_obj_mgr.remove_all_objects()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "# @title Object Permanence { display-mode: \"form\" }\n",
    "# @markdown This example script demonstrates a possible object permanence task.\n",
    "# @markdown Two objects are dropped behind an occluder. One is removed while occluded.\n",
    "rigid_obj_mgr.remove_all_objects()\n",
    "\n",
    "# @markdown 1. Add the two dynamic objects.\n",
    "# add the selected objects\n",
    "obj_1 = rigid_obj_mgr.add_object_by_template_handle(sel_file_obj_handle)\n",
    "obj_2 = rigid_obj_mgr.add_object_by_template_handle(sel_file_obj_handle)\n",
    "\n",
    "# place the objects\n",
    "set_object_state_from_agent(\n",
    "    sim, obj_1, offset=np.array([0.5, 2.0, -1.0]), orientation=ut.random_quaternion()\n",
    ")\n",
    "set_object_state_from_agent(\n",
    "    sim,\n",
    "    obj_2,\n",
    "    offset=np.array([-0.5, 2.0, -1.0]),\n",
    "    orientation=ut.random_quaternion(),\n",
    ")\n",
    "\n",
    "# @markdown 2. Configure and add an occluder from a scaled cube primitive.\n",
    "# Get a default cube primitive template\n",
    "cube_handle = obj_attr_mgr.get_template_handles(\"cube\")[0]\n",
    "cube_template_cpy = obj_attr_mgr.get_template_by_handle(cube_handle)\n",
    "# Modify the template's configured scale.\n",
    "cube_template_cpy.scale = np.array([0.32, 0.075, 0.01])\n",
    "# Register the modified template under a new name.\n",
    "obj_attr_mgr.register_template(cube_template_cpy, \"occluder_cube\")\n",
    "# Instance and place the occluder object from the template.\n",
    "occluder_obj = rigid_obj_mgr.add_object_by_template_handle(\"occluder_cube\")\n",
    "set_object_state_from_agent(sim, occluder_obj, offset=np.array([0.0, 1.4, -0.4]))\n",
    "occluder_obj.motion_type = habitat_sim.physics.MotionType.KINEMATIC\n",
    "# fmt off\n",
    "# @markdown 3. Simulate at 60Hz, removing one object when it's center of mass drops below that of the occluder.\n",
    "# fmt on\n",
    "# Simulate and remove object when it passes the midpoint of the occluder\n",
    "dt = 2.0\n",
    "print(\"Simulating \" + str(dt) + \" world seconds.\")\n",
    "observations = []\n",
    "# simulate at 60Hz to the nearest fixed timestep\n",
    "start_time = sim.get_world_time()\n",
    "\n",
    "while sim.get_world_time() < start_time + dt:\n",
    "    sim.step_physics(1.0 / 60.0)\n",
    "    # remove the object once it passes the occluder center and it still exists/hasn't already been removed\n",
    "    if obj_2.is_alive and obj_2.translation[1] <= occluder_obj.translation[1]:\n",
    "        rigid_obj_mgr.remove_object_by_id(obj_2.object_id)\n",
    "    observations.append(sim.get_sensor_observations())\n",
    "\n",
    "example_type = \"object permanence\"\n",
    "if make_video:\n",
    "    vut.make_video(\n",
    "        observations,\n",
    "        \"color_sensor_1st_person\",\n",
    "        \"color\",\n",
    "        output_path + example_type,\n",
    "        open_vid=show_video,\n",
    "    )\n",
    "rigid_obj_mgr.remove_all_objects()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "# @title Physical Plausibility Classification { display-mode: \"form\" }\n",
    "# @markdown This example demonstrates a physical plausibility expirement. A sphere\n",
    "# @markdown is dropped onto the back of a couch to roll onto the floor. Optionally,\n",
    "# @markdown an invisible plane is introduced for the sphere to roll onto producing\n",
    "# @markdown non-physical motion.\n",
    "\n",
    "introduce_surface = True  # @param{type:\"boolean\"}\n",
    "\n",
    "rigid_obj_mgr.remove_all_objects()\n",
    "\n",
    "# add a rolling object\n",
    "obj_attr_mgr = sim.get_object_template_manager()\n",
    "sphere_handle = obj_attr_mgr.get_template_handles(\"uvSphereSolid\")[0]\n",
    "obj_1 = rigid_obj_mgr.add_object_by_template_handle(sphere_handle)\n",
    "set_object_state_from_agent(sim, obj_1, offset=np.array([1.0, 1.6, -1.95]))\n",
    "\n",
    "if introduce_surface:\n",
    "    # optionally add invisible surface\n",
    "    cube_handle = obj_attr_mgr.get_template_handles(\"cube\")[0]\n",
    "    cube_template_cpy = obj_attr_mgr.get_template_by_handle(cube_handle)\n",
    "    # Modify the template.\n",
    "    cube_template_cpy.scale = np.array([1.0, 0.04, 1.0])\n",
    "    surface_is_visible = False  # @param{type:\"boolean\"}\n",
    "    cube_template_cpy.is_visibile = surface_is_visible\n",
    "    # Register the modified template under a new name.\n",
    "    obj_attr_mgr.register_template(cube_template_cpy, \"invisible_surface\")\n",
    "\n",
    "    # Instance and place the surface object from the template.\n",
    "    surface_obj = rigid_obj_mgr.add_object_by_template_handle(\"invisible_surface\")\n",
    "    set_object_state_from_agent(sim, surface_obj, offset=np.array([0.4, 0.88, -1.6]))\n",
    "    surface_obj.motion_type = habitat_sim.physics.MotionType.STATIC\n",
    "\n",
    "\n",
    "example_type = \"physical plausibility\"\n",
    "observations = simulate(sim, dt=3.0)\n",
    "if make_video:\n",
    "    vut.make_video(\n",
    "        observations,\n",
    "        \"color_sensor_1st_person\",\n",
    "        \"color\",\n",
    "        output_path + example_type,\n",
    "        open_vid=show_video,\n",
    "    )\n",
    "rigid_obj_mgr.remove_all_objects()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Trajectory Prediction { display-mode: \"form\" }\n",
    "# @markdown This example demonstrates setup of a trajectory prediction task.\n",
    "# @markdown Boxes are placed in a target zone and a sphere is given an initial\n",
    "# @markdown velocity with the goal of knocking the boxes off the counter.\n",
    "\n",
    "# @markdown ---\n",
    "# @markdown Configure Parameters:\n",
    "\n",
    "rigid_obj_mgr.remove_all_objects()\n",
    "\n",
    "seed = 2  # @param{type:\"integer\"}\n",
    "random.seed(seed)\n",
    "sim.seed(seed)\n",
    "np.random.seed(seed)\n",
    "\n",
    "# setup agent state manually to face the bar\n",
    "agent_state = sim.agents[0].state\n",
    "agent_state.position = np.array([-1.97496, 0.072447, -2.0894])\n",
    "agent_state.rotation = ut.quat_from_coeffs([0, -1, 0, 0])\n",
    "sim.agents[0].set_state(agent_state)\n",
    "\n",
    "# load the target objects\n",
    "cheezit_handle = obj_attr_mgr.get_template_handles(\"cheezit\")[0]\n",
    "# create range from center and half-extent\n",
    "target_zone = mn.Range3D.from_center(\n",
    "    mn.Vector3(-2.07496, 1.07245, -0.2894), mn.Vector3(0.5, 0.05, 0.1)\n",
    ")\n",
    "num_targets = 9  # @param{type:\"integer\"}\n",
    "for _target in range(num_targets):\n",
    "    obj = rigid_obj_mgr.add_object_by_template_handle(cheezit_handle)\n",
    "    # rotate boxes off of their sides\n",
    "    obj.rotation = mn.Quaternion.rotation(\n",
    "        mn.Rad(-mn.math.pi_half), mn.Vector3(1.0, 0, 0)\n",
    "    )\n",
    "    # sample state from the target zone\n",
    "    if not sample_object_state(sim, obj, False, True, 100, target_zone):\n",
    "        rigid_obj_mgr.remove_object_by_id(obj.object_id)\n",
    "\n",
    "\n",
    "show_target_zone = False  # @param{type:\"boolean\"}\n",
    "if show_target_zone:\n",
    "    # Get and modify the wire cube template from the range\n",
    "    cube_handle = obj_attr_mgr.get_template_handles(\"cubeWireframe\")[0]\n",
    "    cube_template_cpy = obj_attr_mgr.get_template_by_handle(cube_handle)\n",
    "    cube_template_cpy.scale = target_zone.size()\n",
    "    cube_template_cpy.is_collidable = False\n",
    "    # Register the modified template under a new name.\n",
    "    obj_attr_mgr.register_template(cube_template_cpy, \"target_zone\")\n",
    "    # instance and place the object from the template\n",
    "    target_zone_obj = rigid_obj_mgr.add_object_by_template_handle(\"target_zone\")\n",
    "    target_zone_obj.translation = target_zone.center()\n",
    "    target_zone_obj.motion_type = habitat_sim.physics.MotionType.STATIC\n",
    "    # print(\"target_zone_center = \" + str(target_zone_obj.translation))\n",
    "\n",
    "# @markdown ---\n",
    "# @markdown ###Ball properties:\n",
    "# load the ball\n",
    "sphere_handle = obj_attr_mgr.get_template_handles(\"uvSphereSolid\")[0]\n",
    "sphere_template_cpy = obj_attr_mgr.get_template_by_handle(sphere_handle)\n",
    "# @markdown Mass:\n",
    "ball_mass = 5.01  # @param {type:\"slider\", min:0.01, max:50.0, step:0.01}\n",
    "sphere_template_cpy.mass = ball_mass\n",
    "obj_attr_mgr.register_template(sphere_template_cpy, \"ball\")\n",
    "\n",
    "ball_obj = rigid_obj_mgr.add_object_by_template_handle(\"ball\")\n",
    "set_object_state_from_agent(sim, ball_obj, offset=np.array([0, 1.4, 0]))\n",
    "\n",
    "# @markdown Initial linear velocity (m/sec):\n",
    "lin_vel_x = 0  # @param {type:\"slider\", min:-10, max:10, step:0.1}\n",
    "lin_vel_y = 1  # @param {type:\"slider\", min:-10, max:10, step:0.1}\n",
    "lin_vel_z = 5  # @param {type:\"slider\", min:0, max:10, step:0.1}\n",
    "ball_obj.linear_velocity = mn.Vector3(lin_vel_x, lin_vel_y, lin_vel_z)\n",
    "\n",
    "# @markdown Initial angular velocity (rad/sec):\n",
    "ang_vel_x = 0  # @param {type:\"slider\", min:-100, max:100, step:0.1}\n",
    "ang_vel_y = 0  # @param {type:\"slider\", min:-100, max:100, step:0.1}\n",
    "ang_vel_z = 0  # @param {type:\"slider\", min:-100, max:100, step:0.1}\n",
    "ball_obj.angular_velocity = mn.Vector3(ang_vel_x, ang_vel_y, ang_vel_z)\n",
    "\n",
    "example_type = \"trajectory prediction\"\n",
    "observations = simulate(sim, dt=3.0)\n",
    "if make_video:\n",
    "    vut.make_video(\n",
    "        observations,\n",
    "        \"color_sensor_1st_person\",\n",
    "        \"color\",\n",
    "        output_path + example_type,\n",
    "        open_vid=show_video,\n",
    "    )\n",
    "rigid_obj_mgr.remove_all_objects()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Generating Scene Clutter on the NavMesh\n",
    "\n",
    "The NavMesh can be used to place objects on surfaces in the scene. Once objects are placed they can be set to MotionType::STATIC, indiciating that they are not moveable (kinematics and dynamics are disabled for STATIC objects). The NavMesh can then be recomputed including STATIC object meshes in the voxelization.\n",
    "\n",
    "This example demonstrates using the NavMesh to generate a cluttered scene for navigation. In this script we will:\n",
    "\n",
    "- Place objects off the NavMesh\n",
    "- Set them to MotionType::STATIC\n",
    "- Recompute the NavMesh including STATIC objects\n",
    "- Visualize the results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Initialize Simulator and Load Scene { display-mode: \"form\" }\n",
    "# @markdown (load the apartment_1 scene for clutter generation in an open space)\n",
    "sim_settings = make_default_settings()\n",
    "sim_settings[\"scene\"] = \"./data/scene_datasets/habitat-test-scenes/apartment_1.glb\"\n",
    "sim_settings[\"sensor_pitch\"] = 0\n",
    "\n",
    "make_simulator_from_settings(sim_settings)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Select clutter object from the GUI: { display-mode: \"form\" }\n",
    "\n",
    "build_widget_ui(obj_attr_mgr, prim_attr_mgr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Clutter Generation Script\n",
    "# @markdown Configure some example parameters:\n",
    "\n",
    "seed = 2  # @param {type:\"integer\"}\n",
    "random.seed(seed)\n",
    "sim.seed(seed)\n",
    "np.random.seed(seed)\n",
    "\n",
    "# position the agent\n",
    "sim.agents[0].scene_node.translation = mn.Vector3(0.5, -1.60025, 6.15)\n",
    "print(sim.agents[0].scene_node.rotation)\n",
    "agent_orientation_y = -23  # @param{type:\"integer\"}\n",
    "sim.agents[0].scene_node.rotation = mn.Quaternion.rotation(\n",
    "    mn.Deg(agent_orientation_y), mn.Vector3(0, 1.0, 0)\n",
    ")\n",
    "\n",
    "num_objects = 10  # @param {type:\"slider\", min:0, max:20, step:1}\n",
    "object_scale = 5  # @param {type:\"slider\", min:1.0, max:10.0, step:0.1}\n",
    "\n",
    "# scale up the selected object\n",
    "sel_obj_template_cpy = obj_attr_mgr.get_template_by_handle(sel_file_obj_handle)\n",
    "sel_obj_template_cpy.scale = mn.Vector3(object_scale)\n",
    "obj_attr_mgr.register_template(sel_obj_template_cpy, \"scaled_sel_obj\")\n",
    "\n",
    "# add the selected object\n",
    "sim.navmesh_visualization = True\n",
    "rigid_obj_mgr.remove_all_objects()\n",
    "fails = 0\n",
    "for _obj in range(num_objects):\n",
    "    obj_1 = rigid_obj_mgr.add_object_by_template_handle(\"scaled_sel_obj\")\n",
    "\n",
    "    # place the object\n",
    "    placement_success = sample_object_state(\n",
    "        sim, obj_1, from_navmesh=True, maintain_object_up=True, max_tries=100\n",
    "    )\n",
    "    if not placement_success:\n",
    "        fails += 1\n",
    "        rigid_obj_mgr.remove_object_by_id(obj_1.object_id)\n",
    "    else:\n",
    "        # set the objects to STATIC so they can be added to the NavMesh\n",
    "        obj_1.motion_type = habitat_sim.physics.MotionType.STATIC\n",
    "\n",
    "print(\"Placement fails = \" + str(fails) + \"/\" + str(num_objects))\n",
    "\n",
    "# recompute the NavMesh with STATIC objects\n",
    "navmesh_settings = habitat_sim.NavMeshSettings()\n",
    "navmesh_settings.set_defaults()\n",
    "navmesh_success = sim.recompute_navmesh(\n",
    "    sim.pathfinder, navmesh_settings, include_static_objects=True\n",
    ")\n",
    "\n",
    "# simulate and collect observations\n",
    "example_type = \"clutter generation\"\n",
    "observations = simulate(sim, dt=2.0)\n",
    "if make_video:\n",
    "    vut.make_video(\n",
    "        observations,\n",
    "        \"color_sensor_1st_person\",\n",
    "        \"color\",\n",
    "        output_path + example_type,\n",
    "        open_vid=show_video,\n",
    "    )\n",
    "obj_attr_mgr.remove_template_by_handle(\"scaled_sel_obj\")\n",
    "rigid_obj_mgr.remove_all_objects()\n",
    "sim.navmesh_visualization = False"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Embodied Continuous Navigation"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The following example demonstrates setup and excecution of an embodied navigation and interaction scenario. An object and an agent embodied by a rigid locobot mesh are placed randomly on the NavMesh. A path is computed for the agent to reach the object which is executed by a continuous path-following controller. The object is then kinematically gripped by the agent and a second path is computed for the agent to reach a goal location, also executed by a continuous controller. The gripped object is then released and thrown in front of the agent.\n",
    "\n",
    "Note: for a more detailed explanation of the NavMesh see Habitat-sim Basics tutorial."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "# @title Select target object from the GUI: { display-mode: \"form\" }\n",
    "\n",
    "build_widget_ui(obj_attr_mgr, prim_attr_mgr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Continuous Path Follower Example { display-mode: \"form\" }\n",
    "# @markdown A python Class to provide waypoints along a path given agent states\n",
    "\n",
    "\n",
    "class ContinuousPathFollower:\n",
    "    def __init__(self, sim, path, agent_scene_node, waypoint_threshold):\n",
    "        self._sim = sim\n",
    "        self._points = path.points[:]\n",
    "        assert len(self._points) > 0\n",
    "        self._length = path.geodesic_distance\n",
    "        self._node = agent_scene_node\n",
    "        self._threshold = waypoint_threshold\n",
    "        self._step_size = 0.01\n",
    "        self.progress = 0  # geodesic distance -> [0,1]\n",
    "        self.waypoint = path.points[0]\n",
    "\n",
    "        # setup progress waypoints\n",
    "        _point_progress = [0]\n",
    "        _segment_tangents = []\n",
    "        _length = self._length\n",
    "        for ix, point in enumerate(self._points):\n",
    "            if ix > 0:\n",
    "                segment = point - self._points[ix - 1]\n",
    "                segment_length = np.linalg.norm(segment)\n",
    "                segment_tangent = segment / segment_length\n",
    "                _point_progress.append(\n",
    "                    segment_length / _length + _point_progress[ix - 1]\n",
    "                )\n",
    "                # t-1 -> t\n",
    "                _segment_tangents.append(segment_tangent)\n",
    "        self._point_progress = _point_progress\n",
    "        self._segment_tangents = _segment_tangents\n",
    "        # final tangent is duplicated\n",
    "        self._segment_tangents.append(self._segment_tangents[-1])\n",
    "\n",
    "        print(\"self._length = \" + str(self._length))\n",
    "        print(\"num points = \" + str(len(self._points)))\n",
    "        print(\"self._point_progress = \" + str(self._point_progress))\n",
    "        print(\"self._segment_tangents = \" + str(self._segment_tangents))\n",
    "\n",
    "    def pos_at(self, progress):\n",
    "        if progress <= 0:\n",
    "            return self._points[0]\n",
    "        elif progress >= 1.0:\n",
    "            return self._points[-1]\n",
    "\n",
    "        path_ix = 0\n",
    "        for ix, prog in enumerate(self._point_progress):\n",
    "            if prog > progress:\n",
    "                path_ix = ix\n",
    "                break\n",
    "\n",
    "        segment_distance = self._length * (progress - self._point_progress[path_ix - 1])\n",
    "        return (\n",
    "            self._points[path_ix - 1]\n",
    "            + self._segment_tangents[path_ix - 1] * segment_distance\n",
    "        )\n",
    "\n",
    "    def update_waypoint(self):\n",
    "        if self.progress < 1.0:\n",
    "            wp_disp = self.waypoint - self._node.absolute_translation\n",
    "            wp_dist = np.linalg.norm(wp_disp)\n",
    "            node_pos = self._node.absolute_translation\n",
    "            step_size = self._step_size\n",
    "            threshold = self._threshold\n",
    "            while wp_dist < threshold:\n",
    "                self.progress += step_size\n",
    "                self.waypoint = self.pos_at(self.progress)\n",
    "                if self.progress >= 1.0:\n",
    "                    break\n",
    "                wp_disp = self.waypoint - node_pos\n",
    "                wp_dist = np.linalg.norm(wp_disp)\n",
    "\n",
    "\n",
    "def setup_path_visualization(path_follower, vis_samples=100):\n",
    "    vis_objs = []\n",
    "    sphere_handle = obj_attr_mgr.get_template_handles(\"uvSphereSolid\")[0]\n",
    "    sphere_template_cpy = obj_attr_mgr.get_template_by_handle(sphere_handle)\n",
    "    sphere_template_cpy.scale *= 0.2\n",
    "    template_id = obj_attr_mgr.register_template(sphere_template_cpy, \"mini-sphere\")\n",
    "    print(\"template_id = \" + str(template_id))\n",
    "    if template_id < 0:\n",
    "        return None\n",
    "    vis_objs.append(rigid_obj_mgr.add_object_by_template_handle(sphere_handle))\n",
    "\n",
    "    for point in path_follower._points:\n",
    "        cp_obj = rigid_obj_mgr.add_object_by_template_handle(sphere_handle)\n",
    "        if cp_obj.object_id < 0:\n",
    "            print(cp_obj.object_id)\n",
    "            return None\n",
    "        cp_obj.translation = point\n",
    "        vis_objs.append(cp_obj)\n",
    "\n",
    "    for i in range(vis_samples):\n",
    "        cp_obj = rigid_obj_mgr.add_object_by_template_handle(\"mini-sphere\")\n",
    "        if cp_obj.object_id < 0:\n",
    "            print(cp_obj.object_id)\n",
    "            return None\n",
    "        cp_obj.translation = path_follower.pos_at(float(i / vis_samples))\n",
    "        vis_objs.append(cp_obj)\n",
    "\n",
    "    for obj in vis_objs:\n",
    "        if obj.object_id < 0:\n",
    "            print(obj.object_id)\n",
    "            return None\n",
    "\n",
    "    for obj in vis_objs:\n",
    "        obj.motion_type = habitat_sim.physics.MotionType.KINEMATIC\n",
    "\n",
    "    return vis_objs\n",
    "\n",
    "\n",
    "def track_waypoint(waypoint, rs, vc, dt=1.0 / 60.0):\n",
    "    angular_error_threshold = 0.5\n",
    "    max_linear_speed = 1.0\n",
    "    max_turn_speed = 1.0\n",
    "    glob_forward = rs.rotation.transform_vector(mn.Vector3(0, 0, -1.0)).normalized()\n",
    "    glob_right = rs.rotation.transform_vector(mn.Vector3(-1.0, 0, 0)).normalized()\n",
    "    to_waypoint = mn.Vector3(waypoint) - rs.translation\n",
    "    u_to_waypoint = to_waypoint.normalized()\n",
    "    angle_error = float(mn.math.angle(glob_forward, u_to_waypoint))\n",
    "\n",
    "    new_velocity = 0\n",
    "    if angle_error < angular_error_threshold:\n",
    "        # speed up to max\n",
    "        new_velocity = (vc.linear_velocity[2] - max_linear_speed) / 2.0\n",
    "    else:\n",
    "        # slow down to 0\n",
    "        new_velocity = (vc.linear_velocity[2]) / 2.0\n",
    "    vc.linear_velocity = mn.Vector3(0, 0, new_velocity)\n",
    "\n",
    "    # angular part\n",
    "    rot_dir = 1.0\n",
    "    if mn.math.dot(glob_right, u_to_waypoint) < 0:\n",
    "        rot_dir = -1.0\n",
    "    angular_correction = 0.0\n",
    "    if angle_error > (max_turn_speed * 10.0 * dt):\n",
    "        angular_correction = max_turn_speed\n",
    "    else:\n",
    "        angular_correction = angle_error / 2.0\n",
    "\n",
    "    vc.angular_velocity = mn.Vector3(\n",
    "        0, np.clip(rot_dir * angular_correction, -max_turn_speed, max_turn_speed), 0\n",
    "    )\n",
    "\n",
    "\n",
    "# grip/release and sync gripped object state kineamtically\n",
    "class ObjectGripper:\n",
    "    def __init__(\n",
    "        self,\n",
    "        sim,\n",
    "        agent_scene_node,\n",
    "        end_effector_offset,\n",
    "    ):\n",
    "        self._sim = sim\n",
    "        self._node = agent_scene_node\n",
    "        self._offset = end_effector_offset\n",
    "        self._gripped_obj = None\n",
    "        self._gripped_obj_buffer = 0  # bounding box y dimension offset of the offset\n",
    "\n",
    "    def sync_states(self):\n",
    "        if self._gripped_obj is not None:\n",
    "            agent_t = self._node.absolute_transformation_matrix()\n",
    "            agent_t.translation += self._offset + mn.Vector3(\n",
    "                0, self._gripped_obj_buffer, 0.0\n",
    "            )\n",
    "            self._gripped_obj.transformation = agent_t\n",
    "\n",
    "    def grip(self, obj):\n",
    "        if self._gripped_obj is not None:\n",
    "            print(\"Oops, can't carry more than one item.\")\n",
    "            return\n",
    "        self._gripped_obj = obj\n",
    "        obj.motion_type = habitat_sim.physics.MotionType.KINEMATIC\n",
    "        object_node = obj.root_scene_node\n",
    "        self._gripped_obj_buffer = object_node.cumulative_bb.size_y() / 2.0\n",
    "        self.sync_states()\n",
    "\n",
    "    def release(self):\n",
    "        if self._gripped_obj is None:\n",
    "            print(\"Oops, can't release nothing.\")\n",
    "            return\n",
    "        self._gripped_obj.motion_type = habitat_sim.physics.MotionType.DYNAMIC\n",
    "        self._gripped_obj.linear_velocity = (\n",
    "            self._node.absolute_transformation_matrix().transform_vector(\n",
    "                mn.Vector3(0, 0, -1.0)\n",
    "            )\n",
    "            + mn.Vector3(0, 2.0, 0)\n",
    "        )\n",
    "        self._gripped_obj = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @title Embodied Continuous Navigation Example { display-mode: \"form\" }\n",
    "# @markdown This example cell runs the object retrieval task.\n",
    "\n",
    "# @markdown First the Simulator is re-initialized with:\n",
    "# @markdown - a 3rd person camera view\n",
    "# @markdown - modified 1st person sensor placement\n",
    "sim_settings = make_default_settings()\n",
    "# fmt: off\n",
    "sim_settings[\"scene\"] = \"./data/scene_datasets/mp3d_example/17DRP5sb8fy/17DRP5sb8fy.glb\"  # @param{type:\"string\"}\n",
    "# fmt: on\n",
    "sim_settings[\"sensor_pitch\"] = 0\n",
    "sim_settings[\"sensor_height\"] = 0.6\n",
    "sim_settings[\"color_sensor_3rd_person\"] = True\n",
    "sim_settings[\"depth_sensor_1st_person\"] = True\n",
    "sim_settings[\"semantic_sensor_1st_person\"] = True\n",
    "\n",
    "make_simulator_from_settings(sim_settings)\n",
    "\n",
    "default_nav_mesh_settings = habitat_sim.NavMeshSettings()\n",
    "default_nav_mesh_settings.set_defaults()\n",
    "inflated_nav_mesh_settings = habitat_sim.NavMeshSettings()\n",
    "inflated_nav_mesh_settings.set_defaults()\n",
    "inflated_nav_mesh_settings.agent_radius = 0.2\n",
    "inflated_nav_mesh_settings.agent_height = 1.5\n",
    "recompute_successful = sim.recompute_navmesh(sim.pathfinder, inflated_nav_mesh_settings)\n",
    "if not recompute_successful:\n",
    "    print(\"Failed to recompute navmesh!\")\n",
    "\n",
    "# @markdown ---\n",
    "# @markdown ### Set other example parameters:\n",
    "seed = 24  # @param {type:\"integer\"}\n",
    "random.seed(seed)\n",
    "sim.seed(seed)\n",
    "np.random.seed(seed)\n",
    "\n",
    "sim.config.sim_cfg.allow_sliding = True  # @param {type:\"boolean\"}\n",
    "\n",
    "print(sel_file_obj_handle)\n",
    "# load a selected target object and place it on the NavMesh\n",
    "obj_1 = rigid_obj_mgr.add_object_by_template_handle(sel_file_obj_handle)\n",
    "\n",
    "# load the locobot_merged asset\n",
    "locobot_template_handle = obj_attr_mgr.get_file_template_handles(\"locobot\")[0]\n",
    "\n",
    "# add robot object to the scene with the agent/camera SceneNode attached\n",
    "locobot_obj = rigid_obj_mgr.add_object_by_template_handle(\n",
    "    locobot_template_handle, sim.agents[0].scene_node\n",
    ")\n",
    "\n",
    "# set the agent's body to kinematic since we will be updating position manually\n",
    "locobot_obj.motion_type = habitat_sim.physics.MotionType.KINEMATIC\n",
    "\n",
    "# create and configure a new VelocityControl structure\n",
    "# Note: this is NOT the object's VelocityControl, so it will not be consumed automatically in sim.step_physics\n",
    "vel_control = habitat_sim.physics.VelocityControl()\n",
    "vel_control.controlling_lin_vel = True\n",
    "vel_control.lin_vel_is_local = True\n",
    "vel_control.controlling_ang_vel = True\n",
    "vel_control.ang_vel_is_local = True\n",
    "\n",
    "# reset observations and robot state\n",
    "locobot_obj.translation = sim.pathfinder.get_random_navigable_point()\n",
    "observations = []\n",
    "\n",
    "# get shortest path to the object from the agent position\n",
    "found_path = False\n",
    "path1 = habitat_sim.ShortestPath()\n",
    "path2 = habitat_sim.ShortestPath()\n",
    "while not found_path:\n",
    "    if not sample_object_state(\n",
    "        sim, obj_1, from_navmesh=True, maintain_object_up=True, max_tries=1000\n",
    "    ):\n",
    "        print(\"Couldn't find an initial object placement. Aborting.\")\n",
    "        break\n",
    "    path1.requested_start = locobot_obj.translation\n",
    "    path1.requested_end = obj_1.translation\n",
    "    path2.requested_start = path1.requested_end\n",
    "    path2.requested_end = sim.pathfinder.get_random_navigable_point()\n",
    "\n",
    "    found_path = sim.pathfinder.find_path(path1) and sim.pathfinder.find_path(path2)\n",
    "\n",
    "if not found_path:\n",
    "    print(\"Could not find path to object, aborting!\")\n",
    "\n",
    "vis_objs = []\n",
    "\n",
    "recompute_successful = sim.recompute_navmesh(sim.pathfinder, default_nav_mesh_settings)\n",
    "if not recompute_successful:\n",
    "    print(\"Failed to recompute navmesh 2!\")\n",
    "\n",
    "gripper = ObjectGripper(sim, locobot_obj.root_scene_node, np.array([0.0, 0.6, 0.0]))\n",
    "continuous_path_follower = ContinuousPathFollower(\n",
    "    sim, path1, locobot_obj.root_scene_node, waypoint_threshold=0.4\n",
    ")\n",
    "\n",
    "show_waypoint_indicators = False  # @param {type:\"boolean\"}\n",
    "time_step = 1.0 / 30.0\n",
    "for i in range(2):\n",
    "    if i == 1:\n",
    "        gripper.grip(obj_1)\n",
    "        continuous_path_follower = ContinuousPathFollower(\n",
    "            sim, path2, locobot_obj.root_scene_node, waypoint_threshold=0.4\n",
    "        )\n",
    "\n",
    "    if show_waypoint_indicators:\n",
    "        for vis_obj in vis_objs:\n",
    "            rigid_obj_mgr.remove_object_by_id(vis_obj.object_id)\n",
    "        vis_objs = setup_path_visualization(continuous_path_follower)\n",
    "\n",
    "    # manually control the object's kinematic state via velocity integration\n",
    "    start_time = sim.get_world_time()\n",
    "    max_time = 30.0\n",
    "    while (\n",
    "        continuous_path_follower.progress < 1.0\n",
    "        and sim.get_world_time() - start_time < max_time\n",
    "    ):\n",
    "        continuous_path_follower.update_waypoint()\n",
    "        if show_waypoint_indicators:\n",
    "            vis_objs[0].translation = continuous_path_follower.waypoint\n",
    "\n",
    "        if locobot_obj.object_id < 0:\n",
    "            print(\"locobot_id \" + str(locobot_obj.object_id))\n",
    "            break\n",
    "\n",
    "        previous_rigid_state = locobot_obj.rigid_state\n",
    "\n",
    "        # set velocities based on relative waypoint position/direction\n",
    "        track_waypoint(\n",
    "            continuous_path_follower.waypoint,\n",
    "            previous_rigid_state,\n",
    "            vel_control,\n",
    "            dt=time_step,\n",
    "        )\n",
    "\n",
    "        # manually integrate the rigid state\n",
    "        target_rigid_state = vel_control.integrate_transform(\n",
    "            time_step, previous_rigid_state\n",
    "        )\n",
    "\n",
    "        # snap rigid state to navmesh and set state to object/agent\n",
    "        end_pos = sim.step_filter(\n",
    "            previous_rigid_state.translation, target_rigid_state.translation\n",
    "        )\n",
    "        locobot_obj.translation = end_pos\n",
    "        locobot_obj.rotation = target_rigid_state.rotation\n",
    "\n",
    "        # Check if a collision occured\n",
    "        dist_moved_before_filter = (\n",
    "            target_rigid_state.translation - previous_rigid_state.translation\n",
    "        ).dot()\n",
    "        dist_moved_after_filter = (end_pos - previous_rigid_state.translation).dot()\n",
    "\n",
    "        # NB: There are some cases where ||filter_end - end_pos|| > 0 when a\n",
    "        # collision _didn't_ happen. One such case is going up stairs.  Instead,\n",
    "        # we check to see if the the amount moved after the application of the filter\n",
    "        # is _less_ than the amount moved before the application of the filter\n",
    "        EPS = 1e-5\n",
    "        collided = (dist_moved_after_filter + EPS) < dist_moved_before_filter\n",
    "\n",
    "        gripper.sync_states()\n",
    "        # run any dynamics simulation\n",
    "        sim.step_physics(time_step)\n",
    "\n",
    "        # render observation\n",
    "        observations.append(sim.get_sensor_observations())\n",
    "\n",
    "# release\n",
    "gripper.release()\n",
    "start_time = sim.get_world_time()\n",
    "while sim.get_world_time() - start_time < 2.0:\n",
    "    sim.step_physics(time_step)\n",
    "    observations.append(sim.get_sensor_observations())\n",
    "\n",
    "# video rendering with embedded 1st person view\n",
    "video_prefix = \"fetch\"\n",
    "if make_video:\n",
    "    overlay_dims = (int(sim_settings[\"width\"] / 5), int(sim_settings[\"height\"] / 5))\n",
    "    print(\"overlay_dims = \" + str(overlay_dims))\n",
    "    overlay_settings = [\n",
    "        {\n",
    "            \"obs\": \"color_sensor_1st_person\",\n",
    "            \"type\": \"color\",\n",
    "            \"dims\": overlay_dims,\n",
    "            \"pos\": (10, 10),\n",
    "            \"border\": 2,\n",
    "        },\n",
    "        {\n",
    "            \"obs\": \"depth_sensor_1st_person\",\n",
    "            \"type\": \"depth\",\n",
    "            \"dims\": overlay_dims,\n",
    "            \"pos\": (10, 30 + overlay_dims[1]),\n",
    "            \"border\": 2,\n",
    "        },\n",
    "        {\n",
    "            \"obs\": \"semantic_sensor_1st_person\",\n",
    "            \"type\": \"semantic\",\n",
    "            \"dims\": overlay_dims,\n",
    "            \"pos\": (10, 50 + overlay_dims[1] * 2),\n",
    "            \"border\": 2,\n",
    "        },\n",
    "    ]\n",
    "    print(\"overlay_settings = \" + str(overlay_settings))\n",
    "\n",
    "    vut.make_video(\n",
    "        observations=observations,\n",
    "        primary_obs=\"color_sensor_3rd_person\",\n",
    "        primary_obs_type=\"color\",\n",
    "        video_file=output_path + video_prefix,\n",
    "        fps=int(1.0 / time_step),\n",
    "        open_vid=show_video,\n",
    "        overlay_settings=overlay_settings,\n",
    "        depth_clip=10.0,\n",
    "    )\n",
    "\n",
    "# remove locobot while leaving the agent node for later use\n",
    "rigid_obj_mgr.remove_object_by_id(locobot_obj.object_id, delete_object_node=False)\n",
    "rigid_obj_mgr.remove_all_objects()"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "collapsed_sections": [],
   "name": "ECCV 2020: Habitat-sim Interactivity",
   "provenance": []
  },
  "jupytext": {
   "cell_metadata_filter": "-all",
   "formats": "nb_python//py:percent,colabs//ipynb",
   "main_language": "python",
   "notebook_metadata_filter": "all"
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
