{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Overview\n",
    "This tutorial covers the basics of using Habitat 2.0 including: setting up\n",
    "the environment, creating custom environments, and creating new episode\n",
    "datasets."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Play a teaser video\n",
    "from dataclasses import dataclass\n",
    "\n",
    "from habitat.config.default import get_agent_config\n",
    "from habitat.config.default_structured_configs import (\n",
    "    MeasurementConfig,\n",
    "    ThirdRGBSensorConfig,\n",
    ")\n",
    "\n",
    "try:\n",
    "    from IPython.display import IFrame\n",
    "\n",
    "    # NOTE: this file is unreachable\n",
    "    IFrame(\n",
    "        src=\"https://drive.google.com/file/d/1ltrse38i8pnJPGAXlThylcdy8PMjUMKh/preview\",\n",
    "        width=640,\n",
    "        height=480,\n",
    "    )\n",
    "\n",
    "except Exception:\n",
    "    pass"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "# Imports\n",
    "import os\n",
    "\n",
    "import git\n",
    "import gym\n",
    "import imageio\n",
    "import numpy as np\n",
    "from hydra.core.config_store import ConfigStore\n",
    "\n",
    "import habitat\n",
    "import habitat.gym\n",
    "from habitat.core.embodied_task import Measure\n",
    "from habitat.core.registry import registry\n",
    "from habitat.tasks.rearrange.rearrange_sensors import RearrangeReward\n",
    "from habitat.tasks.rearrange.rearrange_task import RearrangeTask\n",
    "from habitat.utils.visualizations.utils import (\n",
    "    observations_to_image,\n",
    "    overlay_frame,\n",
    ")\n",
    "from habitat_sim.utils import viz_utils as vut\n",
    "\n",
    "# Quiet the Habitat simulator logging\n",
    "os.environ[\"MAGNUM_LOG\"] = \"quiet\"\n",
    "os.environ[\"HABITAT_SIM_LOG\"] = \"quiet\"\n",
    "\n",
    "\n",
    "def insert_render_options(config):\n",
    "    # Added settings to make rendering higher resolution for better visualization\n",
    "    with habitat.config.read_write(config):\n",
    "        config.habitat.simulator.concur_render = False\n",
    "        agent_config = get_agent_config(sim_config=config.habitat.simulator)\n",
    "        agent_config.sim_sensors.update(\n",
    "            {\"third_rgb_sensor\": ThirdRGBSensorConfig(height=512, width=512)}\n",
    "        )\n",
    "    return config\n",
    "\n",
    "\n",
    "import importlib\n",
    "\n",
    "# If the import block fails due to an error like \"'PIL.TiffTags' has no attribute\n",
    "# 'IFD'\", then restart the Colab runtime instance and rerun this cell and the previous cell.\n",
    "import PIL\n",
    "\n",
    "importlib.reload(\n",
    "    PIL.TiffTags  # type: ignore[attr-defined]\n",
    ")  # To potentially avoid PIL problem\n",
    "\n",
    "repo = git.Repo(\".\", search_parent_directories=True)\n",
    "dir_path = repo.working_tree_dir\n",
    "data_path = os.path.join(dir_path, \"data\")\n",
    "output_path = os.path.join(\n",
    "    dir_path, \"examples/tutorials/habitat_lab_visualization/\"\n",
    ")\n",
    "os.makedirs(output_path, exist_ok=True)\n",
    "os.chdir(dir_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Local installation\n",
    "Follow the steps on the [Habitat Lab README](https://github.com/facebookresearch/habitat-lab#installation)."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Quickstart\n",
    "\n",
     "Start with a minimal environment interaction loop using the Habitat API. This sets up the environment, takes random actions within an episode, and then saves a video once the episode ends.\n",
    "\n",
    "If this is your first time running Habitat 2.0 code, the datasets will automatically download which include the ReplicaCAD scenes, episode datasets, and object assets. To manually download this data, run `python -m habitat_sim.utils.datasets_download --uids rearrange_task_assets`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "with habitat.Env(\n",
    "    config=insert_render_options(\n",
    "        habitat.get_config(\n",
    "            os.path.join(\n",
    "                dir_path,\n",
    "                \"habitat-lab/habitat/config/benchmark/rearrange/skills/pick.yaml\",\n",
    "            ),\n",
    "        )\n",
    "    )\n",
    ") as env:\n",
    "    observations = env.reset()  # noqa: F841\n",
    "\n",
    "    print(\"Agent acting inside environment.\")\n",
    "    count_steps = 0\n",
    "    # To save the video\n",
    "    video_file_path = os.path.join(output_path, \"example_interact.mp4\")\n",
    "    video_writer = imageio.get_writer(video_file_path, fps=30)\n",
    "\n",
    "    while not env.episode_over:\n",
    "        observations = env.step(env.action_space.sample())  # noqa: F841\n",
    "        info = env.get_metrics()\n",
    "\n",
    "        render_obs = observations_to_image(observations, info)\n",
    "        render_obs = overlay_frame(render_obs, info)\n",
    "\n",
    "        video_writer.append_data(render_obs)\n",
    "\n",
    "        count_steps += 1\n",
    "    print(\"Episode finished after {} steps.\".format(count_steps))\n",
    "\n",
    "    video_writer.close()\n",
    "    if vut.is_notebook():\n",
    "        vut.display_video(video_file_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Gym API\n",
    "You can also use environments through the Gym API. For more information about how to use the Gym API and the supported tasks, see [this tutorial](https://github.com/facebookresearch/habitat-lab/blob/main/examples/tutorials/colabs/habitat2_gym_tutorial.ipynb)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "env = gym.make(\"HabitatRenderPick-v0\")\n",
    "\n",
    "video_file_path = os.path.join(output_path, \"example_interact.mp4\")\n",
    "video_writer = imageio.get_writer(video_file_path, fps=30)\n",
    "\n",
    "done = False\n",
    "env.reset()\n",
    "while not done:\n",
    "    obs, reward, done, info = env.step(env.action_space.sample())\n",
    "    video_writer.append_data(env.render(mode=\"rgb_array\"))\n",
    "\n",
    "video_writer.close()\n",
    "if vut.is_notebook():\n",
    "    vut.display_video(video_file_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "lines_to_next_cell": 2
   },
   "source": [
    "# Defining New Tasks\n",
    "\n",
    "We will define a task for the robot to navigate to and then pick up a target object in the environment. To support a new task we need:\n",
    "* A task of type `RearrangeTask` which implements the reset function.\n",
    "* Sensor definitions to populate the observation space.\n",
    "* Measurement definitions to define the reward, termination condition, and additional logging information.\n",
    "\n",
    "For other examples of task, sensor, and measurement definitions, [see here\n",
    "for existing tasks](https://github.com/facebookresearch/habitat-lab/tree/main/habitat-lab/habitat/tasks/rearrange/sub_tasks). Tasks, sensors, and measurements are connected through a config file that defines the task."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "@registry.register_task(name=\"RearrangeDemoNavPickTask-v0\")\n",
    "class NavPickTaskV1(RearrangeTask):\n",
    "    \"\"\"\n",
    "    Primarily this is used to implement the episode reset functionality.\n",
    "    Can also implement custom episode step functionality.\n",
    "    \"\"\"\n",
    "\n",
    "    def reset(self, episode):\n",
    "        self.target_object_index = np.random.randint(\n",
    "            0, self._sim.get_n_targets()\n",
    "        )\n",
    "        start_pos = self._sim.pathfinder.get_random_navigable_point()\n",
    "        self._sim.articulated_agent.base_pos = start_pos\n",
    "\n",
    "        # Put any reset logic here.\n",
    "        return super().reset(episode)\n",
    "\n",
    "\n",
    "@registry.register_measure\n",
    "class DistanceToTargetObject(Measure):\n",
    "    \"\"\"\n",
    "    Gets the Euclidean distance to the target object from the end-effector.\n",
    "    \"\"\"\n",
    "\n",
    "    cls_uuid: str = \"distance_to_object\"\n",
    "\n",
    "    def __init__(self, sim, config, *args, **kwargs):\n",
    "        self._sim = sim\n",
    "        self._config = config\n",
    "        super().__init__(**kwargs)\n",
    "\n",
    "    @staticmethod\n",
    "    def _get_uuid(*args, **kwargs):\n",
    "        return DistanceToTargetObject.cls_uuid\n",
    "\n",
    "    def reset_metric(self, *args, episode, **kwargs):\n",
    "        self.update_metric(*args, episode=episode, **kwargs)\n",
    "\n",
    "    def update_metric(self, *args, task, episode, **kwargs):\n",
    "        ee_pos = self._sim.articulated_agent.ee_transform().translation\n",
    "\n",
    "        idxs, _ = self._sim.get_targets()\n",
    "        scene_pos = self._sim.get_scene_pos()[idxs[task.target_object_index]]\n",
    "\n",
    "        # Metric information is stored in the `self._metric` variable.\n",
    "        self._metric = np.linalg.norm(scene_pos - ee_pos, ord=2, axis=-1)\n",
    "\n",
    "\n",
    "@registry.register_measure\n",
    "class NavPickReward(RearrangeReward):\n",
    "    \"\"\"\n",
    "    For every new task, you NEED to implement a reward function.\n",
    "    `RearrangeReward` automatically includes penalties for collisions into the reward function.\n",
    "    \"\"\"\n",
    "\n",
    "    cls_uuid: str = \"navpick_reward\"\n",
    "\n",
    "    def __init__(self, sim, config, *args, **kwargs):\n",
    "        self._sim = sim\n",
    "        self._config = config\n",
     "        # You can get your custom configuration fields defined in NavPickRewardMeasurementConfig\n",
    "        self._scaling_factor = config.scaling_factor\n",
    "        super().__init__(sim=sim, config=config, **kwargs)\n",
    "\n",
    "    @staticmethod\n",
    "    def _get_uuid(*args, **kwargs):\n",
    "        return NavPickReward.cls_uuid\n",
    "\n",
    "    def reset_metric(self, *args, task, episode, **kwargs):\n",
    "        # Measurements can be computed from other measurements.\n",
    "        task.measurements.check_measure_dependencies(\n",
    "            self.uuid,\n",
    "            [\n",
    "                DistanceToTargetObject.cls_uuid,\n",
    "            ],\n",
    "        )\n",
    "        self.update_metric(*args, task=task, episode=episode, **kwargs)\n",
    "\n",
    "    def update_metric(self, *args, task, episode, **kwargs):\n",
    "        ee_to_object_distance = task.measurements.measures[\n",
    "            DistanceToTargetObject.cls_uuid\n",
    "        ].get_metric()\n",
    "\n",
    "        self._metric = -ee_to_object_distance * self._scaling_factor\n",
    "\n",
    "\n",
    "@registry.register_measure\n",
    "class NavPickSuccess(Measure):\n",
    "    \"\"\"\n",
    "    For every new task, you NEED to implement a \"success\" condition.\n",
    "    \"\"\"\n",
    "\n",
    "    cls_uuid: str = \"navpick_success\"\n",
    "\n",
    "    def __init__(self, sim, config, *args, **kwargs):\n",
    "        self._sim = sim\n",
    "        self._config = config\n",
    "        super().__init__(**kwargs)\n",
    "\n",
    "    @staticmethod\n",
    "    def _get_uuid(*args, **kwargs):\n",
    "        return NavPickSuccess.cls_uuid\n",
    "\n",
    "    def reset_metric(self, *args, episode, task, observations, **kwargs):\n",
    "        self.update_metric(\n",
    "            *args,\n",
    "            episode=episode,\n",
    "            task=task,\n",
    "            observations=observations,\n",
    "            **kwargs\n",
    "        )\n",
    "\n",
    "    def update_metric(self, *args, episode, task, observations, **kwargs):\n",
    "        # Check that the agent is holding the correct object.\n",
    "        abs_targ_obj_idx = self._sim.scene_obj_ids[task.target_object_index]\n",
    "        self._metric = abs_targ_obj_idx == self._sim.grasp_mgr.snap_idx\n",
    "\n",
    "\n",
    "@dataclass\n",
    "class DistanceToTargetObjectMeasurementConfig(MeasurementConfig):\n",
    "    type: str = \"DistanceToTargetObject\"\n",
    "\n",
    "\n",
    "@dataclass\n",
    "class NavPickRewardMeasurementConfig(MeasurementConfig):\n",
    "    type: str = \"NavPickReward\"\n",
    "    scaling_factor: float = 0.1\n",
    "    # General Rearrange Reward config\n",
    "    constraint_violate_pen: float = 10.0\n",
    "    force_pen: float = 0.001\n",
    "    max_force_pen: float = 1.0\n",
    "    force_end_pen: float = 10.0\n",
    "    count_coll_pen: float = -1.0\n",
    "    max_count_colls: int = -1\n",
    "    count_coll_end_pen: float = 1.0\n",
    "\n",
    "\n",
    "@dataclass\n",
    "class NavPickSuccessMeasurementConfig(MeasurementConfig):\n",
    "    type: str = \"NavPickSuccess\"\n",
    "\n",
    "\n",
    "cs = ConfigStore.instance()\n",
    "cs.store(\n",
    "    package=\"habitat.task.measurements.distance_to_target_object\",\n",
    "    group=\"habitat/task/measurements\",\n",
    "    name=\"distance_to_target_object\",\n",
    "    node=DistanceToTargetObjectMeasurementConfig,\n",
    ")\n",
    "cs.store(\n",
    "    package=\"habitat.task.measurements.nav_pick_reward\",\n",
    "    group=\"habitat/task/measurements\",\n",
    "    name=\"nav_pick_reward\",\n",
    "    node=NavPickRewardMeasurementConfig,\n",
    ")\n",
    "cs.store(\n",
    "    package=\"habitat.task.measurements.nav_pick_success\",\n",
    "    group=\"habitat/task/measurements\",\n",
    "    name=\"nav_pick_success\",\n",
    "    node=NavPickSuccessMeasurementConfig,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now add all the previously defined task, sensor, and measurement\n",
    "definitions to a config file to finish defining the new Habitat task. For\n",
    "examples of more configs [see here](https://github.com/facebookresearch/habitat-lab/tree/main/habitat-lab/habitat/config/habitat/task/rearrange).\n",
    "\n",
    "This config also defines the action space through the `task.actions` key. You\n",
    "can substitute different base control actions from\n",
    "[here](https://github.com/facebookresearch/habitat-lab/blob/main/habitat-lab/habitat/tasks/rearrange/actions/actions.py),\n",
    "different arm control actions [from\n",
    "here](https://github.com/facebookresearch/habitat-lab/blob/main/habitat-lab/habitat/tasks/rearrange/actions/actions.py),\n",
    "and different grip actions [from here](https://github.com/facebookresearch/habitat-lab/blob/main/habitat-lab/habitat/tasks/rearrange/actions/grip_actions.py)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cfg_txt = \"\"\"\n",
    "# @package _global_\n",
    "\n",
    "defaults:\n",
    "  - /habitat: habitat_config_base\n",
    "  - /habitat/simulator/agents@habitat.simulator.agents.main_agent: agent_base\n",
    "  - /habitat/simulator/sim_sensors@habitat.simulator.agents.main_agent.sim_sensors.head_rgb_sensor: head_rgb_sensor\n",
    "  - /habitat/task: task_config_base\n",
    "  - /habitat/task/actions:\n",
    "    - arm_action\n",
    "    - base_velocity\n",
    "  - /habitat/task/measurements:\n",
    "    - articulated_agent_force\n",
    "    - force_terminate\n",
    "    - distance_to_target_object\n",
    "    - nav_pick_reward\n",
    "    - nav_pick_success\n",
    "  - /habitat/task/lab_sensors:\n",
    "    - target_start_sensor\n",
    "    - joint_sensor\n",
    "  - /habitat/dataset/rearrangement: replica_cad\n",
    "\n",
    "habitat:\n",
    "  environment:\n",
    "    # Number of steps within an episode.\n",
    "    max_episode_steps: 200\n",
    "  task:\n",
    "    type: RearrangeDemoNavPickTask-v0\n",
    "    # Measurements\n",
    "    measurements:\n",
    "      distance_to_target_object:\n",
    "        type: \"DistanceToTargetObject\"\n",
    "      articulated_agent_force:\n",
    "        type: \"RobotForce\"\n",
    "        min_force: 20.0\n",
    "      force_terminate:\n",
    "        type: \"ForceTerminate\"\n",
    "        # Maximum amount of allowed force in Newtons.\n",
    "        max_accum_force: 5000.0\n",
    "      nav_pick_reward:\n",
    "        type: \"NavPickReward\"\n",
    "        scaling_factor: 0.1\n",
    "        # General Rearrange Reward config\n",
    "        constraint_violate_pen: 10.0\n",
    "        force_pen: 0.001\n",
    "        max_force_pen: 1.0\n",
    "        force_end_pen: 10.0\n",
    "      nav_pick_success:\n",
    "        type: \"NavPickSuccess\"\n",
    "    actions:\n",
    "      # Define the action space.\n",
    "      arm_action:\n",
    "        type: \"ArmAction\"\n",
    "        arm_controller: \"ArmRelPosAction\"\n",
    "        grip_controller: \"MagicGraspAction\"\n",
    "        arm_joint_dimensionality: 7\n",
    "        grasp_thresh_dist: 0.15\n",
    "        disable_grip: False\n",
    "        delta_pos_limit: 0.0125\n",
    "        ee_ctrl_lim: 0.015\n",
    "      base_velocity:\n",
    "        type: \"BaseVelAction\"\n",
    "        lin_speed: 12.0\n",
    "        ang_speed: 12.0\n",
    "        allow_dyn_slide: True\n",
    "        allow_back: True\n",
    "  simulator:\n",
    "    type: RearrangeSim-v0\n",
    "    additional_object_paths:\n",
    "      - \"data/objects/ycb/configs/\"\n",
    "    debug_render: False\n",
    "    concur_render: False\n",
    "    auto_sleep: False\n",
    "    agents:\n",
    "      main_agent:\n",
    "        height: 1.5\n",
    "        is_set_start_state: False\n",
    "        radius: 0.1\n",
    "        sim_sensors:\n",
    "          head_rgb_sensor:\n",
    "            height: 128\n",
    "            width: 128\n",
    "        start_position: [0, 0, 0]\n",
    "        start_rotation: [0, 0, 0, 1]\n",
    "        articulated_agent_urdf: ./data/robots/hab_fetch/robots/hab_fetch.urdf\n",
    "        articulated_agent_type: \"FetchRobot\"\n",
    "\n",
    "    # Agent setup\n",
    "    # ARM_REST: [0.6, 0.0, 0.9]\n",
    "    ctrl_freq: 120.0\n",
    "    ac_freq_ratio: 4\n",
    "\n",
    "    # Grasping\n",
    "    hold_thresh: 0.09\n",
    "    grasp_impulse: 1000.0\n",
    "\n",
    "    habitat_sim_v0:\n",
    "      allow_sliding: True\n",
    "      enable_physics: True\n",
    "      gpu_device_id: 0\n",
    "      gpu_gpu: False\n",
    "      physics_config_file: ./data/default.physics_config.json\n",
    "  dataset:\n",
    "    type: RearrangeDataset-v0\n",
    "    split: train\n",
    "    # The dataset to use. Later we will generate our own dataset.\n",
    "    data_path: data/datasets/replica_cad/rearrange/v2/{split}/all_receptacles_10k_1k.json.gz\n",
    "    scenes_dir: \"data/replica_cad/\"\n",
    "\"\"\"\n",
    "nav_pick_cfg_path = os.path.join(data_path, \"nav_pick_demo.yaml\")\n",
    "with open(nav_pick_cfg_path, \"w\") as f:\n",
    "    f.write(cfg_txt)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "The new task can then be loaded via the YAML config file."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with habitat.Env(\n",
    "    config=insert_render_options(habitat.get_config(nav_pick_cfg_path))\n",
    ") as env:\n",
    "    env.reset()\n",
    "\n",
    "    print(\"Agent acting inside environment.\")\n",
    "    count_steps = 0\n",
    "    # To save the video\n",
    "    video_file_path = os.path.join(output_path, \"example_interact.mp4\")\n",
    "    video_writer = imageio.get_writer(video_file_path, fps=30)\n",
    "\n",
    "    while not env.episode_over:\n",
    "        action = env.action_space.sample()\n",
    "        observations = env.step(action)  # noqa: F841\n",
    "        info = env.get_metrics()\n",
    "\n",
    "        render_obs = observations_to_image(observations, info)\n",
    "        render_obs = overlay_frame(render_obs, info)\n",
    "\n",
    "        video_writer.append_data(render_obs)\n",
    "\n",
    "        count_steps += 1\n",
    "    print(\"Episode finished after {} steps.\".format(count_steps))\n",
    "\n",
    "    video_writer.close()\n",
    "    if vut.is_notebook():\n",
    "        vut.display_video(video_file_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Dataset Generation\n",
    "The previously defined task uses an included default `all_receptacles_10k_1k.json.gz` dataset which places objects on any receptacle. The episode `.json.gz` dataset defines where\n",
    "objects are placed and their rearrangement target positions. New episode\n",
    "datasets are generated with the [run_episode_generator.py](https://github.com/facebookresearch/habitat-lab/blob/main/habitat/datasets/rearrange/run_episode_generator.py) script. In this example, we will define a new episode dataset where a single object spawns on the table with its goal also on the table."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "dataset_cfg_txt = \"\"\"\n",
    "---\n",
    "dataset_path: \"data/replica_cad/replicaCAD.scene_dataset_config.json\"\n",
    "additional_object_paths:\n",
    "  - \"data/objects/ycb/configs/\"\n",
    "scene_sets:\n",
    "  -\n",
    "    name: \"v3_sc\"\n",
    "    included_substrings:\n",
    "      - \"v3_sc\"\n",
    "    excluded_substrings: []\n",
    "    comment: \"This set (v3_sc) selects all 105 ReplicaCAD variations with static furniture.\"\n",
    "\n",
    "object_sets:\n",
    "  -\n",
    "    name: \"kitchen\"\n",
    "    included_substrings:\n",
    "      - \"002_master_chef_can\"\n",
    "      - \"003_cracker_box\"\n",
    "    excluded_substrings: []\n",
    "    comment: \"Leave included_substrings empty to select all objects.\"\n",
    "\n",
    "receptacle_sets:\n",
    "  -\n",
    "    name: \"table\"\n",
    "    included_object_substrings:\n",
    "      - \"frl_apartment_table_01\"\n",
    "    excluded_object_substrings: []\n",
    "    included_receptacle_substrings:\n",
    "      - \"\"\n",
    "    excluded_receptacle_substrings: []\n",
    "    comment: \"The empty substrings act like wildcards, selecting all receptacles for all objects.\"\n",
    "\n",
    "scene_sampler:\n",
    "  type: \"subset\"\n",
    "  params:\n",
    "    scene_sets: [\"v3_sc\"]\n",
    "  comment: \"Samples from ReplicaCAD 105 variations with static furniture.\"\n",
    "\n",
    "\n",
    "object_samplers:\n",
    "  -\n",
    "    name: \"kitchen_counter\"\n",
    "    type: \"uniform\"\n",
    "    params:\n",
    "      object_sets: [\"kitchen\"]\n",
    "      receptacle_sets: [\"table\"]\n",
    "      num_samples: [1, 1]\n",
    "      orientation_sampling: \"up\"\n",
    "\n",
    "object_target_samplers:\n",
    "  -\n",
    "    name: \"kitchen_counter_targets\"\n",
    "    type: \"uniform\"\n",
    "    params:\n",
    "      object_samplers: [\"kitchen_counter\"]\n",
    "      receptacle_sets: [\"table\"]\n",
    "      num_samples: [1, 1]\n",
    "      orientation_sampling: \"up\"\n",
    "\"\"\"\n",
    "nav_pick_cfg_path = os.path.join(data_path, \"nav_pick_dataset.yaml\")\n",
    "with open(nav_pick_cfg_path, \"w\") as f:\n",
    "    f.write(dataset_cfg_txt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!python -m habitat.datasets.rearrange.run_episode_generator --run --config {nav_pick_cfg_path} --num-episodes 10 --out data/nav_pick.json.gz"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To use this dataset set `dataset.data_path = data/nav_pick.json.gz` in the task config. See the full set of possible objects, receptacles, and scenes with `python -m habitat.datasets.rearrange.run_episode_generator --list`"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "collapsed_sections": [],
   "name": "Habitat 2.0 Quick Start Tutorial",
   "provenance": []
  },
  "jupytext": {
   "cell_metadata_filter": "-all",
   "formats": "nb_python//py:percent,notebooks//ipynb",
   "notebook_metadata_filter": "all"
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.17"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
