{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "import os\n",
    "import random\n",
    "from torchvision import transforms\n",
    "from home_robot.agent.multitask.sparse_voxel_instance_map import SparseVoxelMapAgent\n",
    "from PIL import Image\n",
    "import numpy as np\n",
    "from home_robot.datasets.scannet import ScanNetDataset\n",
    "from home_robot.datasets.scannet.referit3d_data import ReferIt3dDataConfig, load_referit3d_data\n",
    "from home_robot.datasets.scannet.scanrefer_data import ScanReferDataConfig, load_scanrefer_data\n",
    "from home_robot.datasets.scannet.scannet_dataset import ScanNetDataset\n",
    "from home_robot.datasets.scannet.scannet_constants import NUM_CLASSES as NUM_CLASSES_LONG\n",
    "from home_robot.perception import OvmmPerception\n",
    "import torch\n",
    "import hydra\n",
    "import json\n",
    "from omegaconf import DictConfig, OmegaConf\n",
    "from hydra.utils import instantiate\n",
    "# from data_visualization import InstanceMemeoryVisualizer\n",
    "from typing import List, Optional, Tuple, Union\n",
    "import cv2\n",
    "import pickle\n",
    "from pathlib import Path\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def get_image_from_path(\n",
    "    image_path: Union[str, Path],\n",
    "    height: Optional[int] = None,\n",
    "    width: Optional[int] = None,\n",
    "    keep_alpha: bool = False,\n",
    ") -> torch.Tensor:\n",
    "    \"\"\"Loads an image from disk as a float32 tensor scaled to [0, 1].\n",
    "    # Adapted from https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/data/dataparsers/scannet_dataparser.py\n",
    "    Args:\n",
    "        image_path: Path to the image file on disk.\n",
    "        height: Target height; pass together with `width` (or neither).\n",
    "        width: Target width; pass together with `height` (or neither).\n",
    "        keep_alpha: If False, the alpha channel of RGBA images is dropped.\n",
    "    Returns:\n",
    "        Tensor of shape (h, w, 3), or (h, w, 4) for RGBA input when keep_alpha=True.\n",
    "    \"\"\"\n",
    "    pil_image = Image.open(image_path)\n",
    "\n",
    "    assert (height is None) == (width is None)  # Neither or both\n",
    "    if height is not None:\n",
    "        pil_image = pil_image.resize((width, height), resample=Image.BILINEAR)\n",
    "    image = np.array(pil_image, dtype=\"uint8\")  # shape is (h, w) or (h, w, 3 or 4)\n",
    "    if len(image.shape) == 2:\n",
    "        # Grayscale input: replicate the single channel to get 3 channels.\n",
    "        image = image[:, :, None].repeat(3, axis=2)\n",
    "    assert len(image.shape) == 3\n",
    "    assert image.dtype == np.uint8\n",
    "    assert image.shape[2] in [3, 4], f\"Image shape of {image.shape} is incorrect.\"\n",
    "    image = torch.from_numpy(image.astype(\"float32\") / 255.0)\n",
    "    if not keep_alpha and image.shape[-1] == 4:\n",
    "        image = image[:, :, :3]\n",
    "    return image\n",
    "\n",
    "def get_depth_image_from_path(\n",
    "    filepath: Path,\n",
    "    height: Optional[int] = None,\n",
    "    width: Optional[int] = None,\n",
    "    scale_factor: float = 1.0,\n",
    "    interpolation: int = cv2.INTER_NEAREST,\n",
    ") -> torch.Tensor:\n",
    "    \"\"\"Loads, rescales and resizes depth images.\n",
    "    Filepath points to a 16-bit or 32-bit depth image, or a numpy array `*.npy`.\n",
    "    # Adapted from https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/data/dataparsers/scannet_dataparser.py\n",
    "    Args:\n",
    "        filepath: Path to depth image.\n",
    "        height: Target depth image height.\n",
    "        width: Target depth image width.\n",
    "        scale_factor: Factor by which to scale depth image.\n",
    "        interpolation: Depth value interpolation for resizing.\n",
    "\n",
    "    Raises:\n",
    "        FileNotFoundError: If the depth image cannot be read from `filepath`.\n",
    "\n",
    "    Returns:\n",
    "        Depth image torch tensor with shape [height, width, 1].\n",
    "    \"\"\"\n",
    "    assert (height is None) == (width is None)  # Neither or both\n",
    "    do_resize = height is not None\n",
    "    if filepath.suffix == \".npy\":\n",
    "        image = np.load(filepath) * scale_factor\n",
    "    else:\n",
    "        image = cv2.imread(str(filepath.absolute()), cv2.IMREAD_ANYDEPTH)\n",
    "        if image is None:\n",
    "            # cv2.imread returns None on failure instead of raising; fail loudly.\n",
    "            raise FileNotFoundError(f\"Could not read depth image: {filepath}\")\n",
    "        image = image.astype(np.float64) * scale_factor\n",
    "    if do_resize:\n",
    "        # Resize shared by both branches; NEAREST default avoids interpolating\n",
    "        # across depth discontinuities.\n",
    "        image = cv2.resize(image, (width, height), interpolation=interpolation)\n",
    "    return torch.from_numpy(image[:, :, np.newaxis])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class OVMMLoader:\n",
    "    \"\"\"Loads per-step RGB / depth / pose frames saved to disk for a scene.\"\"\"\n",
    "\n",
    "    # Depth PNGs are written as uint16 via depth/1000*2**16; this inverts that.\n",
    "    DEPTH_SCALE_FACTOR = 1000 / (2**16)\n",
    "    FRAME_SKIP = 1\n",
    "    HEIGHT = 640\n",
    "    WIDTH = 480\n",
    "    def __init__(self, path, height: Optional[int] = None, width: Optional[int] = None):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            path: Root directory containing one sub-directory per scene.\n",
    "            height: Target frame height (defaults to HEIGHT).\n",
    "            width: Target frame width (defaults to WIDTH).\n",
    "        \"\"\"\n",
    "        self.path = path\n",
    "        # Honor caller-supplied dimensions; previously these args were ignored.\n",
    "        self.height = self.HEIGHT if height is None else height\n",
    "        self.width = self.WIDTH if width is None else width\n",
    "\n",
    "    def load_scene(self, scene = \"000-hm3d-BFRyYbPCCPE\"): # testing\n",
    "        \"\"\"Returns a dict of cuda tensors (images, depths, poses, intrinsics)\n",
    "        for `scene`, or None if the scene directory does not exist.\n",
    "\n",
    "        Frames with no depth values in (0.1, 4) are skipped entirely.\n",
    "        \"\"\"\n",
    "        # option 1: TODO: do we still want to load from Arjun's directory?\n",
    "        scene_path = os.path.join(self.path, scene)\n",
    "        if not os.path.exists(scene_path):\n",
    "            return None\n",
    "        steps = set()\n",
    "        for file in os.listdir(scene_path):\n",
    "            # Frame files start with a zero-padded 5-digit step id.\n",
    "            if any(char.isdigit() for char in file):\n",
    "                steps.add(file[:5])\n",
    "        steps = list(sorted(steps))\n",
    "        steps = steps[0::self.FRAME_SKIP]\n",
    "        images = []\n",
    "        depths = []\n",
    "        poses = []\n",
    "        for step in steps:\n",
    "            rgb_file = os.path.join(scene_path, step + '-rgb.png')\n",
    "            depth_file = os.path.join(scene_path, step + '-depth.png')\n",
    "            pose_file = os.path.join(scene_path, step + '.txt')\n",
    "            rgb_tensor = get_image_from_path(Path(rgb_file), height=self.height, width=self.width)\n",
    "            depth_tensor = get_depth_image_from_path(\n",
    "                Path(depth_file),\n",
    "                height=self.height,\n",
    "                width=self.width,\n",
    "                scale_factor=self.DEPTH_SCALE_FACTOR,\n",
    "            )\n",
    "            valid_depth = (depth_tensor > 0.1) & (depth_tensor < 4)\n",
    "            if not valid_depth.any():\n",
    "                continue\n",
    "            images.append(rgb_tensor)\n",
    "            depths.append(depth_tensor)\n",
    "            poses.append(torch.from_numpy(np.loadtxt(pose_file)))\n",
    "        intrinsic_file = os.path.join(scene_path, 'intrinsic_color.txt')\n",
    "        intrinsic = np.loadtxt(intrinsic_file)\n",
    "        # Repeat the shared intrinsics once per *kept* frame (frames may be\n",
    "        # skipped above), so all stacked tensors share the same batch size.\n",
    "        intrinsics = torch.from_numpy(np.repeat(np.expand_dims(intrinsic, axis=0), len(images), axis=0)).to(\"cuda\").float()\n",
    "        images = torch.stack(images).float().to(\"cuda\")\n",
    "        depths = torch.stack(depths).float().to(\"cuda\")\n",
    "        poses = torch.stack(poses).float().to(\"cuda\")\n",
    "        scene_obs = {\"images\": images, \"depths\": depths, \"poses\": poses, \"intrinsics\": intrinsics}\n",
    "        return scene_obs\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "def opengl_to_opencv(pose):\n",
    "    \"\"\"Converts a camera pose from the OpenGL to the OpenCV convention\n",
    "    by negating the y and z camera axes.\n",
    "    \"\"\"\n",
    "    flip_yz = np.diag([1, -1, -1, 1])\n",
    "    return pose @ flip_yz\n",
    "\n",
    "def from_width_height_fov_to_intrinsics(\n",
    "    width: float,\n",
    "    height: float,\n",
    "    fov_degrees: float,\n",
    "):\n",
    "    \"\"\"Create a simple pinhole camera given minimal information only.\n",
    "\n",
    "    Args:\n",
    "        width: Image width in pixels.\n",
    "        height: Image height in pixels.\n",
    "        fov_degrees: Horizontal field of view, in degrees.\n",
    "\n",
    "    Returns:\n",
    "        A 4x4 intrinsics matrix [[fx, 0, cx, 0], [0, fy, cy, 0], ...] with\n",
    "        fy == fx, i.e. square pixels are assumed.\n",
    "    \"\"\"\n",
    "    horizontal_fov_rad = np.radians(fov_degrees)\n",
    "    focal_length = width / (2 * np.tan(horizontal_fov_rad / 2))\n",
    "    # Principal point at the centre of the (0-indexed) pixel grid.\n",
    "    principal_point_x = (width - 1.0) / 2\n",
    "    principal_point_y = (height - 1.0) / 2\n",
    "    return np.array([[focal_length, 0, principal_point_x, 0],\n",
    "                     [0, focal_length, principal_point_y, 0],\n",
    "                     [0, 0, 1, 0],\n",
    "                     [0, 0, 0, 1]])\n",
    "# def convert_obs_to_instance_memory(scene_id, scene_obs):\n",
    "#     if not scene_obs:\n",
    "#         return\n",
    "#     print (f'running instance memory on {scene_id}')\n",
    "#     self.instance_agent.build_scene_and_get_instances_for_queries(scene_obs, queries=[], reset=False)\n",
    "\n",
    "#     instances = self.instance_agent.voxel_map.get_instances()\n",
    "#     num_instances = len(instances)\n",
    "#     print (f\"{num_instances} instances perceived\")\n",
    "#     os.makedirs(os.path.join(self.save_dir, scene_id), exist_ok=True)\n",
    "#     with open(os.path.join(self.save_dir, scene_id, 'voxel_map.pkl'), 'wb') as f:\n",
    "#         pickle.dump(self.instance_agent.voxel_map, f)\n",
    "class InstanceEvaluator:\n",
    "    \"\"\"Runs the instance-memory pipeline over saved scenes and evaluates it.\n",
    "\n",
    "    NOTE(review): this class looks unfinished -- see inline notes below.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, instance_agent, save_dir=None):\n",
    "        self.instance_agent = instance_agent\n",
    "        # NOTE(review): OVMMLoader requires a `path` argument, so this call\n",
    "        # raises TypeError as written -- confirm the intended data directory.\n",
    "        self.ovmm = OVMMLoader()\n",
    "        # Add to ScanNetSparseVoxelMap\n",
    "        # NOTE(review): `self.scannet` is never assigned anywhere in this\n",
    "        # class, so the lookup below raises AttributeError -- needs wiring.\n",
    "        class_id_to_class_names = dict(\n",
    "            zip(\n",
    "                self.scannet.data.METAINFO[\"CLASS_IDS\"],  # IDs [1, 3, 4, 5, ..., 65]\n",
    "                self.scannet.data.METAINFO[\"CLASS_NAMES\"],  # [wall, floor, cabinet, ...]\n",
    "            )\n",
    "        )\n",
    "        self.instance_agent.set_vocabulary(class_id_to_class_names)\n",
    "        self.save_dir = save_dir\n",
    "        if save_dir:\n",
    "            # NOTE(review): the InstanceMemeoryVisualizer import is commented\n",
    "            # out in the imports cell, so this raises NameError.\n",
    "            self.visualizer = InstanceMemeoryVisualizer(save_dir)\n",
    "\n",
    "\n",
    "\n",
    "    \n",
    "    def eval_ovmm_eqa(self):\n",
    "        \"\"\"run instance memory pipeline on the annotated ScanNet EQA dataset\"\"\"\n",
    "        # TODO: change to the new annotation\n",
    "        with open(\"/private/home/xiaohanzhang/data/ovmm_bench_500_frames/ovmm_bench_v1_annotated.json\") as f:\n",
    "            annotations = json.load(f)\n",
    "        scenes = set()\n",
    "        for ann in annotations:\n",
    "            # Only keep episodes that carry a success annotation.\n",
    "            if 'success' in ann:\n",
    "                scenes.add(ann['episode_id'])\n",
    "        scenes = sorted(list(scenes))\n",
    "        print (scenes)\n",
    "\n",
    "        for scene_id in scenes:\n",
    "            # Skip scenes already processed into save_dir (resumable runs).\n",
    "            if os.path.exists(os.path.join(self.save_dir, scene_id)):\n",
    "                continue\n",
    "            self.instance_agent.reset()\n",
    "            scene_obs = self.ovmm.load_scene(scene=scene_id)\n",
    "            # NOTE(review): convert_obs_to_instance_memory exists only in the\n",
    "            # commented-out code above this class -- this raises AttributeError.\n",
    "            self.convert_obs_to_instance_memory(scene_id, scene_obs)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# @hydra.main(config_path=\"/private/home/xiaohanzhang/home-robot/projects/scannet_offline_eval/configs/model\", \n",
    "#             config_name=\"instancemap3d_top_down_detic\")\n",
    "# Load the ScanNet validation split with referring-expression annotations.\n",
    "scannet = ScanNetDataset(\n",
    "    root_dir = '/private/home/ssax/home-robot/src/home_robot/home_robot/datasets/scannet/data',\n",
    "    frame_skip = 180,\n",
    "    split = 'val',\n",
    "    n_classes=NUM_CLASSES_LONG,\n",
    "    referit3d_config = ReferIt3dDataConfig(),\n",
    "    scanrefer_config = ScanReferDataConfig(),    \n",
    ")\n",
    "from hydra import compose, initialize\n",
    "from omegaconf import OmegaConf\n",
    "# context initialization\n",
    "with initialize(version_base=None, config_path=\"../scannet_offline_eval/configs/model\", job_name=\"test_app\"):\n",
    "    cfg = compose(config_name=\"instancemap3d_top_down_detic\")\n",
    "    print(OmegaConf.to_yaml(cfg))\n",
    "# Build the instance-mapping agent directly from the composed Hydra config.\n",
    "instance_agent = instantiate(cfg)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# set vocabulary\n",
    "# Map numeric ScanNet class IDs to human-readable names so the agent's\n",
    "# detections can be reported as category strings.\n",
    "class_id_to_class_names = dict(\n",
    "    zip(\n",
    "        scannet.METAINFO[\"CLASS_IDS\"],  # IDs [1, 3, 4, 5, ..., 65]\n",
    "        scannet.METAINFO[\"CLASS_NAMES\"],  # [wall, floor, cabinet, ...]\n",
    "    )\n",
    ")\n",
    "instance_agent.set_vocabulary(class_id_to_class_names)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): hardcoded absolute path -- parameterize for portability.\n",
    "data_path = \"/private/home/xiaohanzhang/accel-cortex/frames\"\n",
    "data_frame_path = data_path+'_frames'\n",
    "# scene_id = \"104348328_171513363_790\"\n",
    "scene_id = \"\"\n",
    "print (f'extracting scene id {scene_id}')\n",
    "# Only unpickle trusted files: pickle.load can execute arbitrary code.\n",
    "with open(os.path.join(data_path, scene_id, 'obs_data.pkl'), 'rb') as f:\n",
    "    obs = pickle.load(f)\n",
    "import shutil\n",
    "# Start from a clean per-scene output directory on every run.\n",
    "if os.path.exists(os.path.join(data_frame_path, scene_id)):\n",
    "    shutil.rmtree(os.path.join(data_frame_path, scene_id))\n",
    "os.makedirs(os.path.join(data_frame_path, scene_id), exist_ok=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "found_obj = False\n",
    "found_recep = False\n",
    "# Dump each observation's RGB, depth, and camera pose to per-step files in\n",
    "# the layout that OVMMLoader.load_scene reads back.\n",
    "for step in range(len(obs)):\n",
    "    format_step = \"{:05d}\".format(step)\n",
    "    # perceived_ids = np.unique(obs[step].task_observations['gt_instance_ids'])\n",
    "    # save rgb\n",
    "    if not os.path.exists(os.path.join(data_frame_path, scene_id, f'{format_step}-rgb.png')):\n",
    "        print (obs[step].rgb)\n",
    "        rgb = Image.fromarray(obs[step].rgb)\n",
    "        rgb.save(os.path.join(data_frame_path, scene_id, f'{format_step}-rgb.png'))\n",
    "    # # save depth\n",
    "    if not os.path.exists(os.path.join(data_frame_path, scene_id, f'{format_step}-depth.png')):\n",
    "        # Encode depth as uint16 -- the inverse of OVMMLoader.DEPTH_SCALE_FACTOR.\n",
    "        depth = Image.fromarray((obs[step].depth/1000*2**16).astype(np.uint16))\n",
    "        depth.save(os.path.join(data_frame_path, scene_id, f'{format_step}-depth.png'))        \n",
    "    # save pose\n",
    "    # print (obs[step].camera_pose)\n",
    "    # print (get_real_world_camera_pose(obs[step]))\n",
    "    # np.savetxt(os.path.join(data_frame_path, scene_id, f'{format_step}.txt'), get_real_world_camera_pose(obs[step]))\n",
    "    new_pose = obs[step].camera_pose\n",
    "    # print (new_pose)\n",
    "    np.savetxt(os.path.join(data_frame_path, scene_id, f'{format_step}.txt'), new_pose)\n",
    "        \n",
    "# save_intrinsic\n",
    "# TODO: uncomment this\n",
    "np.savetxt(os.path.join(data_frame_path, scene_id, 'intrinsic_color.txt'), from_width_height_fov_to_intrinsics(480, 640, 42))\n",
    "np.savetxt(os.path.join(data_frame_path, scene_id, 'intrinsic_depth.txt'), from_width_height_fov_to_intrinsics(480, 640, 42))\n",
    "\n",
    "# Round-trip the dumped frames through the loader.\n",
    "ovmm = OVMMLoader(path=data_frame_path)\n",
    "scene_obs = ovmm.load_scene(scene=scene_id)\n",
    "np.set_printoptions(suppress=True)\n",
    "# print (np.round(scene_obs['poses'][0].cpu().numpy(), decimals=2))\n",
    "print (scene_obs['poses'].cpu().numpy())\n",
    "from home_robot.datasets.eqa.utils import relative_transformation, to_scalar\n",
    "def _preprocess_poses(poses: torch.Tensor):\n",
    "    r\"\"\"Preprocesses the poses by setting first pose in a sequence to identity and computing the relative\n",
    "    homogenous transformation for all other poses.\n",
    "\n",
    "    Args:\n",
    "        poses (torch.Tensor): Pose matrices to be preprocessed\n",
    "\n",
    "    Returns:\n",
    "        Output (torch.Tensor): Preprocessed poses\n",
    "\n",
    "    Shape:\n",
    "        - poses: :math:`(L, 4, 4)` where :math:`L` denotes sequence length.\n",
    "        - Output: :math:`(L, 4, 4)` where :math:`L` denotes sequence length.\n",
    "    \"\"\"\n",
    "    return relative_transformation(\n",
    "        poses[0].unsqueeze(0).repeat(poses.shape[0], 1, 1),\n",
    "        poses,\n",
    "        orthogonal_rotations=False,\n",
    "    )\n",
    "# Toggle to express all poses relative to the first frame.\n",
    "relative_pose = False\n",
    "poses = scene_obs['poses']\n",
    "print (len(poses))\n",
    "start, end, stride = 0, -1, 1\n",
    "if end == -1:\n",
    "    end = len(poses)\n",
    "poses = poses[start : end : stride]\n",
    "print (len(poses))\n",
    "# self.transformed_poses = datautils.poses_to_transforms(self.poses)\n",
    "# poses = torch.stack(poses)\n",
    "if relative_pose:\n",
    "    transformed_poses = _preprocess_poses(poses)\n",
    "else:\n",
    "    transformed_poses = poses\n",
    "print (transformed_poses)\n",
    "scene_obs['poses'] = transformed_poses\n",
    "\n",
    "\n",
    "# pose1 = scene_obs['poses'][0]\n",
    "# pose2 = scene_obs['poses'][2]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the voxel/instance map from the loaded scene and visualize it.\n",
    "instance_agent.reset()\n",
    "instance_agent.build_scene_and_get_instances_for_queries(scene_obs, queries=[], reset=False)\n",
    "instances = instance_agent.voxel_map.get_instances()\n",
    "num_instances = len(instances)\n",
    "print (f\"{num_instances} instances perceived\")\n",
    "instance_agent.voxel_map.show(backend=\"pytorch3d\")\n",
    "# os.makedirs(os.path.join(self.save_dir, scene_id), exist_ok=True)\n",
    "# with open(os.path.join(self.save_dir, scene_id, 'voxel_map.pkl'), 'wb') as f:\n",
    "#     pickle.dump(self.instance_agent.voxel_map, f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pytorch3d.structures import Pointclouds\n",
    "from pytorch3d.vis.plotly_vis import AxisArgs, plot_scene, get_camera_wireframe\n",
    "from home_robot.utils.bboxes_3d_plotly import plot_scene_with_bboxes\n",
    "from home_robot.utils.point_cloud_torch import unproject_masked_depth_to_xyz_coordinates\n",
    "import matplotlib.pyplot as plt\n",
    "import plotly.graph_objects as go\n",
    "\n",
    "def colormap_to_rgb_strings(data, colormap_name='viridis', include_alpha=False, min_val=None, max_val=None):\n",
    "    \"\"\"\n",
    "    Convert a range of numbers from a given dataset into a series of RGB or RGBA strings using a specified Matplotlib colormap.\n",
    "\n",
    "    :param data: The dataset from which to derive color mappings.\n",
    "    :param colormap_name: The name of the Matplotlib colormap to use.\n",
    "    :param include_alpha: Boolean to decide if the alpha channel should be included in the RGB strings.\n",
    "    :param min_val: Optional minimum value for colormap scaling.\n",
    "    :param max_val: Optional maximum value for colormap scaling.\n",
    "    :return: A list of color strings in the format 'rgb(R,G,B)' or 'rgba(R,G,B,A)'.\n",
    "    \"\"\"\n",
    "    # Compute min and max from the data if not provided\n",
    "    if min_val is None:\n",
    "        min_val = np.min(data)\n",
    "    if max_val is None:\n",
    "        max_val = np.max(data)\n",
    "\n",
    "    # Normalize data within the provided or computed min and max range\n",
    "    norm = plt.Normalize(min_val, max_val)\n",
    "    # plt.get_cmap: plt.cm.get_cmap was deprecated in Matplotlib 3.7 and\n",
    "    # removed in 3.9.\n",
    "    colors = plt.get_cmap(colormap_name)(norm(data))\n",
    "\n",
    "    # Format color strings based on the include_alpha flag\n",
    "    if include_alpha:\n",
    "        return [\"rgba({},{},{},{})\".format(int(r*255), int(g*255), int(b*255), a) for r, g, b, a in colors]\n",
    "    else:\n",
    "        return [\"rgb({},{},{})\".format(int(r*255), int(g*255), int(b*255)) for r, g, b in colors[:, :3]]\n",
    "def add_camera_poses(\n",
    "    fig,\n",
    "    poses,\n",
    "    linewidth = 3,\n",
    "    color = None,\n",
    "    name = 'cam',\n",
    "    separate = True,\n",
    "    scale = 0.2,\n",
    "    colormap_name='plasma'\n",
    "    ):\n",
    "    \"\"\"Draws a camera wireframe for every pose onto a plotly 3D figure.\n",
    "\n",
    "    If `color` is None, cameras are colored along `colormap_name` by their\n",
    "    index, making the trajectory direction visible. `separate` is currently\n",
    "    unused. One Scatter3d trace named '{name}-{i}' is added per pose.\n",
    "    \"\"\"\n",
    "    cam_points = get_camera_wireframe(scale)\n",
    "    # Convert p3d (opengl) to opencv\n",
    "    cam_points[:, 1] *= -1\n",
    "\n",
    "    if color is None:\n",
    "        colors = colormap_to_rgb_strings(list(range(len(poses))), colormap_name=colormap_name)\n",
    "    else:\n",
    "        colors = [color] * len(poses)\n",
    "    # NOTE(review): the loop variable `color` shadows the parameter -- fine\n",
    "    # here, but rename it if the parameter is ever needed inside the loop.\n",
    "    for i, (pose, color) in enumerate(zip(poses, colors)):\n",
    "        # cam_points[:, 2] *= -1\n",
    "        R = pose[:3, :3]\n",
    "        t = pose[:3, -1]\n",
    "        # Transform the canonical wireframe into world coordinates.\n",
    "        cam_points_world = cam_points @ R.T + t.unsqueeze(0)  # (cam_points @ R) # + t)\n",
    "        x, y, z = [v.cpu().numpy().tolist() for v in cam_points_world.unbind(1)]\n",
    "        fig.add_trace(\n",
    "            go.Scatter3d(\n",
    "                x=x,\n",
    "                y=y,\n",
    "                z=z,\n",
    "                mode=\"lines\",\n",
    "                marker={\n",
    "                    \"size\": 1,\n",
    "                    \"color\": color,\n",
    "                },\n",
    "                line=dict(\n",
    "                    width=linewidth,\n",
    "                    color=color,\n",
    "                ),\n",
    "                name=f'{name}-{i}',\n",
    "            )\n",
    "        )\n",
    "\n",
    "# Sanity-check the saved scene: unproject depth into a world-frame point\n",
    "# cloud and overlay the camera trajectory.\n",
    "rgb, depth, pose, Ks = scene_obs['images'].cpu(), scene_obs['depths'].cpu(), scene_obs['poses'].cpu(), scene_obs['intrinsics'].cpu()\n",
    "# print (depth[0])\n",
    "# print (rgb[0])\n",
    "# print (pose[0])\n",
    "print (Ks[0])\n",
    "print (depth.shape)\n",
    "print (rgb.shape)\n",
    "print (pose.shape)\n",
    "print (Ks.shape)\n",
    "unprojected = unproject_masked_depth_to_xyz_coordinates(\n",
    "    # depth = depth[0, None].unsqueeze(1),\n",
    "    # pose = poses_opencv[0, None],\n",
    "    depth = depth.unsqueeze(1).squeeze(-1),\n",
    "    pose = pose,\n",
    "    inv_intrinsics = torch.linalg.inv(Ks)[:, :3, :3],\n",
    "    # mask: Optional[torch.Tensor] = None,\n",
    ") \n",
    "\n",
    "# Subsample for interactive rendering performance.\n",
    "ptc = Pointclouds(\n",
    "    [unprojected.reshape(-1, 3)],\n",
    "    features = [rgb.reshape(-1,3)],\n",
    ").subsample(100000)\n",
    "\n",
    "fig = plot_scene({\n",
    "    \"global scene\": dict(\n",
    "        ptc=ptc\n",
    "    )\n",
    "    },\n",
    "    xaxis={\"backgroundcolor\":\"rgb(200, 200, 230)\"},\n",
    "    yaxis={\"backgroundcolor\":\"rgb(230, 200, 200)\"},\n",
    "    zaxis={\"backgroundcolor\":\"rgb(200, 230, 200)\"}, \n",
    "    axis_args=AxisArgs(showgrid=True),\n",
    "    pointcloud_marker_size=3,\n",
    "    pointcloud_max_points=200_000,\n",
    "    height=1000,\n",
    "    # width=1000,\n",
    ")\n",
    "\n",
    "add_camera_poses(fig, pose)\n",
    "fig.update_layout(\n",
    "    # width=width,\n",
    "    height=1000,\n",
    "    # aspectmode=\"data\"\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.17"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
