{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "# Copyright (c) Meta Platforms, Inc. and affiliates.\n",
    "#\n",
    "# This source code is licensed under the MIT license found in the\n",
    "# LICENSE file in the root directory of this source tree.\n",
    "import argparse\n",
    "import dataclasses\n",
    "import sys\n",
    "import timeit\n",
    "from typing import Tuple\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import torch\n",
    "from tqdm import tqdm\n",
    "import pandas as pd\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from scannet_dataset import ScanNetDataset\n",
    "# from referit3d_data import ReferIt3dDataConfig\n",
    "# from scanrefer_data import ScanReferDataConfig\n",
    "from pytorch3d.io import IO\n",
    "from pytorch3d.structures import Pointclouds\n",
    "from home_robot.datasets.scannet import ScanNetDataset, ReferIt3dDataConfig, ScanReferDataConfig, NUM_CLASSES_LONG\n",
    "\n",
    "# NOTE(review): hardcoded absolute path -- consider a configurable DATA_DIR\n",
    "data = ScanNetDataset(\n",
    "    root_dir = '/private/home/ssax/home-robot/src/home_robot/home_robot/datasets/scannet/data',\n",
    "    frame_skip = 180,\n",
    "    n_classes=NUM_CLASSES_LONG,\n",
    "    # n_classes=50,\n",
    "    referit3d_config = ReferIt3dDataConfig(),\n",
    "    scanrefer_config = ScanReferDataConfig(),\n",
    ")\n",
    "\n",
    "# Load specific scene\n",
    "# idx = data.scene_list.index(\"scene0192_00\") #'scene0000_00'\n",
    "idx = 0\n",
    "print(f\"Loaded images of (h: {data.height}, w: {data.width}) - resized from ({data.DEFAULT_HEIGHT},{data.DEFAULT_WIDTH})\")\n",
    "scene_obs = data.__getitem__(idx, show_progress=True)\n",
    "\n",
    "# Load GT mesh. (The duplicate `from pytorch3d.io import IO, load_obj, load_ply`\n",
    "# was removed: IO is already imported above and load_obj/load_ply were unused.)\n",
    "scene_id = scene_obs['scan_name']\n",
    "print(\"Loading GT mesh for\", scene_id)\n",
    "# verts = load_ply(data.root_dir / f'scans/{scene_id}/{scene_id}_vh_clean.ply')\n",
    "pc = IO().load_pointcloud(data.root_dir / f'scans/{scene_id}/{scene_id}_vh_clean.ply')\n",
    "verts = pc.points_packed()\n",
    "# Apply the scene's axis-alignment transform in homogeneous coordinates\n",
    "aligned_verts = torch.cat([verts, torch.ones_like(verts[:,:1])], dim=-1) @ scene_obs['axis_align_mats'][0].T\n",
    "pointcloud_aligned = Pointclouds(points=aligned_verts[...,:3].unsqueeze(0), features=pc.features_packed().unsqueeze(0))\n",
    "\n",
    "# Load short-form GT mesh (50k points, with semantic + instance labels)\n",
    "ins_50k = torch.from_numpy(np.load(data.instance_dir / f'{scene_id}_ins_label.npy').astype(np.int32))\n",
    "_verts = torch.from_numpy(np.load(data.instance_dir / f'{scene_id}_vert.npy'))\n",
    "locs_50k, col_50k = _verts[:,:3], (_verts[:,3:] / 255.)\n",
    "# Axis-align the 50k points as well so they live in the same frame as the boxes\n",
    "locs_50k = torch.cat([locs_50k, torch.ones_like(locs_50k[:,:1])], dim=-1) @ scene_obs['axis_align_mats'][0].T\n",
    "locs_50k = locs_50k[:,:3]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# df[:10]\n",
    "# Inspect the referring expressions attached to this scene (bare last expression -> rich display)\n",
    "scene_obs['ref_expr']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Visualize referring expression\n",
    "# Title: Query\n",
    "# Trace: Pointcloud \n",
    "# Trace: GT bbox\n",
    "# Trace: Distractors of same class\n",
    "# NOTE(review): 39 is a magic instance id -- presumably a target id present in this\n",
    "# particular scene; confirm and consider naming it as a constant\n",
    "selected = scene_obs['box_target_ids'] == 39\n",
    "# Map class ids -> human-readable names, then show the class name of the selected box\n",
    "id_to_name = dict(zip(data.METAINFO['CLASS_IDS'], data.METAINFO['CLASS_NAMES']))\n",
    "id_to_name[scene_obs['box_classes'][selected].item()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# K = scene_obs['intrinsics'][0][:3,:3]\n",
    "# depth = scene_obs['depths'][0].squeeze().unsqueeze(0).unsqueeze(1)\n",
    "# valid_depth  = (0.1 < depth) & (depth < 4.0)\n",
    "\n",
    "# xyz = unproject_masked_depth_to_xyz_coordinates(\n",
    "#     depth = depth,\n",
    "#     mask  = ~valid_depth,\n",
    "#     pose  = torch.eye(4).unsqueeze(0),\n",
    "#     inv_intrinsics = torch.linalg.inv(K).unsqueeze(0),\n",
    "# )\n",
    "# rgb = scene_obs['images'][0].reshape(-1,3)[valid_depth.flatten()]\n",
    "# print(scene_obs['image_paths'][0])\n",
    "# print(f\"Proportion depth valid: {float(valid_depth.float().mean())}\")\n",
    "# print(f\"Depth min + max: {float(depth.flatten()[valid_depth.flatten()].min())}, {float(depth.flatten()[valid_depth.flatten()].max())}\")\n",
    "# print(\"These are the mins-maxes along each world axis. They should be in meters:\")\n",
    "# for i in range(3):\n",
    "#     print(f\"  {i}: ({float(xyz[:,i].min())}, {float(xyz[:,i].max())})\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Visual sanity check of the first frame: depth map, then RGB image.\n",
    "# Titles added so the figures stand alone when the notebook is skimmed.\n",
    "plt.imshow(scene_obs['depths'][0])\n",
    "plt.title('Depth (frame 0)')\n",
    "plt.show()\n",
    "plt.imshow(scene_obs['images'][0])\n",
    "plt.title('RGB (frame 0)')\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# -> SparseVoxelMapWithInstanceViews.show(backend='pytorch3d')\n",
    "\n",
    "# Plot GT scene\n",
    "from home_robot.utils.bboxes_3d import BBoxes3D, join_boxes_as_scene, join_boxes_as_batch\n",
    "from home_robot.utils.bboxes_3d_plotly import plot_scene_with_bboxes\n",
    "from pytorch3d.vis.plotly_vis import AxisArgs\n",
    "from pytorch3d.structures import Pointclouds\n",
    "import seaborn as sns\n",
    "\n",
    "# One distinct color per GT box, used as the box features\n",
    "colors = torch.tensor(sns.color_palette(\"husl\", len(scene_obs['boxes_aligned'])))\n",
    "gt_boxes = BBoxes3D(\n",
    "    bounds = [scene_obs['boxes_aligned']],\n",
    "    # features = [colors[0].unsqueeze(0).expand(27,3)],\n",
    "    features = [colors],\n",
    "    names = [scene_obs['box_classes'].unsqueeze(-1)]\n",
    ")\n",
    "\n",
    "# 50k-point downsampled scene cloud (colors normalized to [0, 1] in the load cell)\n",
    "pointcloud_50k = Pointclouds(points=[locs_50k], features=[col_50k])\n",
    "\n",
    "# Interactive plotly figure: GT boxes + downsampled point cloud for this scene\n",
    "fig = plot_scene_with_bboxes(\n",
    "    plots = { f\"{scene_id}\": { \n",
    "\n",
    "                                \"GT boxes\": gt_boxes,\n",
    "                                # \"GT points\": pointcloud_aligned,\n",
    "                                \"GT points smol\": pointcloud_50k,\n",
    "                                # \"cameras\": cameras,\n",
    "                            }\n",
    "    },\n",
    "    xaxis={\"backgroundcolor\":\"rgb(200, 200, 230)\"},\n",
    "    yaxis={\"backgroundcolor\":\"rgb(230, 200, 200)\"},\n",
    "    zaxis={\"backgroundcolor\":\"rgb(200, 230, 200)\"}, \n",
    "    axis_args=AxisArgs(showgrid=True),\n",
    "    pointcloud_marker_size=3,\n",
    "    pointcloud_max_points=200_000,\n",
    "    boxes_wireframe_width=3,\n",
    "    boxes_add_cross_face_bars=False,\n",
    "    # boxes_name_int_to_display_name_dict = dict(zip([int(i) for i in data.METAINFO['seg_valid_class_ids']], data.METAINFO['classes'])),\n",
    "    boxes_name_int_to_display_name_dict = dict(zip(data.METAINFO['CLASS_IDS'], data.METAINFO['CLASS_NAMES'])),\n",
    "\n",
    "    boxes_plot_together=False,\n",
    "    height=1000,\n",
    "    # width=1000,\n",
    ")\n",
    "fig"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import Tensor\n",
    "from typing import Tuple, Optional\n",
    "from home_robot.utils.point_cloud_torch import get_bounds\n",
    "from home_robot.utils.bboxes_3d import box3d_volume_from_bounds\n",
    "def transform_basis(points: torch.Tensor, normal_vector: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n",
    "    \"\"\"\n",
    "    Transforms a set of points to a basis where the first two basis vectors\n",
    "    are in the plane computed using SVD, and the third basis vector is along\n",
    "    the normal dimension.\n",
    "\n",
    "    :param points: A 2D tensor of shape (N, 3), representing N points in 3D space.\n",
    "    :param normal_vector: A 1D tensor of shape (3,), the plane normal (need not be unit length).\n",
    "    :return: Tuple of:\n",
    "        - transformed_points: (N, 3) tensor of the centered points expressed in the new\n",
    "          basis; the third column is the signed distance along the (unit) normal.\n",
    "        - transformation_matrix: (3, 3) tensor whose rows are the new basis vectors.\n",
    "    \"\"\"\n",
    "    assert points.dim() == 2 and points.size(1) == 3, \"points must be a 2D tensor with shape (N, 3)\"\n",
    "    assert normal_vector.dim() == 1 and normal_vector.size(0) == 3, \"normal_vector must be a 1D tensor with shape (3)\"\n",
    "\n",
    "    # Normalize the normal vector\n",
    "    normal_vector = normal_vector / torch.norm(normal_vector)\n",
    "\n",
    "    # Center the points, then remove the component along the normal so the\n",
    "    # remaining variance lies entirely within the plane\n",
    "    centroid = torch.mean(points, dim=0)\n",
    "    points_in_plane = points - centroid\n",
    "    points_in_plane = points_in_plane - (points_in_plane @ normal_vector.unsqueeze(-1)) * normal_vector.unsqueeze(0)\n",
    "\n",
    "    # SVD of the in-plane points: the top two right singular vectors span the plane\n",
    "    u, s, vh = torch.linalg.svd(points_in_plane, full_matrices=False)\n",
    "\n",
    "    # Rows of the matrix are the new basis: two in-plane directions + the normal\n",
    "    transformation_matrix = torch.stack([vh[0], vh[1], normal_vector])\n",
    "\n",
    "    # Express the centered points in the new basis\n",
    "    transformed_points = (points - centroid) @ transformation_matrix.T\n",
    "\n",
    "    return transformed_points, transformation_matrix\n",
    "\n",
    "def fit_plane_to_points(\n",
    "        normal_vec: Tensor,\n",
    "        points: Tensor,\n",
    "        return_residuals: bool = False\n",
    "    ) -> Tensor:\n",
    "    \"\"\"\n",
    "    Use least squares to fit a plane to a given set of points in K-dimensional space using a specified normal vector.\n",
    "    This function computes the d coefficient of the plane equation: <n, P> + d = 0\n",
    "    using the provided normal vector n and a set of points P (d = -mean(<n, p_i>)).\n",
    "\n",
    "    Parameters:\n",
    "    -----------\n",
    "    normal_vec : torch.Tensor\n",
    "        A 1D tensor of shape (K,) representing the normal vector (a1, a2, ..., aK) to the hyperplane.\n",
    "    points : torch.Tensor\n",
    "        A 2D tensor of shape (N, K), representing N points in K-dimensional space, where each row is a point (x1, x2, ..., xK).\n",
    "    return_residuals : bool, optional\n",
    "        Whether to also return the residuals, i.e., the signed perpendicular distances of the points from the fitted hyperplane. Default is False.\n",
    "\n",
    "    Returns:\n",
    "    --------\n",
    "    plane_params : torch.Tensor\n",
    "        A 1D tensor of shape (K+1,) representing the coefficients (a1, a2, ..., aK, d) of the hyperplane equation,\n",
    "        with the normal normalized to unit length.\n",
    "    residuals : torch.Tensor (only if return_residuals is True)\n",
    "        A 1D tensor of shape (N,) of signed perpendicular distances of the points from the fitted hyperplane.\n",
    "\n",
    "    Example:\n",
    "    --------\n",
    "    >>> normal_vec = torch.tensor([0., 1.])\n",
    "    >>> points = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])\n",
    "    >>> fit_plane_to_points(normal_vec, points, return_residuals=True)\n",
    "    (tensor([ 0.,  1., -4.]), tensor([-2.,  0.,  2.]))\n",
    "\n",
    "    \"\"\"\n",
    "    assert normal_vec.dim() == 1, \"normal_vec must be a 1D tensor\"\n",
    "    assert points.dim() == 2 and points.size(1) == normal_vec.size(0), \"points must be a 2D tensor of shape (N, n) where n is the length of normal_vec\"\n",
    "\n",
    "    # Normalize the normal vector so residuals are true Euclidean distances\n",
    "    normal_vec = normal_vec / normal_vec.norm()\n",
    "\n",
    "    # Least-squares d for: a1*x1 + a2*x2 + ... + aK*xK + d = 0\n",
    "    d = - (points * normal_vec).sum(dim=-1).mean()\n",
    "\n",
    "    # Signed distance of each point from the fitted plane\n",
    "    if return_residuals:\n",
    "        residuals = (points * normal_vec).sum(dim=-1) + d\n",
    "        return torch.cat([normal_vec, d.unsqueeze(0)]), residuals\n",
    "\n",
    "    return torch.cat([normal_vec, d.unsqueeze(0)])\n",
    "\n",
    "def find_placeable_location(\n",
    "        pointcloud: Tensor,\n",
    "        ground_normal: Tensor,\n",
    "        nbr_dist: float,\n",
    "        residual_thresh: float,\n",
    "        max_tries: Optional[int] = None,\n",
    "        min_neighborhood_points: int = 3,\n",
    "        min_area_prop: float = 0.25,\n",
    "    ) -> Tuple[Tensor, float]:\n",
    "    \"\"\"\n",
    "    Finds a suitable placement location on a flat surface in the given pointcloud.\n",
    "\n",
    "    Args:\n",
    "        pointcloud (Tensor): A 2D tensor representing the pointcloud, with shape (num_points, dims),\n",
    "            where dims is the dimensionality of the points.\n",
    "        ground_normal (Tensor): A 1D tensor representing the normal vector to the ground plane.\n",
    "        nbr_dist (float): Radius (same units as the pointcloud) used to gather the neighborhood\n",
    "            around each sampled candidate point.\n",
    "        residual_thresh (float): Threshold on the mean absolute out-of-plane residual below which\n",
    "            a candidate neighborhood counts as flat.\n",
    "        max_tries (Optional[int]): Maximum number of candidate points to try. Defaults to (and is\n",
    "            capped at) the number of points in the cloud.\n",
    "        min_neighborhood_points (int): Minimum number of neighbors required to evaluate a candidate.\n",
    "        min_area_prop (float): Minimum in-plane bounding-box area of the neighborhood, as a\n",
    "            proportion of the (2 * nbr_dist)^2 query square, for the surface to count as supported.\n",
    "\n",
    "    Returns:\n",
    "        Tuple[Tensor, float]: A tuple containing the suitable location as a 1D tensor and the\n",
    "            average absolute residual of the fit at that location.\n",
    "\n",
    "    Raises:\n",
    "        ValueError: If no suitable location is found within ``max_tries`` samples.\n",
    "    \"\"\"\n",
    "    assert pointcloud.ndim == 2 and pointcloud.shape[1] >= 3, f\"Pointcloud must be a 2D Tensor with shape (num_points, 3), not {pointcloud.shape=}\"\n",
    "    num_points = pointcloud.shape[0]\n",
    "    max_tries = max_tries if max_tries is not None else num_points\n",
    "    max_tries = min(max_tries, num_points)\n",
    "\n",
    "    # Sample candidate indices without replacement (debug prints removed)\n",
    "    idxs = torch.randperm(num_points)[:max_tries]\n",
    "    for idx in idxs:\n",
    "        # 1. Sample a location from the pointcloud\n",
    "        sample_point = pointcloud[idx]\n",
    "\n",
    "        # 2. Extract a neighborhood around that location\n",
    "        dists = torch.norm(pointcloud - sample_point.unsqueeze(0), dim=1)\n",
    "        neighborhood = pointcloud[dists < nbr_dist]\n",
    "\n",
    "        if neighborhood.shape[0] < min_neighborhood_points:\n",
    "            # Too few points in the neighborhood to judge flatness; skip this candidate\n",
    "            continue\n",
    "\n",
    "        # 3. Express the neighborhood in a basis aligned with the ground plane.\n",
    "        #    Fix: use the ground_normal argument instead of the notebook-global normal_vec.\n",
    "        #    (The previous explicit fit_plane_to_points call was redundant: its residuals\n",
    "        #    were immediately overwritten by nbrhd_plane[:, 2] below.)\n",
    "        nbrhd_plane, tform = transform_basis(points=neighborhood, normal_vector=ground_normal)\n",
    "        bounds = get_bounds(nbrhd_plane)\n",
    "        mins, maxs = bounds[:2].unbind(dim=-1)\n",
    "        area = torch.prod(maxs - mins, dim=-1)\n",
    "        if area < (nbr_dist * 2) ** 2 * min_area_prop:\n",
    "            # Neighborhood does not cover enough of the query square to support placement\n",
    "            continue\n",
    "        # Out-of-plane coordinate = signed distance from the plane through the centroid\n",
    "        residuals = nbrhd_plane[:, 2]\n",
    "\n",
    "        # 4. If the fit average absolute residual is under some threshold, return that location\n",
    "        avg_residual = torch.mean(torch.abs(residuals))\n",
    "        if avg_residual < residual_thresh:\n",
    "            return sample_point, avg_residual\n",
    "    raise ValueError(f'No suitable location found after {max_tries} tries')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pick the first 'couch' instance in the scene for the placement experiment\n",
    "cls_idx = data.ALL_CLASS_NAMES_TO_CLASS_IDS['couch']\n",
    "first_idx = list(scene_obs['box_classes'].cpu().numpy()).index(cls_idx)\n",
    "inst_id = scene_obs['box_target_ids'][first_idx]\n",
    "\n",
    "\n",
    "# Mask the 50k-point cloud down to just this instance, and keep the full cloud too\n",
    "mask_obj = (ins_50k == inst_id)\n",
    "pointcloud_obj_50k = Pointclouds(points=[locs_50k[mask_obj]], features=[col_50k[mask_obj]])\n",
    "pointcloud_50k = Pointclouds(points=[locs_50k], features=[col_50k])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): nbrhd_plane is first assigned in a LATER cell -- this cell only works\n",
    "# after out-of-order execution and will NameError under Restart & Run All. Move or delete.\n",
    "nbrhd_plane.shape"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Search the couch pointcloud for a flat, placeable patch\n",
    "nbrhd_dist = 0.1\n",
    "normal_vec = torch.tensor((0., 0., 1.))  # assumes z-up after axis alignment -- TODO confirm\n",
    "loc, residual = find_placeable_location(\n",
    "    pointcloud = pointcloud_obj_50k.points_packed(),\n",
    "    ground_normal = normal_vec,\n",
    "    nbr_dist = nbrhd_dist, # in meters\n",
    "    residual_thresh = 0.01,\n",
    "    max_tries = 100,\n",
    ")\n",
    "# Recompute the winning neighborhood and express it in the plane-aligned basis\n",
    "dists = torch.norm(pointcloud_obj_50k.points_packed() - loc.unsqueeze(0), dim=1)\n",
    "nbrhd = pointcloud_obj_50k.points_packed()[dists < nbrhd_dist]\n",
    "nbrhd_plane, tform = transform_basis(points=nbrhd, normal_vector=normal_vec)\n",
    "nbrhd.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from home_robot.utils.point_cloud_torch import get_bounds\n",
    "from home_robot.utils.bboxes_3d import box3d_volume_from_bounds\n",
    "# In-plane bounding box of the neighborhood and its 3D bounding-box volume\n",
    "bounds = get_bounds(nbrhd_plane)\n",
    "volume = box3d_volume_from_bounds(bounds)\n",
    "\n",
    "# First two basis dims are the in-plane extents\n",
    "mins, maxs = bounds[:2].unbind(dim=-1)\n",
    "area = torch.prod(maxs - mins, dim=-1) * 1000  # NOTE(review): * 1000 is an unexplained scale factor -- verify intended units\n",
    "print(f'Area {area.item():0.4f} / Volume {volume.item():0.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Extent of the neighborhood along each bounded dimension (from the previous cell)\n",
    "maxs - mins"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# -> SparseVoxelMapWithInstanceViews.show(backend='pytorch3d')\n",
    "\n",
    "# Plot GT scene\n",
    "# NOTE(review): this cell is a near-duplicate of the earlier GT-scene plotting cell;\n",
    "# consider extracting a shared plotting helper function.\n",
    "from home_robot.utils.bboxes_3d import BBoxes3D, join_boxes_as_scene, join_boxes_as_batch\n",
    "from home_robot.utils.bboxes_3d_plotly import plot_scene_with_bboxes\n",
    "from pytorch3d.vis.plotly_vis import AxisArgs\n",
    "from pytorch3d.structures import Pointclouds\n",
    "import seaborn as sns\n",
    "\n",
    "# One distinct color per GT box, used as the box features\n",
    "colors = torch.tensor(sns.color_palette(\"husl\", len(scene_obs['boxes_aligned'])))\n",
    "gt_boxes = BBoxes3D(\n",
    "    bounds = [scene_obs['boxes_aligned']],\n",
    "    # features = [colors[0].unsqueeze(0).expand(27,3)],\n",
    "    features = [colors],\n",
    "    names = [scene_obs['box_classes'].unsqueeze(-1)]\n",
    ")\n",
    "\n",
    "mask_obj = (ins_50k == inst_id)\n",
    "# pointcloud_50k = Pointclouds(points=[locs_50k[mask_obj]], features=[col_50k[mask_obj]])\n",
    "# The neighborhood found by find_placeable_location, shown as its own trace\n",
    "neighborhood = Pointclouds(points=[nbrhd])\n",
    "\n",
    "# Interactive plotly figure: GT boxes + object cloud + candidate placement patch\n",
    "fig = plot_scene_with_bboxes(\n",
    "    plots = { f\"{scene_id}\": { \n",
    "\n",
    "                                \"GT boxes\": gt_boxes,\n",
    "                                # \"GT points\": pointcloud_aligned,\n",
    "                                \"GT points smol\": pointcloud_obj_50k,\n",
    "                                \"Place location\": neighborhood,\n",
    "                                # \"cameras\": cameras,\n",
    "                            }\n",
    "    },\n",
    "    xaxis={\"backgroundcolor\":\"rgb(200, 200, 230)\"},\n",
    "    yaxis={\"backgroundcolor\":\"rgb(230, 200, 200)\"},\n",
    "    zaxis={\"backgroundcolor\":\"rgb(200, 230, 200)\"}, \n",
    "    axis_args=AxisArgs(showgrid=True),\n",
    "    pointcloud_marker_size=3,\n",
    "    pointcloud_max_points=200_000,\n",
    "    boxes_wireframe_width=3,\n",
    "    boxes_add_cross_face_bars=False,\n",
    "    # boxes_name_int_to_display_name_dict = dict(zip([int(i) for i in data.METAINFO['seg_valid_class_ids']], data.METAINFO['classes'])),\n",
    "    boxes_name_int_to_display_name_dict = dict(zip(data.METAINFO['CLASS_IDS'], data.METAINFO['CLASS_NAMES'])),\n",
    "\n",
    "    boxes_plot_together=False,\n",
    "    height=1000,\n",
    "    # width=1000,\n",
    ")\n",
    "fig"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.17"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
