{
 "cells": [
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "import os\n",
    "import torch\n",
    "import json\n",
    "from tqdm.auto import tqdm\n",
    "from internal.renderers.gsplat_renderer import GSPlatRenderer\n",
    "from internal.renderers.gsplat_hit_pixel_count_renderer import GSplatHitPixelCountRenderer\n",
    "from internal.dataparsers.colmap_dataparser import Colmap, ColmapDataParser\n",
    "from internal.utils.sh_utils import RGB2SH\n",
    "from internal.utils.gaussian_model_loader import GaussianModelLoader"
   ],
   "id": "b7536842f67d030b"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "torch.autograd.set_grad_enabled(False)",
   "id": "c3b6a22b10bad7fe"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "partition_base_dir = os.path.expanduser(\n",
    "    \"~/data/image_set/JNUCar_undistorted/colmap/drone/dense_max_2048/0/partitions-threshold_0.2/\")"
   ],
   "id": "dd5856f17623939c"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "dataparser_outputs = ColmapDataParser(\n",
    "    os.path.join(partition_base_dir, \"..\"),\n",
    "    output_path=os.getcwd(),\n",
    "    global_rank=0,\n",
    "    params=Colmap(\n",
    "        appearance_groups=\"appearance_image_dedicated\",\n",
    "        eval_step=32\n",
    "    ),\n",
    ").get_outputs()"
   ],
   "id": "fc6bcabeed9f4f3d"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "image_name_to_camera_idx = {}\n",
    "for idx, name in enumerate(dataparser_outputs.train_set.image_names):\n",
    "    image_name_to_camera_idx[name] = idx\n",
    "image_name_to_camera_idx"
   ],
   "id": "d3f3f16cce66cdc4"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "def get_cameras_by_image_list(image_list: list):\n",
    "    cameras = []\n",
    "    for i in image_list:\n",
    "        cameras.append(dataparser_outputs.train_set.cameras[image_name_to_camera_idx[i]])\n",
    "    return cameras"
   ],
   "id": "ff36c2fd3e493fd2"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "partitions = torch.load(os.path.join(partition_base_dir, \"partitions.pt\"),\n",
    "                        map_location=\"cpu\")\n",
    "\n",
    "orientation_transformation = partitions[\"orientation_transformation\"]\n",
    "\n",
    "model_paths = []\n",
    "for idx, i in enumerate(partitions[\"ids\"]):\n",
    "    if len(partitions[\"image_indices\"][idx]) < 32:\n",
    "        continue\n",
    "    model_paths.append((i, os.path.join(\n",
    "        \"../outputs/JNUAerial-0526/\",\n",
    "        f\"P_{i[0]:03d}_{i[1]:03d}.txt\")))\n",
    "partitions.keys(), model_paths"
   ],
   "id": "d79a4401ba810d04"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "partition_id_to_index = {i: idx for idx, i in enumerate(partitions[\"ids\"])}\n",
    "partition_id_to_index"
   ],
   "id": "3b3df7d0949a8f7e"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# group name to image list\n",
    "with open(os.path.join(partition_base_dir, \"..\", \"appearance_image_dedicated.json\"), \"r\") as f:\n",
    "    appearance_groups = json.load(f)\n",
    "# image name to group name\n",
    "image_name_to_group_name = {}\n",
    "for group_name in appearance_groups:\n",
    "    image_names = appearance_groups[group_name]\n",
    "    for image_name in image_names:\n",
    "        image_name_to_group_name[image_name] = group_name"
   ],
   "id": "d5f4846cdd03418d"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# def calculate_partition_features_dc(model_output_path, partition_id, ckpt, is_in_partition):\n",
    "#     # the ids of the groups in this model\n",
    "#     appearance_group_ids = torch.load(os.path.join(model_output_path, \"appearance_group_ids.pth\"))\n",
    "# \n",
    "#     # load partition image list, then get their correspond appearance ids\n",
    "#     partition_used_appearance_ids = []\n",
    "#     with open(os.path.join(partition_base_dir, f\"{partition_id[0]:03d}_{partition_id[1]:03d}.txt\"), \"r\") as f:\n",
    "#         for row in f:\n",
    "#             image_group_name = image_name_to_group_name[row.rstrip(\"\\n\")]\n",
    "#             partition_used_appearance_ids.append(appearance_group_ids[image_group_name][0])\n",
    "# \n",
    "#     # get average appearance embeddings of these used images\n",
    "#     average_appearance_embedding = ckpt[\"hyper_parameters\"][\"renderer\"].model.embedding(\n",
    "#         torch.tensor(partition_used_appearance_ids, dtype=torch.int)\n",
    "#     ).mean(dim=0)\n",
    "# \n",
    "#     # calculate rgb_offset from appearance embedding\n",
    "#     partition_features_extra = ckpt[\"state_dict\"][\"gaussian_model._features_extra\"][is_in_partition]\n",
    "#     appearance_mlp_input = torch.concat([partition_features_extra, average_appearance_embedding.unsqueeze(0).repeat(\n",
    "#         partition_features_extra.shape[0], 1)], dim=-1)\n",
    "#     rgb_offset = ckpt[\"hyper_parameters\"][\"renderer\"].model.network.to(\"cuda\")(appearance_mlp_input.to(\"cuda\")) * 2 - 1.\n",
    "# \n",
    "#     # calculate base_rgb from features_dc\n",
    "#     base_rgb = spherical_harmonics(0, torch.ones(partition_features_extra.shape[0], 3).to(\"cuda\"),\n",
    "#                                    ckpt[\"state_dict\"][\"gaussian_model._features_dc\"][is_in_partition].to(\"cuda\")) + 0.5\n",
    "# \n",
    "#     # calculate final rgb values\n",
    "#     rgbs = (base_rgb + rgb_offset).clamp(0., 1.)\n",
    "# \n",
    "#     # convert to SHs\n",
    "#     partition_features_dc = RGB2SH(rgbs).unsqueeze(1)\n",
    "# \n",
    "#     return partition_features_dc"
   ],
   "id": "338c328f0fdccbdd"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "def calculate_gaussian_scores(cameras, gaussian_model, device):\n",
    "    hit_count_list = []\n",
    "    opacity_score_list = []\n",
    "    alpha_score_list = []\n",
    "    all_visibility_score = torch.zeros((len(cameras), gaussian_model.get_xyz.shape[0]), dtype=torch.float, device=device)\n",
    "    for idx, camera in tqdm(enumerate(cameras), total=len(cameras)):\n",
    "        hit_count, opacity_score, alpha_score, visibility_score = GSplatHitPixelCountRenderer.hit_pixel_count(\n",
    "            means3D=gaussian_model.get_xyz,\n",
    "            opacities=gaussian_model.get_opacity,\n",
    "            scales=gaussian_model.get_scaling,\n",
    "            rotations=gaussian_model.get_rotation,\n",
    "            viewpoint_camera=camera.to_device(\"cuda\"),\n",
    "        )\n",
    "        # hit_count_list.append(hit_count.cpu())\n",
    "        # opacity_score_list.append(opacity_score.cpu())\n",
    "        # alpha_score_list.append(alpha_score.cpu())\n",
    "        all_visibility_score[idx] = visibility_score.to(device=device)\n",
    "        # visibility_score_list.append(visibility_score.cpu())\n",
    "\n",
    "    torch.cuda.empty_cache()\n",
    "\n",
    "    return all_visibility_score"
   ],
   "id": "577e5ab63df38cb"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "def prune_gaussian_model(gaussian_model, mask):\n",
    "    gaussian_model.properties = {k: v[mask] for k, v in gaussian_model.properties.items()}"
   ],
   "id": "b39d63e415a3be5"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "def get_pruned_gaussian_model(image_list, ckpt, is_in_partition):\n",
    "    cuda_device = torch.device(\"cuda\")\n",
    "    \n",
    "    # get the cameras corresponding to the image list\n",
    "    cameras = get_cameras_by_image_list(image_list)\n",
    "    # build appearance id, the order is the same as the list `cameras`\n",
    "    camera_index_to_appearance_id = torch.tensor([i.appearance_id for i in cameras], dtype=torch.int, device=cuda_device)\n",
    "\n",
    "    gaussian_model = GaussianModelLoader.initialize_model_from_checkpoint(\n",
    "        ckpt,\n",
    "        \"cpu\",\n",
    "    )\n",
    "    # prune those outside bounding box\n",
    "    prune_gaussian_model(gaussian_model, is_in_partition)\n",
    "    gaussian_model.to(device=cuda_device)\n",
    "    \n",
    "    # get renderer\n",
    "    renderer = GaussianModelLoader.initialize_renderer_from_checkpoint(ckpt, stage=\"validation\", device=cuda_device)\n",
    "\n",
    "    # calculate Gaussians' visibility score to each camera; the output may consume a lot of memory, so put it on CPU\n",
    "    visibility_score = calculate_gaussian_scores(cameras, gaussian_model, \"cpu\").T  # [N_gaussians, N_cameras]\n",
    "    # calculate total visibility score for each Gaussian\n",
    "    visibility_score_acc = torch.sum(visibility_score, dim=-1)\n",
    "    # find Gaussians whose total visibility is close to zero\n",
    "    visibility_score_acc_is_close_to_zero = torch.isclose(visibility_score_acc, torch.tensor(0., device=visibility_score_acc.device))\n",
    "    gaussian_to_preserve = ~visibility_score_acc_is_close_to_zero\n",
    "    # prune again\n",
    "    prune_gaussian_model(gaussian_model, gaussian_to_preserve.to(device=cuda_device))\n",
    "    visibility_score_pruned = visibility_score[~visibility_score_acc_is_close_to_zero]\n",
    "    del visibility_score\n",
    "    \n",
    "    \"\"\"\n",
    "    [NOTE]\n",
    "    The code below is related to `GSplatAppearanceEmbeddingRenderer`, aiming to remove the dependency on the MLP.\n",
    "    If you have not enabled this model, simply comment it out.\n",
    "    \n",
    "    === `GSplatAppearanceEmbeddingRenderer` related code start ===\n",
    "    \"\"\"\n",
    "\n",
    "    # get the top `n_average_cameras` cameras by visibility\n",
    "    n_average_cameras = 32\n",
    "    visibility_score_pruned_sorted = torch.topk(visibility_score_pruned, k=n_average_cameras, dim=-1)\n",
    "    visibility_score_pruned_sorted_values = visibility_score_pruned_sorted.values.to(device=cuda_device)\n",
    "    visibility_score_pruned_sorted_indices = visibility_score_pruned_sorted.indices.to(device=cuda_device)\n",
    "    del visibility_score_pruned_sorted\n",
    "    \n",
    "    visibility_score_pruned_top_k_acc = torch.sum(visibility_score_pruned_sorted_values, dim=-1, keepdim=True)\n",
    "    # calculate the weight of each camera\n",
    "    visibility_score_pruned_top_k_pdf = visibility_score_pruned_sorted_values / visibility_score_pruned_top_k_acc\n",
    "    assert torch.all(torch.isclose(visibility_score_pruned_top_k_pdf.sum(dim=-1), torch.tensor(1., device=visibility_score_pruned_top_k_pdf.device)))\n",
    "\n",
    "    # pick appearance id\n",
    "    appearance_ids = camera_index_to_appearance_id[visibility_score_pruned_sorted_indices.reshape(-1)]  # [N_gaussians * n_average_cameras]\n",
    "    # pick appearance embeddings\n",
    "    appearance_embeddings = renderer.model.embedding(appearance_ids).reshape((\n",
    "        visibility_score_pruned_sorted_indices.shape[0],\n",
    "        n_average_cameras,\n",
    "        -1,\n",
    "    ))  # [N_gaussians, n_average_cameras, N_embedding_dims]\n",
    "    # multiply embeddings by camera weights\n",
    "    weighted_appearance_embeddings = appearance_embeddings * visibility_score_pruned_top_k_pdf.unsqueeze(-1)\n",
    "    # merge `n_average_cameras` embedding to a single embedding\n",
    "    final_appearance_embeddings = torch.sum(weighted_appearance_embeddings, dim=1)\n",
    "\n",
    "    # embedding network forward, output rgb_offset\n",
    "    embedding_network = renderer.model.network\n",
    "    input_tensor_list = [\n",
    "        gaussian_model.get_appearance_features(),\n",
    "        final_appearance_embeddings,\n",
    "    ]\n",
    "    \n",
    "    # view dependent\n",
    "    if renderer.model_config.is_view_dependent:\n",
    "        camera_index_to_camera_center = torch.stack(\n",
    "            [i.camera_center for i in cameras], \n",
    "        ).to(device=cuda_device)  # [N_cameras, 3]\n",
    "        camera_centers = camera_index_to_camera_center[visibility_score_pruned_sorted_indices]  # [N_gaussians, n_average_cameras, 3]\n",
    "        \n",
    "        # not sure which one below is better\n",
    "        \n",
    "        # [OPTION 1] weighted camera centers\n",
    "        # weighted_camera_centers = camera_centers * visibility_score_pruned_top_k_pdf.unsqueeze(-1)\n",
    "        # final_camera_centers = torch.sum(weighted_camera_centers, dim=1)  # [N_gaussians, 3]\n",
    "        # view_directions = torch.nn.functional.normalize(gaussian_model.get_means() - final_camera_centers, dim=-1)\n",
    "        \n",
    "        # [OPTION 2] weighted view directions\n",
    "        unweighted_view_directions = torch.nn.functional.normalize(gaussian_model.get_means().unsqueeze(1) - camera_centers, dim=-1)  # [N_gaussians, n_average_cameras, 3]\n",
    "        weighted_view_directions = unweighted_view_directions * visibility_score_pruned_top_k_pdf.unsqueeze(-1)\n",
    "        view_directions = torch.nn.functional.normalize(torch.sum(weighted_view_directions, dim=1), dim=-1)  # [N_gaussians, 3]\n",
    "        \n",
    "        encoded_view_directions = renderer.model.view_direction_encoding(view_directions)\n",
    "        \n",
    "        input_tensor_list.append(encoded_view_directions)\n",
    "    \n",
    "    input_tensor = torch.concat(input_tensor_list, dim=-1).to(cuda_device)\n",
    "    rgb_offset = embedding_network(input_tensor)\n",
    "    # convert rgb_offset to SHs\n",
    "    sh_offset = RGB2SH(rgb_offset)\n",
    "\n",
    "    gaussian_model.shs_dc = gaussian_model.shs_dc + sh_offset.unsqueeze(1)\n",
    "    \n",
    "    \"\"\" \n",
    "    === `GSplatAppearanceEmbeddingRenderer` related code end ===\n",
    "    \"\"\"\n",
    "    \n",
    "    gaussian_model.to(device)\n",
    "\n",
    "    return gaussian_model"
   ],
   "id": "cd7e3cf918d111e3"
  },
  {
   "cell_type": "code",
   "source": [
    "dtype = torch.float\n",
    "device = torch.device(\"cpu\")\n",
    "\n",
    "n_sh_degrees = 0\n",
    "xyzs = torch.zeros((0, 3), dtype=dtype, device=device)\n",
    "features_dc = torch.zeros((0, 1, 3), dtype=dtype, device=device)\n",
    "features_rest = torch.zeros((0, ((n_sh_degrees + 1) ** 2 - 1), 3), dtype=dtype, device=device)\n",
    "scales = torch.zeros((0, 3), dtype=dtype, device=device)\n",
    "rotations = torch.zeros((0, 4), dtype=dtype, device=device)\n",
    "opacities = torch.zeros((0, 1), dtype=dtype, device=device)\n",
    "\n",
    "with tqdm(model_paths) as t:\n",
    "    for i in t:\n",
    "        # if i[0] != (0, 0):\n",
    "        #     continue\n",
    "        with open(os.path.join(i[1], \"cameras.json\"), \"r\") as f:\n",
    "            cameras_json = json.load(f)\n",
    "        image_list = [i[\"img_name\"] for i in cameras_json]\n",
    "\n",
    "        partition_xy = partitions[\"xys\"][partition_id_to_index[i[0]]]\n",
    "        load_file = GaussianModelLoader.search_load_file(i[1])\n",
    "        t.set_description(f\"{partition_xy}: {load_file}\")\n",
    "        ckpt = torch.load(load_file, map_location=device)\n",
    "        xyz = ckpt[\"state_dict\"][\"gaussian_model.gaussians.means\"]\n",
    "        reoriented_xyz = xyz @ orientation_transformation[:3, :3].T\n",
    "        # include min bound, exclude max bound\n",
    "        is_in_partition = torch.logical_and(torch.ge(reoriented_xyz[:, :2], partition_xy),\n",
    "                                            torch.lt(reoriented_xyz[:, :2], partition_xy + 2 * partitions[\"radius\"]))\n",
    "        is_in_partition = torch.logical_and(is_in_partition[:, 0], is_in_partition[:, 1])\n",
    "\n",
    "        pruned_gaussian_model = get_pruned_gaussian_model(image_list, ckpt, is_in_partition)\n",
    "\n",
    "        xyzs = torch.concat([xyzs, pruned_gaussian_model.means])\n",
    "        features_dc = torch.concat([features_dc, pruned_gaussian_model.shs_dc])\n",
    "        features_rest = torch.concat([features_rest, pruned_gaussian_model.shs_rest])\n",
    "        scales = torch.concat([scales, pruned_gaussian_model.scales])\n",
    "        rotations = torch.concat([rotations, pruned_gaussian_model.rotations])\n",
    "        opacities = torch.concat([opacities, pruned_gaussian_model.opacities])\n",
    "\n",
    "        torch.cuda.empty_cache()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "fcbc9acc13582886",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# model_output_path = i[1]\n",
    "# model_output_path"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8c6527bd1275eee6",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# # load partition image list, then get their correspond appearance ids\n",
    "# partition_id = i[0]\n",
    "# partition_used_appearance_ids = []\n",
    "# image_list = []\n",
    "# with open(os.path.join(partition_base_dir, f\"{partition_id[0]:03d}_{partition_id[1]:03d}.txt\"), \"r\") as f:\n",
    "#     for row in f:\n",
    "#         image_list.append(row.rstrip(\"\\n\"))\n",
    "# \n",
    "# cameras = get_cameras_by_image_list(image_list)\n",
    "# state_dict = {}\n",
    "# for i in ckpt[\"state_dict\"]:\n",
    "#     if i.startswith(\"gaussian_model._\"):\n",
    "#         state_dict[i] = ckpt[\"state_dict\"][i][is_in_partition]\n",
    "# gaussian_model = GaussianModelSimplified.construct_from_state_dict(\n",
    "#     state_dict,\n",
    "#     active_sh_degree=ckpt[\"hyper_parameters\"][\"gaussian\"].sh_degree,\n",
    "#     device=\"cuda\",\n",
    "# )"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8e46a8205e9dd16d",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# for i in state_dict:\n",
    "#     ckpt[\"state_dict\"][i] = state_dict[i]\n",
    "# torch.save(ckpt, os.path.join(model_output_path, \"checkpoints\", \"pruned.ckpt\"))"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "68211744578409bb",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "import gc\n",
    "\n",
    "gc.collect()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "f92b0b84433b6f01",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# # get the contribution to every camera of each Gaussian\n",
    "# visibility_score = calculate_gaussian_scores(cameras, gaussian_model)  # [N_cameras, N_gaussians]"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "e11633843bd34ed1",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# visibility_score = visibility_score.T"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "c8101999ea35e477",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# visibility_score.shape  # [N_gaussians, N_cameras]"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "330d5ac7f812cc9b",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# visibility_score_acc = torch.sum(visibility_score, dim=-1)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "16ede4f3f773e2e7",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# visibility_score_acc_is_close_to_zero = torch.isclose(visibility_score_acc, torch.tensor(0.))\n",
    "# visibility_score_acc_is_close_to_zero.sum()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "5403e5c277fa68d3",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# visibility_score_acc_is_close_to_zero.shape"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8fc7c9499f2d65af",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# for i in state_dict:\n",
    "#     state_dict[i] = state_dict[i][~visibility_score_acc_is_close_to_zero]\n",
    "# for i in state_dict:\n",
    "#     ckpt[\"state_dict\"][i] = state_dict[i]\n",
    "# torch.save(ckpt, os.path.join(model_output_path, \"checkpoints\", \"pruned-closed-to-zero.ckpt\"))"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "91232037d23e70d1",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# visibility_score_pruned = visibility_score[~visibility_score_acc_is_close_to_zero]"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "5456e5803cb814be",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# # get top `n_average_cameras` camera by contribution\n",
    "# n_average_cameras = 32\n",
    "# visibility_score_pruned_sorted = torch.topk(visibility_score_pruned, k=n_average_cameras, dim=-1)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "c9e04924d43dae96",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# visibility_score_pruned_sorted.indices.shape"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "c5538a08d114b981",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# visibility_score_pruned_sorted.indices[0], visibility_score_pruned_sorted.values[0]"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "ac86f0fdbbeb8e00",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# if the visibility of any of the 2nd..K-th cameras is close to zero, use the 1st camera instead\n",
    "# visibility_score_sorted.indices[:, 1:] = torch.where(\n",
    "#     torch.isclose(visibility_score_sorted.values[:, 1:], torch.tensor(0.)),\n",
    "#     visibility_score_sorted.indices[:, :1],\n",
    "#     visibility_score_sorted.indices[:, 1:],\n",
    "# )\n",
    "# visibility_score_sorted.indices[0], visibility_score_sorted.values[0]"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "4b4003896897372c",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# visibility_score_pruned_top_k_acc = torch.sum(visibility_score_pruned_sorted.values, dim=-1, keepdim=True)\n",
    "# visibility_score_pruned_top_k_acc.shape"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "796e82754cb388c3",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# calculate the weight of each camera\n",
    "# visibility_score_pruned_top_k_pdf = visibility_score_pruned_sorted.values / visibility_score_pruned_top_k_acc\n",
    "# visibility_score_pruned_top_k_pdf.shape, torch.all(\n",
    "#     torch.isclose(visibility_score_pruned_top_k_pdf.sum(dim=-1), torch.tensor(1.)))"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "eee9844bf945f05c",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# camera_index_to_appearance_id = torch.tensor([i.appearance_id for i in cameras], dtype=torch.int)\n",
    "# camera_index_to_appearance_id"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "81649bdc51b47fe9",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# appearance_ids = camera_index_to_appearance_id[visibility_score_pruned_sorted.indices.reshape(-1)]\n",
    "# appearance_ids.shape"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "a74def65d4fda68d",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# appearance_embeddings = ckpt[\"hyper_parameters\"][\"renderer\"].model.embedding(appearance_ids).reshape(\n",
    "#     (visibility_score_pruned_sorted.indices.shape[0], n_average_cameras, -1))"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "44e617694d1bfb8b",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# weighted_appearance_embeddings = appearance_embeddings * visibility_score_pruned_top_k_pdf.unsqueeze(-1)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "eaec1c0ec5307c",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# final_appearance_embeddings = torch.sum(weighted_appearance_embeddings, dim=1)\n",
    "# final_appearance_embeddings.shape"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "94a94232278ec020",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# embedding_network = ckpt[\"hyper_parameters\"][\"renderer\"].model.network.to(\"cuda\")"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8ed4e5e476ba3a35",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# input_tensor = torch.concat([state_dict[\"gaussian_model._features_extra\"], final_appearance_embeddings], dim=-1).to(\n",
    "#     \"cuda\")"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "314d50752b791923",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# rgb_offset = embedding_network(input_tensor)\n",
    "# rgb_offset"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "da3a3bb323a4aa39",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# sh_offset = RGB2SH(rgb_offset)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "102826846fdf79c6",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# state_dict[\"gaussian_model._features_dc\"].shape, sh_offset.unsqueeze(1).cpu().shape"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "45dea05b0bd56cb7",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# new_features_dc = state_dict[\"gaussian_model._features_dc\"] + sh_offset.unsqueeze(1).cpu()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "f778acb16b8aaf03",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# state_dict[\"gaussian_model._features_dc\"] = new_features_dc"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "2fedbf712bee5d7a",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# remove `GSplatAppearanceEmbeddingRenderer`'s states from ckpt\n",
    "state_dict_key_to_delete = []\n",
    "for i in ckpt[\"state_dict\"]:\n",
    "    if i.startswith(\"renderer.\"):\n",
    "        state_dict_key_to_delete.append(i)\n",
    "for i in state_dict_key_to_delete:\n",
    "    del ckpt[\"state_dict\"][i]"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8acee341206bcd69",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "ckpt[\"optimizer_states\"] = []",
   "id": "6d27d0321b9b6682"
  },
  {
   "cell_type": "code",
   "source": [
    "# ckpt[\"hyper_parameters\"][\"renderer\"] = GSPlatRenderer()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "ed1f335b73e4b5ea",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# for i in state_dict:\n",
    "#     ckpt[\"state_dict\"][i] = state_dict[i]\n",
    "# torch.save(ckpt, os.path.join(model_output_path, \"checkpoints\", \"pruned-gsplat-vanilla-renderer.ckpt\"))"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "5095b2c9ac762a9",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# replace `AppearanceFeatureGaussian` with `VanillaGaussian`\n",
    "from internal.models.vanilla_gaussian import VanillaGaussian\n",
    "ckpt[\"hyper_parameters\"][\"gaussian\"] = VanillaGaussian(sh_degree=pruned_gaussian_model.max_sh_degree)"
   ],
   "id": "304907b227456a1"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# remove existing Gaussians from ckpt\n",
    "for i in list(ckpt[\"state_dict\"].keys()):\n",
    "    if i.startswith(\"gaussian_model.gaussians.\"):\n",
    "        del ckpt[\"state_dict\"][i]"
   ],
   "id": "f72005848286e07d"
  },
  {
   "cell_type": "code",
   "source": [
    "ckpt[\"state_dict\"][\"gaussian_model.gaussians.means\"] = xyzs\n",
    "ckpt[\"state_dict\"][\"gaussian_model.gaussians.shs_dc\"] = features_dc\n",
    "ckpt[\"state_dict\"][\"gaussian_model.gaussians.shs_rest\"] = features_rest\n",
    "ckpt[\"state_dict\"][\"gaussian_model.gaussians.scales\"] = scales\n",
    "ckpt[\"state_dict\"][\"gaussian_model.gaussians.rotations\"] = rotations\n",
    "ckpt[\"state_dict\"][\"gaussian_model.gaussians.opacities\"] = opacities\n",
    "ckpt[\"hyper_parameters\"][\"renderer\"] = GSPlatRenderer()  # replace `GSplatAppearanceEmbeddingRenderer` with `GSPlatRenderer`"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "238014ab2588b142",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "torch.save(ckpt, \"jnu_aerial-0526.ckpt\")"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "b617a8e290d97466",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "Merging is complete here",
   "id": "392db039f7106fea"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# [NOTE] Contents below are optional or just for experimental purposes",
   "id": "7baaeccfb90e06a7"
  },
  {
   "cell_type": "markdown",
   "source": "Update a specific partition",
   "metadata": {
    "collapsed": false
   },
   "id": "7cee25c7df9be830"
  },
  {
   "cell_type": "code",
   "source": [
    "dtype = torch.float\n",
    "device = torch.device(\"cpu\")\n",
    "\n",
    "input_path = \"../edited/20240415_232032.ckpt\"\n",
    "ckpt = torch.load(input_path, map_location=device)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "43b2809b9c84dc6b",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "xyz = ckpt[\"state_dict\"][\"gaussian_model._xyz\"]\n",
    "reoriented_xyz = xyz @ orientation_transformation[:3, :3].T"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "a61b87380cde1776",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "target_partition_id = (0, 0)\n",
    "target_partition_ckpt = \"../outputs/JNUAerial/P_000_000.txt-random_background/checkpoints/epoch=19-step=30000.ckpt\""
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "2f8d6df3ab155a33",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# min (x, y) corner of the target partition's bounds\n",
    "# (`partitions` / `partition_id_to_index` are built earlier in the notebook)\n",
    "partition_xy = partitions[\"xys\"][partition_id_to_index[target_partition_id]]\n",
    "partition_xy"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "e1e501ea2dd92c20",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# mask of merged-model Gaussians whose (x, y) lies inside the target partition\n",
    "# include min bound, exclude max bound\n",
    "is_in_partition = torch.logical_and(torch.ge(reoriented_xyz[:, :2], partition_xy),\n",
    "                                    torch.lt(reoriented_xyz[:, :2], partition_xy + 2 * partitions[\"radius\"]))\n",
    "is_in_partition = torch.logical_and(is_in_partition[:, 0], is_in_partition[:, 1])\n",
    "is_in_partition.sum()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "531791a4441b5601",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# keep only the Gaussians OUTSIDE the target partition; the partition's\n",
    "# retrained Gaussians get concatenated onto these tensors in a later cell\n",
    "not_in_partition = ~is_in_partition\n",
    "xyzs = ckpt[\"state_dict\"][\"gaussian_model._xyz\"][not_in_partition]\n",
    "features_dc = ckpt[\"state_dict\"][\"gaussian_model._features_dc\"][not_in_partition]\n",
    "features_rest = ckpt[\"state_dict\"][\"gaussian_model._features_rest\"][not_in_partition]\n",
    "scales = ckpt[\"state_dict\"][\"gaussian_model._scaling\"][not_in_partition]\n",
    "rotations = ckpt[\"state_dict\"][\"gaussian_model._rotation\"][not_in_partition]\n",
    "opacities = ckpt[\"state_dict\"][\"gaussian_model._opacity\"][not_in_partition]\n",
    "xyzs.shape"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8eb6e6e5e711d195",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# load the retrained partition checkpoint and rotate its means the same way\n",
    "# as the merged model's means above\n",
    "partition_ckpt = torch.load(target_partition_ckpt, map_location=device)\n",
    "reoriented_partition_xyz = partition_ckpt[\"state_dict\"][\"gaussian_model._xyz\"] @ orientation_transformation[:3, :3].T"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "9c0dd03660d44e4f",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# mask of partition-model Gaussians inside the partition bounds\n",
    "# (same include-min / exclude-max convention as for the merged model)\n",
    "is_in_partition = torch.logical_and(torch.ge(reoriented_partition_xyz[:, :2], partition_xy),\n",
    "                                    torch.lt(reoriented_partition_xyz[:, :2], partition_xy + 2 * partitions[\"radius\"]))\n",
    "is_in_partition = torch.logical_and(is_in_partition[:, 0], is_in_partition[:, 1])\n",
    "is_in_partition.sum()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "5c4f27939520b775",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# append the partition's in-bounds Gaussians to the out-of-partition remainder\n",
    "xyzs = torch.concat([xyzs, partition_ckpt[\"state_dict\"][\"gaussian_model._xyz\"][is_in_partition]])\n",
    "features_dc = torch.concat([features_dc, partition_ckpt[\"state_dict\"][\"gaussian_model._features_dc\"][is_in_partition]])\n",
    "features_rest = torch.concat(\n",
    "    [features_rest, partition_ckpt[\"state_dict\"][\"gaussian_model._features_rest\"][is_in_partition]])\n",
    "scales = torch.concat([scales, partition_ckpt[\"state_dict\"][\"gaussian_model._scaling\"][is_in_partition]])\n",
    "rotations = torch.concat([rotations, partition_ckpt[\"state_dict\"][\"gaussian_model._rotation\"][is_in_partition]])\n",
    "opacities = torch.concat([opacities, partition_ckpt[\"state_dict\"][\"gaussian_model._opacity\"][is_in_partition]])"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "b9ee060a97f5904a",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# write the merged tensors back into the checkpoint's state_dict\n",
    "merged_tensors = {\n",
    "    \"gaussian_model._xyz\": xyzs,\n",
    "    \"gaussian_model._features_dc\": features_dc,\n",
    "    \"gaussian_model._features_rest\": features_rest,\n",
    "    \"gaussian_model._scaling\": scales,\n",
    "    \"gaussian_model._rotation\": rotations,\n",
    "    \"gaussian_model._opacity\": opacities,\n",
    "}\n",
    "for key, tensor in merged_tensors.items():\n",
    "    ckpt[\"state_dict\"][key] = tensor"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "d464b4aedd790170",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# save the checkpoint with the replaced partition under a new name\n",
    "torch.save(ckpt, \"jnu_aerial_new.ckpt\")"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8da8389eee6bc915",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "LightGaussian"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "409d9666f371fd1f"
  },
  {
   "cell_type": "code",
   "source": [
    "# LightGaussian pruning helpers (importance scoring and prune-mask utilities)\n",
    "from internal.utils.light_gaussian import get_count_and_score, calculate_v_imp_score, get_prune_mask"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "dc53fde0dd5e5c98",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# read the target partition's image list, then fetch the matching cameras\n",
    "partition_id = (0, 0)\n",
    "image_list_path = os.path.join(partition_base_dir, f\"{partition_id[0]:03d}_{partition_id[1]:03d}.txt\")\n",
    "with open(image_list_path, \"r\") as f:\n",
    "    image_list = [row.rstrip(\"\\n\") for row in f]\n",
    "\n",
    "# `get_cameras_by_image_list` is defined earlier in the notebook\n",
    "cameras = get_cameras_by_image_list(image_list)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "35e57449fd5ebf40",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# load the partition's trained checkpoint onto the CPU\n",
    "ckpt = torch.load(\"../outputs/JNUAerial-0526/P_000_000.txt/checkpoints/epoch=100-step=132700.ckpt\", map_location=\"cpu\")"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "a4d8deb25532bcfb",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# min (x, y) corner of this partition's bounds\n",
    "partition_xy = partitions[\"xys\"][partition_id_to_index[partition_id]]\n",
    "partition_xy"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "48d68606cdca713",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# mask of checkpoint Gaussians whose rotated (x, y) lies inside this partition\n",
    "xyz = ckpt[\"state_dict\"][\"gaussian_model._xyz\"]\n",
    "reoriented_xyz = xyz @ orientation_transformation[:3, :3].T\n",
    "# include min bound, exclude max bound\n",
    "is_in_partition = torch.logical_and(torch.ge(reoriented_xyz[:, :2], partition_xy),\n",
    "                                    torch.lt(reoriented_xyz[:, :2], partition_xy + 2 * partitions[\"radius\"]))\n",
    "is_in_partition = torch.logical_and(is_in_partition[:, 0], is_in_partition[:, 1])"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "648f5d0dd506c507",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# build a state_dict containing only the Gaussians located inside the partition\n",
    "state_dict = {\n",
    "    key: tensor[is_in_partition]\n",
    "    for key, tensor in ckpt[\"state_dict\"].items()\n",
    "    if key.startswith(\"gaussian_model._\")\n",
    "}"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8477b4a611507cdf",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# instantiate the model on the GPU from the filtered tensors\n",
    "# (`GaussianModelSimplified` is brought into scope earlier in the notebook)\n",
    "gaussian_model = GaussianModelSimplified.construct_from_state_dict(state_dict, 0, device=\"cuda\")"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "a5b973ed001b4ad5",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# per-Gaussian accumulators for the visibility statistics gathered below\n",
    "num_gaussians = gaussian_model.get_xyz.shape[0]\n",
    "score_device = gaussian_model.get_xyz.device\n",
    "\n",
    "hit_camera_count_total = torch.zeros((num_gaussians), dtype=torch.int, device=score_device)\n",
    "opacity_score_total = torch.zeros((num_gaussians), dtype=torch.float, device=score_device)\n",
    "alpha_score_total = torch.zeros((num_gaussians), dtype=torch.float, device=score_device)\n",
    "visibility_score_total = torch.zeros((num_gaussians), dtype=torch.float, device=score_device)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "37ccaed0fdad792e",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# accumulate per-Gaussian hit counts and scores over all partition cameras\n",
    "# (the loop index was unused, and tqdm infers the length of a sized iterable)\n",
    "for camera in tqdm(cameras):\n",
    "    hit_count, opacity_score, alpha_score, visibility_score = GSplatHitPixelCountRenderer.hit_pixel_count(\n",
    "        means3D=gaussian_model.get_xyz,\n",
    "        opacities=gaussian_model.get_opacity,\n",
    "        scales=gaussian_model.get_scaling,\n",
    "        rotations=gaussian_model.get_rotation,\n",
    "        viewpoint_camera=camera.to_device(\"cuda\"),\n",
    "    )\n",
    "    hit_camera_count_total += hit_count\n",
    "    opacity_score_total += opacity_score\n",
    "    alpha_score_total += alpha_score\n",
    "    visibility_score_total += visibility_score"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "a3354d0c81cb71c5",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# count Gaussians that were never visible from any partition camera\n",
    "visibility_close_to_zero = torch.isclose(visibility_score_total, torch.tensor(0.).to(visibility_score_total))\n",
    "visibility_close_to_zero.sum(), visibility_score_total.shape[0]"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "4f16527e7fe63a8d",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# optional: drop never-visible Gaussians from the model (left disabled)\n",
    "# gaussian_model.delete_gaussians(visibility_close_to_zero)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "d8caa1009a39f20e",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# optional: restrict the accumulators to visible Gaussians (left disabled;\n",
    "# only meaningful if the deletion in the cell above is also enabled)\n",
    "# visible_gaussians = torch.bitwise_not(visibility_close_to_zero)\n",
    "# hit_camera_count_total = hit_camera_count_total[visible_gaussians]\n",
    "# opacity_score_total = opacity_score_total[visible_gaussians]\n",
    "# alpha_score_total = alpha_score_total[visible_gaussians]\n",
    "# visibility_score_total = visibility_score_total[visible_gaussians]"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "5e4b027a47481e40",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# optional sanity check: how many Gaussians exceed a visibility score of 1\n",
    "# (visibility_score_total > 1).sum(), visibility_score_total.shape[0]"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "3bf355a6fb3462ac",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# sort by visibility, most visible first; returns a (values, indices) named tuple\n",
    "visibility_score_total_sorted = torch.sort(visibility_score_total, descending=True)\n",
    "visibility_score_total_sorted"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "63b716fc72078823",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# keep only the top PRESERVE_RATIO fraction of Gaussians by visibility score\n",
    "# (named constant replaces the former inline 0.1)\n",
    "PRESERVE_RATIO = 0.1\n",
    "n_preserved = int(visibility_score_total_sorted.indices.shape[0] * PRESERVE_RATIO)\n",
    "preserve_indices = visibility_score_total_sorted.indices[:n_preserved]\n",
    "preserve_indices.shape[0]"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "70f6aaf8237a0434",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# overwrite the checkpoint tensors with only the preserved Gaussians; the\n",
    "# (CUDA) indices are moved to each tensor's device before indexing.\n",
    "# NOTE(review): the trailing .to(...) looks redundant — indexing already\n",
    "# yields a tensor on state_dict[i]'s device, which came from this ckpt.\n",
    "for i in state_dict:\n",
    "    ckpt[\"state_dict\"][i] = state_dict[i][preserve_indices.to(ckpt[\"state_dict\"][i].device)].to(\n",
    "        ckpt[\"state_dict\"][i].device)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "5475c8c189ba339e",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# save the visibility-pruned checkpoint\n",
    "torch.save(ckpt, \"visibility_pruned.ckpt\")"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "9db9872da9f83c2a",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "v_imp_score"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "a0f7f6b0d2bff8f6"
  },
  {
   "cell_type": "code",
   "source": [
    "# LightGaussian importance score from scaling + accumulated opacity scores;\n",
    "# 0.1 is presumably the volume-power argument — confirm in\n",
    "# internal.utils.light_gaussian.calculate_v_imp_score\n",
    "v_imp_score = calculate_v_imp_score(gaussian_model.get_scaling, opacity_score_total, 0.1)\n",
    "v_imp_score, v_imp_score.shape"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "72772dc30f854e34",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# build the keep-mask; 0.9 is presumably the prune ratio (drop the lowest 90%\n",
    "# by importance) — confirm in internal.utils.light_gaussian.get_prune_mask\n",
    "prune_mask = get_prune_mask(0.9, v_imp_score)\n",
    "preserve_mask = torch.bitwise_not(prune_mask)\n",
    "preserve_mask.sum(), preserve_mask.shape"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "baf51a7bb0445ca2",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# overwrite the checkpoint tensors with only the Gaussians kept by the\n",
    "# importance mask; the mask is moved to each tensor's device before indexing\n",
    "for i in state_dict:\n",
    "    ckpt[\"state_dict\"][i] = state_dict[i][preserve_mask.to(ckpt[\"state_dict\"][i].device)].to(\n",
    "        ckpt[\"state_dict\"][i].device)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "f15db1b4613691b5",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "source": [
    "# save the importance-score-pruned checkpoint\n",
    "torch.save(ckpt, \"opacity_score_pruned.ckpt\")"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "dc44c41c1b4e26b8",
   "outputs": [],
   "execution_count": null
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
